repo_name
stringlengths
1
52
repo_creator
stringclasses
6 values
programming_language
stringclasses
4 values
code
stringlengths
0
9.68M
num_lines
int64
1
234k
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "fmt" "github.com/imdario/mergo" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/ptr" ) type DeploymentOptions struct { metav1.ObjectMeta Labels map[string]string Replicas int32 PodOptions PodOptions } func Deployment(overrides ...DeploymentOptions) *appsv1.Deployment { options := DeploymentOptions{} for _, opts := range overrides { if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge deployment options: %s", err)) } } objectMeta := NamespacedObjectMeta(options.ObjectMeta) if options.PodOptions.Image == "" { options.PodOptions.Image = "public.ecr.aws/eks-distro/kubernetes/pause:3.2" } if options.PodOptions.Labels == nil { options.PodOptions.Labels = map[string]string{ "app": objectMeta.Name, } } pod := Pod(options.PodOptions) dep := &appsv1.Deployment{ ObjectMeta: objectMeta, Spec: appsv1.DeploymentSpec{ Replicas: ptr.Int32(options.Replicas), Selector: &metav1.LabelSelector{MatchLabels: options.PodOptions.Labels}, Template: v1.PodTemplateSpec{ ObjectMeta: ObjectMeta(options.PodOptions.ObjectMeta), Spec: pod.Spec, }, }, } return dep }
66
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "context" "log" "os" "strings" "github.com/samber/lo" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/kubernetes" "knative.dev/pkg/system" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" "github.com/aws/karpenter-core/pkg/utils/env" "github.com/aws/karpenter-core/pkg/utils/functional" ) type Environment struct { envtest.Environment Client client.Client KubernetesInterface kubernetes.Interface Version *version.Version Done chan struct{} Cancel context.CancelFunc } type EnvironmentOptions struct { crds []*v1.CustomResourceDefinition fieldIndexers []func(cache.Cache) error } // WithCRDs registers the specified CRDs to the apiserver for use in testing func WithCRDs(crds ...*v1.CustomResourceDefinition) functional.Option[EnvironmentOptions] { return func(o EnvironmentOptions) EnvironmentOptions { o.crds = append(o.crds, crds...) return o } } // WithFieldIndexers expects a function that indexes fields against the cache such as cache.IndexField(...) func WithFieldIndexers(fieldIndexers ...func(cache.Cache) error) functional.Option[EnvironmentOptions] { return func(o EnvironmentOptions) EnvironmentOptions { o.fieldIndexers = append(o.fieldIndexers, fieldIndexers...) 
return o } } func NewEnvironment(scheme *runtime.Scheme, options ...functional.Option[EnvironmentOptions]) *Environment { opts := functional.ResolveOptions(options...) ctx, cancel := context.WithCancel(context.Background()) os.Setenv(system.NamespaceEnvKey, "default") version := version.MustParseSemantic(strings.Replace(env.WithDefaultString("K8S_VERSION", "1.24.x"), ".x", ".0", -1)) environment := envtest.Environment{Scheme: scheme, CRDs: opts.crds} if version.Minor() >= 21 { // PodAffinityNamespaceSelector is used for label selectors in pod affinities. If the feature-gate is turned off, // the api-server just clears out the label selector so we never see it. If we turn it on, the label selectors // are passed to us and we handle them. This feature is alpha in v1.21, beta in v1.22 and will be GA in 1.24. See // https://github.com/kubernetes/enhancements/issues/2249 for more info. environment.ControlPlane.GetAPIServer().Configure().Set("feature-gates", "PodAffinityNamespaceSelector=true") } if version.Minor() >= 24 { // MinDomainsInPodTopologySpread enforces a minimum number of eligible node domains for pod scheduling // See https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/#spread-constraint-definition // Ref: https://github.com/aws/karpenter-core/pull/330 environment.ControlPlane.GetAPIServer().Configure().Set("feature-gates", "MinDomainsInPodTopologySpread=true") } _ = lo.Must(environment.Start()) c := lo.Must(client.New(environment.Config, client.Options{Scheme: scheme})) // We use a modified client if we need field indexers if len(opts.fieldIndexers) > 0 { cache := lo.Must(cache.New(environment.Config, cache.Options{Scheme: scheme})) for _, index := range opts.fieldIndexers { lo.Must0(index(cache)) } lo.Must0(cache.IndexField(ctx, &corev1.Pod{}, "spec.nodeName", func(o client.Object) []string { pod := o.(*corev1.Pod) return []string{pod.Spec.NodeName} })) c = &CacheSyncingClient{ Client: 
lo.Must(client.NewDelegatingClient(client.NewDelegatingClientInput{ CacheReader: cache, Client: c, })), } go func() { lo.Must0(cache.Start(ctx)) }() if !cache.WaitForCacheSync(ctx) { log.Fatalf("cache failed to sync") } } return &Environment{ Environment: environment, Client: c, KubernetesInterface: kubernetes.NewForConfigOrDie(environment.Config), Version: version, Done: make(chan struct{}), Cancel: cancel, } } func (e *Environment) Stop() error { close(e.Done) e.Cancel() return e.Environment.Stop() }
131
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "sync" "github.com/samber/lo" "github.com/aws/karpenter-core/pkg/events" ) var _ events.Recorder = (*EventRecorder)(nil) // EventRecorder is a mock event recorder that is used to facilitate testing. type EventRecorder struct { mu sync.RWMutex calls map[string]int events []events.Event } func NewEventRecorder() *EventRecorder { return &EventRecorder{ calls: map[string]int{}, } } func (e *EventRecorder) Publish(evts ...events.Event) { e.mu.Lock() defer e.mu.Unlock() e.events = append(e.events, evts...) 
for _, evt := range evts { e.calls[evt.Reason]++ } } func (e *EventRecorder) Calls(reason string) int { e.mu.RLock() defer e.mu.RUnlock() return e.calls[reason] } func (e *EventRecorder) Reset() { e.mu.Lock() defer e.mu.Unlock() e.events = nil e.calls = map[string]int{} } func (e *EventRecorder) Events() (res []events.Event) { e.mu.RLock() defer e.mu.RUnlock() for _, evt := range e.events { res = append(res, events.Event{ InvolvedObject: evt.InvolvedObject, Type: evt.Type, Reason: evt.Reason, Message: evt.Message, DedupeValues: lo.Map(evt.DedupeValues, func(v string, _ int) string { return v }), DedupeTimeout: evt.DedupeTimeout, RateLimiter: evt.RateLimiter, }) } return res } func (e *EventRecorder) ForEachEvent(f func(evt events.Event)) { e.mu.RLock() defer e.mu.RUnlock() for _, e := range e.events { f(e) } } func (e *EventRecorder) DetectedEvent(msg string) bool { foundEvent := false e.ForEachEvent(func(evt events.Event) { if evt.Message == msg { foundEvent = true } }) return foundEvent }
97
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "fmt" "github.com/imdario/mergo" v1 "k8s.io/api/core/v1" "github.com/aws/karpenter-core/pkg/apis/v1alpha5" ) // Machine creates a test machine with defaults that can be overridden by MachineOptions. // Overrides are applied in order, with a last write wins semantic. func Machine(overrides ...v1alpha5.Machine) *v1alpha5.Machine { override := v1alpha5.Machine{} for _, opts := range overrides { if err := mergo.Merge(&override, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("failed to merge: %v", err)) } } if override.Name == "" { override.Name = RandomName() } if override.Status.ProviderID == "" { override.Status.ProviderID = RandomProviderID() } return &v1alpha5.Machine{ ObjectMeta: ObjectMeta(override.ObjectMeta), Spec: override.Spec, Status: override.Status, } } func MachineAndNode(overrides ...v1alpha5.Machine) (*v1alpha5.Machine, *v1.Node) { m := Machine(overrides...) return m, MachineLinkedNode(m) } // MachinesAndNodes creates homogeneous groups of machines and nodes based on the passed in options, evenly divided by the total machines requested func MachinesAndNodes(total int, options ...v1alpha5.Machine) ([]*v1alpha5.Machine, []*v1.Node) { machines := make([]*v1alpha5.Machine, total) nodes := make([]*v1.Node, total) for _, opts := range options { for i := 0; i < total/len(options); i++ { machine, node := MachineAndNode(opts) machines[i] = machine nodes[i] = node } } return machines, nodes }
66
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "fmt" "math/rand" "strings" "sync" "time" "github.com/Pallinder/go-randomdata" "github.com/imdario/mergo" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/aws/karpenter-core/pkg/apis/v1alpha5" ) const DiscoveryLabel = v1alpha5.TestingGroup + "/test-id" var ( sequentialNumber = 0 randomizer = rand.New(rand.NewSource(time.Now().UnixNano())) //nolint sequentialNumberLock = new(sync.Mutex) ) func RandomName() string { sequentialNumberLock.Lock() defer sequentialNumberLock.Unlock() sequentialNumber++ return strings.ToLower(fmt.Sprintf("%s-%d-%s", randomdata.SillyName(), sequentialNumber, randomdata.Alphanumeric(10))) } func NamespacedObjectMeta(overrides ...metav1.ObjectMeta) metav1.ObjectMeta { return MustMerge(ObjectMeta(metav1.ObjectMeta{ Namespace: "default", }), overrides...) } func ObjectMeta(overrides ...metav1.ObjectMeta) metav1.ObjectMeta { return MustMerge(metav1.ObjectMeta{ Name: RandomName(), Labels: map[string]string{DiscoveryLabel: "unspecified"}, // For cleanup discovery }, overrides...) } func MustMerge[T interface{}](dest T, srcs ...T) T { for _, src := range srcs { if err := mergo.Merge(&dest, src, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge object: %s", err)) } } return dest } func RandomProviderID() string { return ProviderID(randomdata.Alphanumeric(17)) } func ProviderID(base string) string { return fmt.Sprintf("fake:///%s", base) }
75
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "fmt" "github.com/imdario/mergo" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PodOptions customizes a Pod. type NamespaceOptions struct { metav1.ObjectMeta } // Namespace creates a Namespace. func Namespace(overrides ...NamespaceOptions) *corev1.Namespace { options := NamespaceOptions{} for _, opts := range overrides { if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge namespace options: %s", err)) } } return &corev1.Namespace{ ObjectMeta: ObjectMeta(options.ObjectMeta), } }
42
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "fmt" "github.com/imdario/mergo" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/aws/karpenter-core/pkg/apis/v1alpha5" ) type NodeOptions struct { metav1.ObjectMeta ReadyStatus v1.ConditionStatus ReadyReason string Conditions []v1.NodeCondition Unschedulable bool ProviderID string Taints []v1.Taint Allocatable v1.ResourceList Capacity v1.ResourceList } func Node(overrides ...NodeOptions) *v1.Node { options := NodeOptions{} for _, opts := range overrides { if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge node options: %s", err)) } } if options.ReadyStatus == "" { options.ReadyStatus = v1.ConditionTrue } if options.Capacity == nil { options.Capacity = options.Allocatable } return &v1.Node{ ObjectMeta: ObjectMeta(options.ObjectMeta), Spec: v1.NodeSpec{ Unschedulable: options.Unschedulable, Taints: options.Taints, ProviderID: options.ProviderID, }, Status: v1.NodeStatus{ Allocatable: options.Allocatable, Capacity: options.Capacity, Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: options.ReadyStatus, Reason: options.ReadyReason}}, }, } } func MachineLinkedNode(machine *v1alpha5.Machine) *v1.Node { return Node( NodeOptions{ ObjectMeta: metav1.ObjectMeta{ Labels: machine.Labels, Annotations: machine.Annotations, Finalizers: machine.Finalizers, }, Taints: append(machine.Spec.Taints, machine.Spec.StartupTaints...), Capacity: 
machine.Status.Capacity, Allocatable: machine.Status.Allocatable, ProviderID: machine.Status.ProviderID, }, ) }
83
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "fmt" "github.com/imdario/mergo" v1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) // PodOptions customizes a Pod. type PodOptions struct { metav1.ObjectMeta Image string InitImage string NodeName string PriorityClassName string InitResourceRequirements v1.ResourceRequirements ResourceRequirements v1.ResourceRequirements NodeSelector map[string]string NodeRequirements []v1.NodeSelectorRequirement NodePreferences []v1.NodeSelectorRequirement PodRequirements []v1.PodAffinityTerm PodPreferences []v1.WeightedPodAffinityTerm PodAntiRequirements []v1.PodAffinityTerm PodAntiPreferences []v1.WeightedPodAffinityTerm TopologySpreadConstraints []v1.TopologySpreadConstraint Tolerations []v1.Toleration PersistentVolumeClaims []string Conditions []v1.PodCondition Phase v1.PodPhase RestartPolicy v1.RestartPolicy TerminationGracePeriodSeconds *int64 } type PDBOptions struct { metav1.ObjectMeta Labels map[string]string MinAvailable *intstr.IntOrString MaxUnavailable *intstr.IntOrString Status *policyv1.PodDisruptionBudgetStatus } // Pod creates a test pod with defaults that can be overridden by PodOptions. // Overrides are applied in order, with a last write wins semantic. 
func Pod(overrides ...PodOptions) *v1.Pod { options := PodOptions{} for _, opts := range overrides { if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge pod options: %s", err)) } } if options.Image == "" { options.Image = "public.ecr.aws/eks-distro/kubernetes/pause:3.2" } var volumes []v1.Volume for _, pvc := range options.PersistentVolumeClaims { volumes = append(volumes, v1.Volume{ Name: RandomName(), VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc}}, }) } p := &v1.Pod{ ObjectMeta: NamespacedObjectMeta(options.ObjectMeta), Spec: v1.PodSpec{ NodeSelector: options.NodeSelector, Affinity: buildAffinity(options), TopologySpreadConstraints: options.TopologySpreadConstraints, Tolerations: options.Tolerations, Containers: []v1.Container{{ Name: RandomName(), Image: options.Image, Resources: options.ResourceRequirements, }}, NodeName: options.NodeName, Volumes: volumes, PriorityClassName: options.PriorityClassName, RestartPolicy: options.RestartPolicy, TerminationGracePeriodSeconds: options.TerminationGracePeriodSeconds, }, Status: v1.PodStatus{ Conditions: options.Conditions, Phase: options.Phase, }, } if options.InitImage != "" { p.Spec.InitContainers = []v1.Container{{ Name: RandomName(), Image: options.InitImage, Resources: options.InitResourceRequirements, }} } return p } // Pods creates homogeneous groups of pods based on the passed in options, evenly divided by the total pods requested func Pods(total int, options ...PodOptions) []*v1.Pod { pods := []*v1.Pod{} for _, opts := range options { for i := 0; i < total/len(options); i++ { pods = append(pods, Pod(opts)) } } return pods } func UnscheduleablePodOptions(overrides ...PodOptions) PodOptions { options := PodOptions{Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Reason: v1.PodReasonUnschedulable, Status: v1.ConditionFalse}}} for _, opts := range overrides { if err := mergo.Merge(&options, opts, 
mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge pod options: %s", err)) } } return options } // UnschedulablePod creates a test pod with a pending scheduling status condition func UnschedulablePod(options ...PodOptions) *v1.Pod { return Pod(append(options, PodOptions{ Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Reason: v1.PodReasonUnschedulable, Status: v1.ConditionFalse}}, })...) } // UnschedulablePods returns slice of configurable length of identical test pods with a pending scheduling status condition func UnschedulablePods(options PodOptions, num int) []*v1.Pod { var pods []*v1.Pod for i := 0; i < num; i++ { pods = append(pods, UnschedulablePod(options)) } return pods } // PodDisruptionBudget creates a PodDisruptionBudget. To function properly, it should have its status applied func PodDisruptionBudget(overrides ...PDBOptions) *policyv1.PodDisruptionBudget { options := PDBOptions{} for _, opts := range overrides { if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge pdb options: %s", err)) } } status := policyv1.PodDisruptionBudgetStatus{ // To be considered for application by eviction, the Status.ObservedGeneration must be >= the PDB generation. // kube-controller-manager normally sets ObservedGeneration, but we don't have one when running under // EnvTest. 
If this isn't modified the eviction controller assumes that the PDB hasn't been processed // by the disruption controller yet and adds a 10 second retry to our evict() call ObservedGeneration: 1, } if options.Status != nil { status = *options.Status } return &policyv1.PodDisruptionBudget{ ObjectMeta: NamespacedObjectMeta(options.ObjectMeta), Spec: policyv1.PodDisruptionBudgetSpec{ MinAvailable: options.MinAvailable, Selector: &metav1.LabelSelector{ MatchLabels: options.Labels, }, MaxUnavailable: options.MaxUnavailable, }, Status: status, } } func buildAffinity(options PodOptions) *v1.Affinity { affinity := &v1.Affinity{} if nodeAffinity := buildNodeAffinity(options.NodeRequirements, options.NodePreferences); nodeAffinity != nil { affinity.NodeAffinity = nodeAffinity } if podAffinity := buildPodAffinity(options.PodRequirements, options.PodPreferences); podAffinity != nil { affinity.PodAffinity = podAffinity } if podAntiAffinity := buildPodAntiAffinity(options.PodAntiRequirements, options.PodAntiPreferences); podAntiAffinity != nil { affinity.PodAntiAffinity = podAntiAffinity } if affinity.NodeAffinity == nil && affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil { return nil } return affinity } func buildPodAffinity(podRequirements []v1.PodAffinityTerm, podPreferences []v1.WeightedPodAffinityTerm) *v1.PodAffinity { var podAffinity *v1.PodAffinity if podRequirements == nil && podPreferences == nil { return podAffinity } podAffinity = &v1.PodAffinity{} if podRequirements != nil { podAffinity.RequiredDuringSchedulingIgnoredDuringExecution = podRequirements } if podPreferences != nil { podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = podPreferences } return podAffinity } func buildPodAntiAffinity(podAntiRequirements []v1.PodAffinityTerm, podAntiPreferences []v1.WeightedPodAffinityTerm) *v1.PodAntiAffinity { var podAntiAffinity *v1.PodAntiAffinity if podAntiRequirements == nil && podAntiPreferences == nil { return podAntiAffinity } podAntiAffinity 
= &v1.PodAntiAffinity{} if podAntiRequirements != nil { podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = podAntiRequirements } if podAntiPreferences != nil { podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = podAntiPreferences } return podAntiAffinity } func buildNodeAffinity(nodeRequirements []v1.NodeSelectorRequirement, nodePreferences []v1.NodeSelectorRequirement) *v1.NodeAffinity { var nodeAffinity *v1.NodeAffinity if nodeRequirements == nil && nodePreferences == nil { return nodeAffinity } nodeAffinity = &v1.NodeAffinity{} if nodeRequirements != nil { nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: nodeRequirements}}, } } if nodePreferences != nil { nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.PreferredSchedulingTerm{ {Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: nodePreferences}}, } } return nodeAffinity }
250
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "encoding/json" "fmt" "github.com/imdario/mergo" "github.com/samber/lo" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "github.com/aws/karpenter-core/pkg/apis/v1alpha5" ) // ProvisionerOptions customizes a Provisioner. type ProvisionerOptions struct { metav1.ObjectMeta Limits v1.ResourceList Provider interface{} ProviderRef *v1alpha5.MachineTemplateRef Kubelet *v1alpha5.KubeletConfiguration Annotations map[string]string Labels map[string]string Taints []v1.Taint StartupTaints []v1.Taint Requirements []v1.NodeSelectorRequirement Status v1alpha5.ProvisionerStatus TTLSecondsUntilExpired *int64 Weight *int32 TTLSecondsAfterEmpty *int64 Consolidation *v1alpha5.Consolidation } // Provisioner creates a test provisioner with defaults that can be overridden by ProvisionerOptions. // Overrides are applied in order, with a last write wins semantic. 
func Provisioner(overrides ...ProvisionerOptions) *v1alpha5.Provisioner { options := ProvisionerOptions{} for _, opts := range overrides { if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge provisioner options: %s", err)) } } if options.Name == "" { options.Name = RandomName() } if options.Limits == nil { options.Limits = v1.ResourceList{v1.ResourceCPU: resource.MustParse("2000")} } raw := &runtime.RawExtension{} lo.Must0(raw.UnmarshalJSON(lo.Must(json.Marshal(options.Provider)))) provisioner := &v1alpha5.Provisioner{ ObjectMeta: ObjectMeta(options.ObjectMeta), Spec: v1alpha5.ProvisionerSpec{ Requirements: options.Requirements, KubeletConfiguration: options.Kubelet, ProviderRef: options.ProviderRef, Taints: options.Taints, StartupTaints: options.StartupTaints, Annotations: options.Annotations, Labels: lo.Assign(options.Labels, map[string]string{DiscoveryLabel: "unspecified"}), // For node cleanup discovery Limits: &v1alpha5.Limits{Resources: options.Limits}, TTLSecondsAfterEmpty: options.TTLSecondsAfterEmpty, TTLSecondsUntilExpired: options.TTLSecondsUntilExpired, Weight: options.Weight, Consolidation: options.Consolidation, Provider: raw, }, Status: options.Status, } if options.ProviderRef == nil { if options.Provider == nil { options.Provider = struct{}{} } provider, err := json.Marshal(options.Provider) if err != nil { panic(err.Error()) } provisioner.Spec.Provider = &runtime.RawExtension{Raw: provider} } return provisioner }
100
karpenter-core
aws
Go
//go:build random_test_delay /* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "math/rand" "sync" "time" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/onsi/gomega/types" ) // If the random_test_delay build flag is used, every Expect() call gets an additional random delay added to it. This // is intended to attempt to make tests more robust by eliminating tests that depend on timing. func init() { gomega.Default = &gomegaWrapper{ inner: gomega.Default, r: rand.New(rand.NewSource(ginkgo.GinkgoRandomSeed())), } } type gomegaWrapper struct { inner gomega.Gomega mu sync.Mutex r *rand.Rand } func (g *gomegaWrapper) randomDelay() { g.mu.Lock() delay := time.Duration(g.r.Intn(5)) * time.Millisecond g.mu.Unlock() time.Sleep(delay) } func (g *gomegaWrapper) Ω(actual interface{}, extra ...interface{}) types.Assertion { g.randomDelay() return g.inner.Ω(actual, extra...) } func (g *gomegaWrapper) Expect(actual interface{}, extra ...interface{}) types.Assertion { g.randomDelay() return g.inner.Expect(actual, extra...) } func (g *gomegaWrapper) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) types.Assertion { g.randomDelay() return g.inner.ExpectWithOffset(offset, actual, extra...) } func (g *gomegaWrapper) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { g.randomDelay() return g.inner.Eventually(actualOrCtx, args...) 
} func (g *gomegaWrapper) EventuallyWithOffset(offset int, actual interface{}, args ...interface{}) types.AsyncAssertion { g.randomDelay() return g.inner.EventuallyWithOffset(offset, actual, args...) } func (g *gomegaWrapper) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { g.randomDelay() return g.inner.Consistently(actualOrCtx, args...) } func (g *gomegaWrapper) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { g.randomDelay() return g.inner.ConsistentlyWithOffset(offset, actualOrCtx, args...) } func (g *gomegaWrapper) SetDefaultEventuallyTimeout(duration time.Duration) { g.inner.SetDefaultEventuallyTimeout(duration) } func (g *gomegaWrapper) SetDefaultEventuallyPollingInterval(duration time.Duration) { g.inner.SetDefaultEventuallyPollingInterval(duration) } func (g *gomegaWrapper) SetDefaultConsistentlyDuration(duration time.Duration) { g.inner.SetDefaultConsistentlyDuration(duration) } func (g *gomegaWrapper) SetDefaultConsistentlyPollingInterval(duration time.Duration) { g.inner.SetDefaultConsistentlyPollingInterval(duration) } func (g *gomegaWrapper) Inner() gomega.Gomega { return g.inner }
105
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "fmt" "strings" "github.com/Pallinder/go-randomdata" "github.com/imdario/mergo" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ReplicaSetOptions customizes a ReplicaSet. type ReplicaSetOptions struct { metav1.ObjectMeta Selector map[string]string PodOptions PodOptions } // ReplicaSet creates a test ReplicaSet with defaults that can be overridden by ReplicaSetOptions. // Overrides are applied in order, with a last write wins semantic. func ReplicaSet(overrides ...ReplicaSetOptions) *appsv1.ReplicaSet { options := ReplicaSetOptions{} for _, opts := range overrides { if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge pod options: %s", err)) } } if options.Name == "" { options.Name = strings.ToLower(randomdata.SillyName()) } if options.Namespace == "" { options.Namespace = "default" } if options.Selector == nil { options.Selector = map[string]string{"app": options.Name} } return &appsv1.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{Name: options.Name, Namespace: options.Namespace}, Spec: appsv1.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: options.Selector}, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: options.Selector}, Spec: Pod(options.PodOptions).Spec, }, }, } }
65
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/imdario/mergo" "github.com/aws/karpenter-core/pkg/apis/settings" ) func Settings(overrides ...settings.Settings) *settings.Settings { options := settings.Settings{} for _, opts := range overrides { if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge pod options: %s", err)) } } if options.BatchMaxDuration == nil { options.BatchMaxDuration = &metav1.Duration{} } if options.BatchIdleDuration == nil { options.BatchIdleDuration = &metav1.Duration{} } return &settings.Settings{ BatchMaxDuration: options.BatchMaxDuration, BatchIdleDuration: options.BatchIdleDuration, DriftEnabled: options.DriftEnabled, } }
46
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test import ( "fmt" "github.com/imdario/mergo" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/ptr" ) type PersistentVolumeOptions struct { metav1.ObjectMeta Zones []string StorageClassName string Driver string } func PersistentVolume(overrides ...PersistentVolumeOptions) *v1.PersistentVolume { options := PersistentVolumeOptions{} for _, opts := range overrides { if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge options: %s", err)) } } if options.Driver == "" { options.Driver = "test.driver" } var nodeAffinity *v1.VolumeNodeAffinity if len(options.Zones) != 0 { nodeAffinity = &v1.VolumeNodeAffinity{Required: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: options.Zones}, }}}}} } return &v1.PersistentVolume{ ObjectMeta: NamespacedObjectMeta(metav1.ObjectMeta{}), Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{CSI: &v1.CSIPersistentVolumeSource{Driver: options.Driver, VolumeHandle: "test-handle"}}, StorageClassName: options.StorageClassName, AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, Capacity: v1.ResourceList{v1.ResourceStorage: resource.MustParse("100Gi")}, NodeAffinity: nodeAffinity, }, } } type 
PersistentVolumeClaimOptions struct { metav1.ObjectMeta StorageClassName *string VolumeName string Resources v1.ResourceRequirements } func PersistentVolumeClaim(overrides ...PersistentVolumeClaimOptions) *v1.PersistentVolumeClaim { options := PersistentVolumeClaimOptions{} for _, opts := range overrides { if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge options: %s", err)) } } if len(options.Resources.Requests) == 0 { options.Resources = v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")}} } return &v1.PersistentVolumeClaim{ ObjectMeta: NamespacedObjectMeta(options.ObjectMeta), Spec: v1.PersistentVolumeClaimSpec{ StorageClassName: options.StorageClassName, VolumeName: options.VolumeName, AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, Resources: options.Resources, }, } } type StorageClassOptions struct { metav1.ObjectMeta Zones []string Provisioner *string VolumeBindingMode *storagev1.VolumeBindingMode } func StorageClass(overrides ...StorageClassOptions) *storagev1.StorageClass { options := StorageClassOptions{} for _, opts := range overrides { if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil { panic(fmt.Sprintf("Failed to merge options: %s", err)) } } var allowedTopologies []v1.TopologySelectorTerm if options.Zones != nil { allowedTopologies = []v1.TopologySelectorTerm{{MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{Key: v1.LabelTopologyZone, Values: options.Zones}}}} } if options.Provisioner == nil { options.Provisioner = ptr.String("test-provisioner") } return &storagev1.StorageClass{ ObjectMeta: ObjectMeta(options.ObjectMeta), Provisioner: *options.Provisioner, AllowedTopologies: allowedTopologies, VolumeBindingMode: options.VolumeBindingMode, } }
123
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ //nolint:revive package expectations import ( "context" "fmt" "log" "reflect" "sync" "time" . "github.com/onsi/ginkgo/v2" //nolint:revive,stylecheck . "github.com/onsi/gomega" //nolint:revive,stylecheck prometheus "github.com/prometheus/client_model/go" "github.com/samber/lo" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" "knative.dev/pkg/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle" "github.com/aws/karpenter-core/pkg/controllers/provisioning" "github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling" "github.com/aws/karpenter-core/pkg/controllers/state" "github.com/aws/karpenter-core/pkg/metrics" "github.com/aws/karpenter-core/pkg/operator/scheme" pscheduling "github.com/aws/karpenter-core/pkg/scheduling" "github.com/aws/karpenter-core/pkg/test" ) 
// Polling parameters used by the Eventually-based expectations below.
const (
	ReconcilerPropagationTime = 10 * time.Second
	RequestInterval           = 1 * time.Second
)

// Bindings maps scheduled pods to the Machine/Node pair they were assigned to.
type Bindings map[*v1.Pod]*Binding

// Binding records the Machine and Node a pod landed on.
type Binding struct {
	Machine *v1alpha5.Machine
	Node    *v1.Node
}

// Get returns the Binding for the pod with the same object key as p, or nil if
// none exists. Lookup is by namespace/name rather than pointer identity so
// distinct copies of the same pod still match.
func (b Bindings) Get(p *v1.Pod) *Binding {
	for k, v := range b {
		if client.ObjectKeyFromObject(k) == client.ObjectKeyFromObject(p) {
			return v
		}
	}
	return nil
}

// ExpectExists asserts that obj exists in the apiserver and returns a freshly-read copy.
func ExpectExists[T client.Object](ctx context.Context, c client.Client, obj T) T {
	return ExpectExistsWithOffset(1, ctx, c, obj)
}

// ExpectExistsWithOffset is ExpectExists with a Gomega stack offset so failures
// point at the caller's line.
func ExpectExistsWithOffset[T client.Object](offset int, ctx context.Context, c client.Client, obj T) T {
	// T is a pointer type; allocate a fresh element of its pointee via reflection.
	resp := reflect.New(reflect.TypeOf(*new(T)).Elem()).Interface().(T)
	ExpectWithOffset(offset+1, c.Get(ctx, client.ObjectKeyFromObject(obj), resp)).To(Succeed())
	return resp
}

// ExpectPodExists asserts that the named pod exists and returns it.
func ExpectPodExists(ctx context.Context, c client.Client, name string, namespace string) *v1.Pod {
	return ExpectPodExistsWithOffset(1, ctx, c, name, namespace)
}

func ExpectPodExistsWithOffset(offset int, ctx context.Context, c client.Client, name string, namespace string) *v1.Pod {
	return ExpectExistsWithOffset(offset+1, ctx, c, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}})
}

// ExpectNodeExists asserts that the named node exists and returns it.
func ExpectNodeExists(ctx context.Context, c client.Client, name string) *v1.Node {
	return ExpectNodeExistsWithOffset(1, ctx, c, name)
}

func ExpectNodeExistsWithOffset(offset int, ctx context.Context, c client.Client, name string) *v1.Node {
	return ExpectExistsWithOffset(offset+1, ctx, c, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}})
}

// ExpectNotFound asserts that each object is eventually deleted from the apiserver.
func ExpectNotFound(ctx context.Context, c client.Client, objects ...client.Object) {
	ExpectNotFoundWithOffset(1, ctx, c, objects...)
}

func ExpectNotFoundWithOffset(offset int, ctx context.Context, c client.Client, objects ...client.Object) {
	for _, object := range objects {
		// Poll, since deletion may complete asynchronously (finalizers, controllers).
		EventuallyWithOffset(offset+1, func() bool {
			return errors.IsNotFound(c.Get(ctx, types.NamespacedName{Name: object.GetName(), Namespace: object.GetNamespace()}, object))
		}, ReconcilerPropagationTime, RequestInterval).Should(BeTrue(), func() string {
			return fmt.Sprintf("expected %s/%s to be deleted, but it still exists", lo.Must(apiutil.GVKForObject(object, scheme.Scheme)), client.ObjectKeyFromObject(object))
		})
	}
}

// ExpectScheduled asserts that the pod has been bound to a node and returns that node.
func ExpectScheduled(ctx context.Context, c client.Client, pod *v1.Pod) *v1.Node {
	p := ExpectPodExistsWithOffset(1, ctx, c, pod.Name, pod.Namespace)
	ExpectWithOffset(1, p.Spec.NodeName).ToNot(BeEmpty(), fmt.Sprintf("expected %s/%s to be scheduled", pod.Namespace, pod.Name))
	return ExpectNodeExistsWithOffset(1, ctx, c, p.Spec.NodeName)
}

// ExpectNotScheduled asserts that the pod has no node assigned and returns the
// latest copy of the pod.
func ExpectNotScheduled(ctx context.Context, c client.Client, pod *v1.Pod) *v1.Pod {
	p := ExpectPodExistsWithOffset(1, ctx, c, pod.Name, pod.Namespace)
	EventuallyWithOffset(1, p.Spec.NodeName).Should(BeEmpty(), fmt.Sprintf("expected %s/%s to not be scheduled", pod.Namespace, pod.Name))
	return p
}

// ExpectApplied creates or updates the objects (spec and status) in the apiserver.
func ExpectApplied(ctx context.Context, c client.Client, objects ...client.Object) {
	ExpectAppliedWithOffset(1, ctx, c, objects...)
}

func ExpectAppliedWithOffset(offset int, ctx context.Context, c client.Client, objects ...client.Object) {
	for _, object := range objects {
		// Remember whether the caller asked for a deletion timestamp; it must be
		// re-established after create/update, which cannot set it directly.
		deletionTimestampSet := !object.GetDeletionTimestamp().IsZero()
		current := object.DeepCopyObject().(client.Object)
		statuscopy := object.DeepCopyObject().(client.Object) // Snapshot the status, since create/update may override
		// Create or Update
		if err := c.Get(ctx, client.ObjectKeyFromObject(current), current); err != nil {
			if errors.IsNotFound(err) {
				ExpectWithOffset(offset+1, c.Create(ctx, object)).To(Succeed())
			} else {
				ExpectWithOffset(offset+1, err).ToNot(HaveOccurred())
			}
		} else {
			object.SetResourceVersion(current.GetResourceVersion())
			ExpectWithOffset(offset+1, c.Update(ctx, object)).To(Succeed())
		}
		// Update status
		statuscopy.SetResourceVersion(object.GetResourceVersion())
		ExpectWithOffset(offset+1, c.Status().Update(ctx, statuscopy)).To(Or(Succeed(), MatchError("the server could not find the requested resource"))) // Some objects do not have a status
		// Re-get the object to grab the updated spec and status
		Expect(c.Get(ctx, client.ObjectKeyFromObject(object), object)).To(Succeed())
		// Set the deletion timestamp by adding a finalizer and deleting
		if deletionTimestampSet {
			ExpectDeletionTimestampSetWithOffset(1, ctx, c, object)
		}
	}
}

// ExpectDeleted force-deletes the objects (grace period 0) and asserts they are gone.
func ExpectDeleted(ctx context.Context, c client.Client, objects ...client.Object) {
	for _, object := range objects {
		// A NotFound on delete is fine: the object was already gone.
		if err := c.Delete(ctx, object, &client.DeleteOptions{GracePeriodSeconds: ptr.Int64(0)}); !errors.IsNotFound(err) {
			ExpectWithOffset(1, err).To(BeNil())
		}
		ExpectNotFoundWithOffset(1, ctx, c, object)
	}
}

func ExpectDeletionTimestampSet(ctx context.Context, c client.Client, objects ...client.Object) {
	ExpectDeletionTimestampSetWithOffset(1, ctx, c, objects...)
}

// ExpectDeletionTimestampSetWithOffset ensures that the deletion timestamp is set on the objects by adding a finalizer
// and then deleting the object immediately after. This holds the object until the finalizer is patched out in the DeferCleanup
func ExpectDeletionTimestampSetWithOffset(offset int, ctx context.Context, c client.Client, objects ...client.Object) {
	for _, object := range objects {
		ExpectWithOffset(offset+1, c.Get(ctx, client.ObjectKeyFromObject(object), object)).To(Succeed())
		controllerutil.AddFinalizer(object, v1alpha5.TestingGroup+"/finalizer")
		ExpectWithOffset(offset+1, c.Update(ctx, object)).To(Succeed())
		ExpectWithOffset(offset+1, c.Delete(ctx, object)).To(Succeed())
		// Patch the finalizer back out at the end of the spec so the object can
		// actually be garbage collected.
		DeferCleanup(func(obj client.Object) {
			mergeFrom := client.MergeFrom(obj.DeepCopyObject().(client.Object))
			obj.SetFinalizers([]string{})
			ExpectWithOffset(offset+1, c.Patch(ctx, obj, mergeFrom)).To(Succeed())
		}, object)
	}
}

// ExpectCleanedUp strips finalizers and force-deletes all test-relevant object
// kinds in every namespace, deleting kinds/namespaces concurrently.
func ExpectCleanedUp(ctx context.Context, c client.Client) {
	wg := sync.WaitGroup{}
	namespaces := &v1.NamespaceList{}
	ExpectWithOffset(1, c.List(ctx, namespaces)).To(Succeed())
	ExpectFinalizersRemovedFromList(ctx, c, &v1.NodeList{}, &v1alpha5.MachineList{}, &v1.PersistentVolumeClaimList{})
	for _, object := range []client.Object{
		&v1.Pod{}, &v1.Node{}, &appsv1.DaemonSet{}, &policyv1.PodDisruptionBudget{},
		&v1.PersistentVolumeClaim{}, &v1.PersistentVolume{}, &storagev1.StorageClass{},
		&v1alpha5.Provisioner{}, &v1alpha5.Machine{},
	} {
		for _, namespace := range namespaces.Items {
			wg.Add(1)
			go func(object client.Object, namespace string) {
				defer wg.Done()
				defer GinkgoRecover()
				ExpectWithOffset(1, c.DeleteAllOf(ctx, object, client.InNamespace(namespace), &client.DeleteAllOfOptions{DeleteOptions: client.DeleteOptions{GracePeriodSeconds: ptr.Int64(0)}})).ToNot(HaveOccurred())
			}(object, namespace.Name)
		}
	}
	wg.Wait()
}

// ExpectFinalizersRemovedFromList clears the finalizers of every item in each list.
func ExpectFinalizersRemovedFromList(ctx context.Context, c client.Client, objectLists ...client.ObjectList) {
	for _, list := range objectLists {
		ExpectWithOffset(1, c.List(ctx, list)).To(Succeed())
		ExpectWithOffset(1, meta.EachListItem(list, func(o runtime.Object) error {
			obj := o.(client.Object)
			stored := obj.DeepCopyObject().(client.Object)
			obj.SetFinalizers([]string{})
			// Ignore NotFound: the object may be deleted between List and Patch.
			Expect(client.IgnoreNotFound(c.Patch(ctx, obj, client.MergeFrom(stored)))).To(Succeed())
			return nil
		})).To(Succeed())
	}
}

// ExpectFinalizersRemoved clears the finalizers on each object, tolerating NotFound.
func ExpectFinalizersRemoved(ctx context.Context, c client.Client, objs ...client.Object) {
	for _, obj := range objs {
		ExpectWithOffset(1, client.IgnoreNotFound(c.Get(ctx, client.ObjectKeyFromObject(obj), obj))).To(Succeed())
		stored := obj.DeepCopyObject().(client.Object)
		obj.SetFinalizers([]string{})
		ExpectWithOffset(1, client.IgnoreNotFound(c.Patch(ctx, obj, client.MergeFrom(stored)))).To(Succeed())
	}
}

// ExpectProvisioned runs a provisioning round for the given pods and then binds
// exactly those pods to the nodes they were scheduled to, returning all bindings.
func ExpectProvisioned(ctx context.Context, c client.Client, cluster *state.Cluster, cloudProvider cloudprovider.CloudProvider, provisioner *provisioning.Provisioner, pods ...*v1.Pod) Bindings {
	bindings := ExpectProvisionedNoBindingWithOffset(1, ctx, c, cluster, cloudProvider, provisioner, pods...)
	podKeys := sets.NewString(lo.Map(pods, func(p *v1.Pod, _ int) string { return client.ObjectKeyFromObject(p).String() })...)
	for pod, binding := range bindings {
		// Only bind the pods that are passed through
		if podKeys.Has(client.ObjectKeyFromObject(pod).String()) {
			ExpectManualBindingWithOffset(1, ctx, c, pod, binding.Node)
			ExpectWithOffset(1, cluster.UpdatePod(ctx, pod)).To(Succeed()) // track pod bindings
		}
	}
	return bindings
}

// ExpectProvisionedNoBinding runs a provisioning round without binding any pods.
func ExpectProvisionedNoBinding(ctx context.Context, c client.Client, cluster *state.Cluster, cloudProvider cloudprovider.CloudProvider, provisioner *provisioning.Provisioner, pods ...*v1.Pod) Bindings {
	return ExpectProvisionedNoBindingWithOffset(1, ctx, c, cluster, cloudProvider, provisioner, pods...)
}

func ExpectProvisionedNoBindingWithOffset(offset int, ctx context.Context, c client.Client, cluster *state.Cluster, cloudProvider cloudprovider.CloudProvider, provisioner *provisioning.Provisioner, pods ...*v1.Pod) Bindings {
	// Persist objects
	for _, pod := range pods {
		ExpectAppliedWithOffset(offset+1, ctx, c, pod)
	}
	// TODO: Check the error on the provisioner scheduling round
	results, err := provisioner.Schedule(ctx)
	bindings := Bindings{}
	if err != nil {
		// Scheduling errors are logged but intentionally not fatal here.
		log.Printf("error provisioning in test, %s", err)
		return bindings
	}
	for _, m := range results.NewMachines {
		// TODO: Check the error on the provisioner launch
		name, err := provisioner.Launch(ctx, m, provisioning.WithReason(metrics.ProvisioningReason))
		if err != nil {
			return bindings
		}
		machine := &v1alpha5.Machine{}
		ExpectWithOffset(offset+1, c.Get(ctx, types.NamespacedName{Name: name}, machine)).To(Succeed())
		machine, node := ExpectMachineDeployedWithOffset(offset+1, ctx, c, cluster, cloudProvider, machine)
		if machine != nil && node != nil {
			for _, pod := range m.Pods {
				bindings[pod] = &Binding{
					Machine: machine,
					Node:    node,
				}
			}
		}
	}
	// Pods that scheduled against pre-existing nodes bind to those nodes' machines.
	for _, node := range results.ExistingNodes {
		for _, pod := range node.Pods {
			bindings[pod] = &Binding{
				Node:    node.Node,
				Machine: node.Machine,
			}
		}
	}
	return bindings
}

// ExpectMachineDeployedNoNode creates the machine at the cloud provider and marks
// it Launched, without mocking a node joining.
func ExpectMachineDeployedNoNode(ctx context.Context, c client.Client, cluster *state.Cluster, cloudProvider cloudprovider.CloudProvider, m *v1alpha5.Machine) (*v1alpha5.Machine, error) {
	return ExpectMachineDeployedNoNodeWithOffset(1, ctx, c, cluster, cloudProvider, m)
}

func ExpectMachineDeployedNoNodeWithOffset(offset int, ctx context.Context, c client.Client, cluster *state.Cluster, cloudProvider cloudprovider.CloudProvider, m *v1alpha5.Machine) (*v1alpha5.Machine, error) {
	resolved, err := cloudProvider.Create(ctx, m)
	// TODO @joinnis: Check this error rather than swallowing it. This is swallowed
	// right now due to how we are doing some testing in the cloudprovider
	if err != nil {
		return m, err
	}
	ExpectWithOffset(offset+1, err).To(Succeed())
	// Make the machine ready in the status conditions
	lifecycle.PopulateMachineDetails(m, resolved)
	m.StatusConditions().MarkTrue(v1alpha5.MachineLaunched)
	ExpectAppliedWithOffset(offset+1, ctx, c, m)
	cluster.UpdateMachine(m)
	return m, nil
}

// ExpectMachineDeployed launches the machine and mocks its node joining the cluster.
func ExpectMachineDeployed(ctx context.Context, c client.Client, cluster *state.Cluster, cloudProvider cloudprovider.CloudProvider, machine *v1alpha5.Machine) (*v1alpha5.Machine, *v1.Node) {
	return ExpectMachineDeployedWithOffset(1, ctx, c, cluster, cloudProvider, machine)
}

func ExpectMachineDeployedWithOffset(offset int, ctx context.Context, c client.Client, cluster *state.Cluster, cloudProvider cloudprovider.CloudProvider, m *v1alpha5.Machine) (*v1alpha5.Machine, *v1.Node) {
	m, err := ExpectMachineDeployedNoNodeWithOffset(offset+1, ctx, c, cluster, cloudProvider, m)
	if err != nil {
		return m, nil
	}
	m.StatusConditions().MarkTrue(v1alpha5.MachineRegistered)
	// Mock the machine launch and node joining at the apiserver
	node := test.MachineLinkedNode(m)
	ExpectAppliedWithOffset(offset+1, ctx, c, m, node)
	ExpectWithOffset(offset+1, cluster.UpdateNode(ctx, node)).To(Succeed())
	cluster.UpdateMachine(m)
	return m, node
}

// ExpectMachinesCascadeDeletion deletes the nodes linked (by provider ID) to any
// machine that has already been removed from the apiserver.
func ExpectMachinesCascadeDeletion(ctx context.Context, c client.Client, machines ...*v1alpha5.Machine) {
	nodes := ExpectNodesWithOffset(1, ctx, c)
	for _, machine := range machines {
		// Skip machines that still exist; only cascade for deleted ones.
		err := c.Get(ctx, client.ObjectKeyFromObject(machine), &v1alpha5.Machine{})
		if !errors.IsNotFound(err) {
			continue
		}
		for _, node := range nodes {
			if node.Spec.ProviderID == machine.Status.ProviderID {
				// NOTE(review): this Expect has no matcher (.To(Succeed()) is
				// missing), so the delete error is never asserted — confirm intent.
				Expect(c.Delete(ctx, node))
				ExpectFinalizersRemoved(ctx, c, node)
				ExpectNotFound(ctx, c, node)
			}
		}
	}
}

// ExpectMakeMachinesInitialized marks the machines Launched/Registered/Initialized.
func ExpectMakeMachinesInitialized(ctx context.Context, c client.Client, machines ...*v1alpha5.Machine) {
	ExpectMakeMachinesInitializedWithOffset(1, ctx, c, machines...)
}

func ExpectMakeMachinesInitializedWithOffset(offset int, ctx context.Context, c client.Client, machines ...*v1alpha5.Machine) {
	for i := range machines {
		machines[i] = ExpectExistsWithOffset(offset+1, ctx, c, machines[i])
		machines[i].StatusConditions().MarkTrue(v1alpha5.MachineLaunched)
		machines[i].StatusConditions().MarkTrue(v1alpha5.MachineRegistered)
		machines[i].StatusConditions().MarkTrue(v1alpha5.MachineInitialized)
		ExpectAppliedWithOffset(offset+1, ctx, c, machines[i])
	}
}

// ExpectMakeNodesInitialized makes the nodes ready and labels them initialized.
func ExpectMakeNodesInitialized(ctx context.Context, c client.Client, nodes ...*v1.Node) {
	ExpectMakeNodesInitializedWithOffset(1, ctx, c, nodes...)
}

func ExpectMakeNodesInitializedWithOffset(offset int, ctx context.Context, c client.Client, nodes ...*v1.Node) {
	ExpectMakeNodesReadyWithOffset(offset+1, ctx, c, nodes...)
	for i := range nodes {
		nodes[i].Labels[v1alpha5.LabelNodeInitialized] = "true"
		ExpectAppliedWithOffset(offset+1, ctx, c, nodes[i])
	}
}

// ExpectMakeNodesReady sets a Ready condition and strips ephemeral startup taints.
func ExpectMakeNodesReady(ctx context.Context, c client.Client, nodes ...*v1.Node) {
	ExpectMakeNodesReadyWithOffset(1, ctx, c, nodes...)
}

func ExpectMakeNodesReadyWithOffset(offset int, ctx context.Context, c client.Client, nodes ...*v1.Node) {
	for i := range nodes {
		nodes[i] = ExpectExistsWithOffset(offset+1, ctx, c, nodes[i])
		nodes[i].Status.Phase = v1.NodeRunning
		nodes[i].Status.Conditions = []v1.NodeCondition{
			{
				Type:               v1.NodeReady,
				Status:             v1.ConditionTrue,
				LastHeartbeatTime:  metav1.Now(),
				LastTransitionTime: metav1.Now(),
				Reason:             "KubeletReady",
			},
		}
		if nodes[i].Labels == nil {
			nodes[i].Labels = map[string]string{}
		}
		// Remove any of the known ephemeral taints to make the Node ready
		nodes[i].Spec.Taints = lo.Reject(nodes[i].Spec.Taints, func(taint v1.Taint, _ int) bool {
			_, found := lo.Find(pscheduling.KnownEphemeralTaints, func(t v1.Taint) bool {
				return t.MatchTaint(&taint)
			})
			return found
		})
		ExpectAppliedWithOffset(offset+1, ctx, c, nodes[i])
	}
}

// ExpectReconcileSucceeded runs a reconcile for key and asserts it returned no error.
func ExpectReconcileSucceeded(ctx context.Context, reconciler reconcile.Reconciler, key client.ObjectKey) reconcile.Result {
	return ExpectReconcileSucceededWithOffset(1, ctx, reconciler, key)
}

func ExpectReconcileSucceededWithOffset(offset int, ctx context.Context, reconciler reconcile.Reconciler, key client.ObjectKey) reconcile.Result {
	result, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key})
	ExpectWithOffset(offset+1, err).ToNot(HaveOccurred())
	return result
}

// ExpectReconcileFailed runs a reconcile for key and asserts it returned an error.
func ExpectReconcileFailed(ctx context.Context, reconciler reconcile.Reconciler, key client.ObjectKey) {
	_, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key})
	ExpectWithOffset(1, err).To(HaveOccurred())
}

// ExpectStatusConditionExists asserts that obj carries a condition of type t and returns it.
func ExpectStatusConditionExists(obj apis.ConditionsAccessor, t apis.ConditionType) apis.Condition {
	conds := obj.GetConditions()
	cond, ok := lo.Find(conds, func(c apis.Condition) bool {
		return c.Type == t
	})
	ExpectWithOffset(1, ok).To(BeTrue())
	return cond
}

// ExpectOwnerReferenceExists asserts that obj has an owner reference to owner
// (matched by UID) and returns it.
func ExpectOwnerReferenceExists(obj, owner client.Object) metav1.OwnerReference {
	or, found := lo.Find(obj.GetOwnerReferences(), func(o metav1.OwnerReference) bool {
		return o.UID == owner.GetUID()
	})
	Expect(found).To(BeTrue())
	return or
}

// FindMetricWithLabelValues attempts to find a metric with a name with a set of label values
// If no metric is found, the *prometheus.Metric will be nil
func FindMetricWithLabelValues(name string, labelValues map[string]string) (*prometheus.Metric, bool) {
	metrics, err := crmetrics.Registry.Gather()
	ExpectWithOffset(1, err).To(BeNil())
	mf, found := lo.Find(metrics, func(mf *prometheus.MetricFamily) bool {
		return mf.GetName() == name
	})
	if !found {
		return nil, false
	}
	for _, m := range mf.Metric {
		// A metric matches when every requested label name/value pair is present.
		temp := lo.Assign(labelValues)
		for _, labelPair := range m.Label {
			if v, ok := temp[labelPair.GetName()]; ok && v == labelPair.GetValue() {
				delete(temp, labelPair.GetName())
			}
		}
		if len(temp) == 0 {
			return m, true
		}
	}
	return nil, false
}

// ExpectManualBinding binds pod to node via the Binding subresource, as the
// kube-scheduler would, and waits for NodeName to be reflected on the pod.
func ExpectManualBinding(ctx context.Context, c client.Client, pod *v1.Pod, node *v1.Node) {
	ExpectManualBindingWithOffset(1, ctx, c, pod, node)
}

func ExpectManualBindingWithOffset(offset int, ctx context.Context, c client.Client, pod *v1.Pod, node *v1.Node) {
	ExpectWithOffset(offset+1, c.Create(ctx, &v1.Binding{
		TypeMeta: pod.TypeMeta,
		ObjectMeta: metav1.ObjectMeta{
			Name:      pod.ObjectMeta.Name,
			Namespace: pod.ObjectMeta.Namespace,
			UID:       pod.ObjectMeta.UID,
		},
		Target: v1.ObjectReference{
			Name: node.Name,
		},
	})).To(Succeed())
	Eventually(func(g Gomega) {
		g.Expect(c.Get(ctx, client.ObjectKeyFromObject(pod), pod)).To(Succeed())
		g.Expect(pod.Spec.NodeName).To(Equal(node.Name))
	}).Should(Succeed())
}

// ExpectSkew computes the per-domain pod counts for the topology constraint in
// the given namespace and returns a Gomega Assertion over the resulting map.
func ExpectSkew(ctx context.Context, c client.Client, namespace string, constraint *v1.TopologySpreadConstraint) Assertion {
	nodes := &v1.NodeList{}
	ExpectWithOffset(1, c.List(ctx, nodes)).To(Succeed())
	pods := &v1.PodList{}
	ExpectWithOffset(1, c.List(ctx, pods, scheduling.TopologyListOptions(namespace, constraint.LabelSelector))).To(Succeed())
	skew := map[string]int{}
	for i, pod := range pods.Items {
		if scheduling.IgnoredForTopology(&pods.Items[i]) {
			continue
		}
		for _, node := range nodes.Items {
			if pod.Spec.NodeName == node.Name {
				switch constraint.TopologyKey {
				case v1.LabelHostname:
					skew[node.Name]++ // Check node name since hostname labels aren't applied
				default:
					if key, ok := node.Labels[constraint.TopologyKey]; ok {
						skew[key]++
					}
				}
			}
		}
	}
	return Expect(skew)
}

// ExpectResources expects all the resources in expected to exist in real with the same values
func ExpectResources(expected, real v1.ResourceList) {
	for k, v := range expected {
		realV := real[k]
		ExpectWithOffset(1, v.Value()).To(BeNumerically("~", realV.Value()))
	}
}

// ExpectNodes lists all nodes and returns them as pointers.
func ExpectNodes(ctx context.Context, c client.Client) []*v1.Node {
	return ExpectNodesWithOffset(1, ctx, c)
}

func ExpectNodesWithOffset(offset int, ctx context.Context, c client.Client) []*v1.Node {
	nodeList := &v1.NodeList{}
	ExpectWithOffset(offset+1, c.List(ctx, nodeList)).To(Succeed())
	return lo.Map(nodeList.Items, func(n v1.Node, _ int) *v1.Node {
		return &n
	})
}

// ExpectMachines lists all machines and returns them as pointers.
func ExpectMachines(ctx context.Context, c client.Client) []*v1alpha5.Machine {
	return ExpectMachinesWithOffset(1, ctx, c)
}

func ExpectMachinesWithOffset(offset int, ctx context.Context, c client.Client) []*v1alpha5.Machine {
	machineList := &v1alpha5.MachineList{}
	ExpectWithOffset(offset+1, c.List(ctx, machineList)).To(Succeed())
	return lo.Map(machineList.Items, func(m v1alpha5.Machine, _ int) *v1alpha5.Machine {
		return &m
	})
}
557
karpenter-core
aws
Go
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package atomic

import (
	"context"
	"sync"

	"github.com/samber/lo"

	"github.com/aws/karpenter-core/pkg/utils/functional"
)

// Options configures how Lazy values are retrieved.
type Options struct {
	// ignoreCache forces TryGet to bypass any cached value and re-resolve.
	ignoreCache bool
}

// IgnoreCacheOption is a functional option that makes TryGet always invoke
// Resolve, even when a value is already cached.
func IgnoreCacheOption(o Options) Options {
	o.ignoreCache = true
	return o
}

// Lazy persistently stores a value in memory by evaluating
// the Resolve function when the value is accessed
type Lazy[T any] struct {
	value *T
	mu    sync.RWMutex

	// Resolve produces the value on demand. It may be nil, in which case
	// TryGet returns the zero value of T with no error.
	Resolve func(context.Context) (T, error)
}

// Set assigns the passed value
func (c *Lazy[T]) Set(v T) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.value = &v
}

// TryGet attempts to get a non-nil value from the internal value. If the internal value is nil, the Resolve function
// will attempt to resolve the value, setting the value to be persistently stored if the resolve of Resolve is non-nil.
func (c *Lazy[T]) TryGet(ctx context.Context, opts ...functional.Option[Options]) (T, error) {
	o := functional.ResolveOptions(opts...)
	// Fast path: return the cached value under the read lock.
	c.mu.RLock()
	if c.value != nil && !o.ignoreCache {
		ret := *c.value
		c.mu.RUnlock()
		return ret, nil
	}
	c.mu.RUnlock()
	// Slow path: take the write lock and resolve (double-checked below).
	c.mu.Lock()
	defer c.mu.Unlock()
	// We have to check if the field is set again here in case multiple threads make it past the read-locked section
	if c.value != nil && !o.ignoreCache {
		return *c.value, nil
	}
	if c.Resolve == nil {
		return *new(T), nil
	}
	ret, err := c.Resolve(ctx)
	if err != nil {
		return *new(T), err
	}
	c.value = lo.ToPtr(ret) // copies the value so we don't keep the reference
	return ret, nil
}
77
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package atomic import ( "sync" ) // Slice exposes a slice of a type in a race-free manner. type Slice[T any] struct { mu sync.RWMutex values []T } func (a *Slice[T]) Reset() { a.mu.Lock() defer a.mu.Unlock() a.values = nil } func (a *Slice[T]) Add(input T) { a.mu.Lock() defer a.mu.Unlock() a.values = append(a.values, input) } func (a *Slice[T]) Range(f func(pool T) bool) { a.mu.RLock() defer a.mu.RUnlock() for _, v := range a.values { if !f(v) { return } } } func (a *Slice[T]) Set(values []T) { a.mu.Lock() defer a.mu.Unlock() a.values = values } func (a *Slice[T]) Len() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.values) }
60
karpenter-core
aws
Go
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package atomic_test

import (
	"context"
	"fmt"
	"sync"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"github.com/aws/karpenter-core/pkg/utils/atomic"
)

// TestAtomic is the Ginkgo entry point for the atomic package suite.
func TestAtomic(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Atomic")
}

var _ = Describe("Atomic", func() {
	It("should resolve a value when set", func() {
		// An explicitly Set value must win over the Resolve function.
		str := atomic.Lazy[string]{}
		str.Resolve = func(_ context.Context) (string, error) { return "", nil }
		str.Set("value")
		ret, err := str.TryGet(context.Background())
		Expect(err).To(Succeed())
		Expect(ret).To(Equal("value"))
	})
	It("should resolve a value and set a value when empty", func() {
		// With no cached value, TryGet falls through to Resolve.
		str := atomic.Lazy[string]{}
		str.Resolve = func(_ context.Context) (string, error) { return "value", nil }
		ret, err := str.TryGet(context.Background())
		Expect(err).To(Succeed())
		Expect(ret).To(Equal("value"))
	})
	It("should error out when the fallback function returns an err", func() {
		// Resolve errors surface from TryGet and the zero value is returned.
		str := atomic.Lazy[string]{}
		str.Resolve = func(_ context.Context) (string, error) { return "value", fmt.Errorf("failed") }
		ret, err := str.TryGet(context.Background())
		Expect(err).ToNot(Succeed())
		Expect(ret).To(BeEmpty())
	})
	It("should ignore the cache when option set", func() {
		// IgnoreCacheOption bypasses the cached "hasvalue" and re-resolves.
		str := atomic.Lazy[string]{}
		str.Resolve = func(_ context.Context) (string, error) { return "newvalue", nil }
		str.Set("hasvalue")
		ret, err := str.TryGet(context.Background(), atomic.IgnoreCacheOption)
		Expect(err).To(Succeed())
		Expect(ret).To(Equal("newvalue"))
	})
	It("shouldn't deadlock on multiple reads", func() {
		// 100 concurrent TryGets must resolve exactly once (double-checked locking).
		calls := 0
		str := atomic.Lazy[string]{}
		str.Resolve = func(_ context.Context) (string, error) { calls++; return "value", nil }
		wg := &sync.WaitGroup{}
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				defer GinkgoRecover()
				ret, err := str.TryGet(context.Background())
				Expect(err).To(Succeed())
				Expect(ret).To(Equal("value"))
			}()
		}
		wg.Wait()
		Expect(calls).To(Equal(1))
	})
	It("shouldn't deadlock on multiple writes", func() {
		// With the cache ignored, every concurrent TryGet resolves independently.
		calls := 0
		str := atomic.Lazy[string]{}
		str.Resolve = func(_ context.Context) (string, error) { calls++; return "value", nil }
		wg := &sync.WaitGroup{}
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				defer GinkgoRecover()
				ret, err := str.TryGet(context.Background(), atomic.IgnoreCacheOption)
				Expect(err).To(Succeed())
				Expect(ret).To(Equal("value"))
			}()
		}
		wg.Wait()
		Expect(calls).To(Equal(100))
	})
})
102
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package env import ( "os" "strconv" ) // WithDefaultInt returns the int value of the supplied environment variable or, if not present, // the supplied default value. If the int conversion fails, returns the default func WithDefaultInt(key string, def int) int { val, ok := os.LookupEnv(key) if !ok { return def } i, err := strconv.Atoi(val) if err != nil { return def } return i } // WithDefaultInt64 returns the int value of the supplied environment variable or, if not present, // the supplied default value. If the int conversion fails, returns the default func WithDefaultInt64(key string, def int64) int64 { val, ok := os.LookupEnv(key) if !ok { return def } i, err := strconv.ParseInt(val, 10, 64) if err != nil { return def } return i } // WithDefaultFloat64 returns the float64 value of the supplied environment variable or, if not present, // the supplied default value. If the float64 conversion fails, returns the default func WithDefaultFloat64(key string, def float64) float64 { val, ok := os.LookupEnv(key) if !ok { return def } f, err := strconv.ParseFloat(val, 64) if err != nil { return def } return f } // WithDefaultString returns the string value of the supplied environment variable or, if not present, // the supplied default value. 
func WithDefaultString(key string, def string) string { val, ok := os.LookupEnv(key) if !ok { return def } return val } // WithDefaultBool returns the boolean value of the supplied environment variable or, if not present, // the supplied default value. func WithDefaultBool(key string, def bool) bool { val, ok := os.LookupEnv(key) if !ok { return def } parsedVal, err := strconv.ParseBool(val) if err != nil { return def } return parsedVal }
87
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package functional import ( "strings" "k8s.io/apimachinery/pkg/util/yaml" ) type Pair[A, B any] struct { First A Second B } type Option[T any] func(T) T func ResolveOptions[T any](opts ...Option[T]) T { o := *new(T) for _, opt := range opts { if opt != nil { o = opt(o) } } return o } // HasAnyPrefix returns true if any of the provided prefixes match the given string s func HasAnyPrefix(s string, prefixes ...string) bool { for _, prefix := range prefixes { if strings.HasPrefix(s, prefix) { return true } } return false } // SplitCommaSeparatedString splits a string by commas, removes whitespace, and returns // a slice of strings func SplitCommaSeparatedString(value string) []string { var result []string for _, value := range strings.Split(value, ",") { result = append(result, strings.TrimSpace(value)) } return result } func Unmarshal[T any](raw []byte) (*T, error) { t := *new(T) if err := yaml.Unmarshal(raw, &t); err != nil { return nil, err } return &t, nil } func FilterMap[K comparable, V any](m map[K]V, f func(K, V) bool) map[K]V { ret := map[K]V{} for k, v := range m { if f(k, v) { ret[k] = v } } return ret }
77
karpenter-core
aws
Go
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package functional

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// TestFunctional wires the Ginkgo spec runner into the standard "go test"
// entry point for this package.
func TestFunctional(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Functional Suite")
}

// Specs for the string helpers in this package.
var _ = Describe("Functional", func() {
	Context("SplitCommaSeparatedString", func() {
		// No commas in input should produce identical output (single value)
		Specify("no commas in string", func() {
			input := "foo"
			expected := []string{input}
			Expect(SplitCommaSeparatedString(input)).To(Equal(expected))
		})
		// Multiple elements in input, no extraneous whitespace
		Specify("multiple elements without whitespace", func() {
			expected := []string{"a", "b"}
			Expect(SplitCommaSeparatedString("a,b")).To(Equal(expected))
		})
		// Multiple elements in input, lots of extraneous whitespace
		// (tabs and newlines around and between elements must all be trimmed)
		Specify("multiple elements with whitespace", func() {
			expected := []string{"a", "b"}
			Expect(SplitCommaSeparatedString(" a\t ,\n\t b \n\t ")).To(Equal(expected))
		})
	})
})
49
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package machine import ( "context" "errors" "fmt" "github.com/samber/lo" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/scheduling" ) // EventHandler is a watcher on v1alpha5.Machine that maps Machines to Nodes based on provider ids // and enqueues reconcile.Requests for the Nodes func EventHandler(ctx context.Context, c client.Client) handler.EventHandler { return handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { machine := o.(*v1alpha5.Machine) nodeList := &v1.NodeList{} if machine.Status.ProviderID == "" { return nil } if err := c.List(ctx, nodeList, client.MatchingFields{"spec.providerID": machine.Status.ProviderID}); err != nil { return nil } return lo.Map(nodeList.Items, func(n v1.Node, _ int) reconcile.Request { return reconcile.Request{ NamespacedName: client.ObjectKeyFromObject(&n), } }) }) } // NodeEventHandler is a watcher on v1.Node that maps Nodes to Machines based on provider ids // and enqueues reconcile.Requests for the Machines func NodeEventHandler(ctx context.Context, c client.Client) handler.EventHandler { return handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { node := o.(*v1.Node) machineList := 
&v1alpha5.MachineList{} if err := c.List(ctx, machineList, client.MatchingFields{"status.providerID": node.Spec.ProviderID}); err != nil { return []reconcile.Request{} } return lo.Map(machineList.Items, func(m v1alpha5.Machine, _ int) reconcile.Request { return reconcile.Request{ NamespacedName: client.ObjectKeyFromObject(&m), } }) }) } // NodeNotFoundError is an error returned when no v1.Nodes are found matching the passed providerID type NodeNotFoundError struct { ProviderID string } func (e *NodeNotFoundError) Error() string { return fmt.Sprintf("no nodes found for provider id '%s'", e.ProviderID) } func IsNodeNotFoundError(err error) bool { if err == nil { return false } nnfErr := &NodeNotFoundError{} return errors.As(err, &nnfErr) } func IgnoreNodeNotFoundError(err error) error { if !IsNodeNotFoundError(err) { return err } return nil } // DuplicateNodeError is an error returned when multiple v1.Nodes are found matching the passed providerID type DuplicateNodeError struct { ProviderID string } func (e *DuplicateNodeError) Error() string { return fmt.Sprintf("multiple found for provider id '%s'", e.ProviderID) } func IsDuplicateNodeError(err error) bool { if err == nil { return false } dnErr := &DuplicateNodeError{} return errors.As(err, &dnErr) } func IgnoreDuplicateNodeError(err error) error { if !IsDuplicateNodeError(err) { return err } return nil } // NodeForMachine is a helper function that takes a v1alpha5.Machine and attempts to find the matching v1.Node by its providerID // This function will return errors if: // 1. No v1.Nodes match the v1alpha5.Machine providerID // 2. 
Multiple v1.Nodes match the v1alpha5.Machine providerID func NodeForMachine(ctx context.Context, c client.Client, machine *v1alpha5.Machine) (*v1.Node, error) { nodes, err := AllNodesForMachine(ctx, c, machine) if err != nil { return nil, err } // If the providerID is defined, use that value; else, use the machine linked annotation if it's on the machine providerID := lo.Ternary(machine.Status.ProviderID != "", machine.Status.ProviderID, machine.Annotations[v1alpha5.MachineLinkedAnnotationKey]) if len(nodes) > 1 { return nil, &DuplicateNodeError{ProviderID: providerID} } if len(nodes) == 0 { return nil, &NodeNotFoundError{ProviderID: providerID} } return nodes[0], nil } // AllNodesForMachine is a helper function that takes a v1alpha5.Machine and finds ALL matching v1.Nodes by their providerID // If the providerID is not resolved for a Machine, then no Nodes will map to it func AllNodesForMachine(ctx context.Context, c client.Client, machine *v1alpha5.Machine) ([]*v1.Node, error) { // If the providerID is defined, use that value; else, use the machine linked annotation if it's on the machine providerID := lo.Ternary(machine.Status.ProviderID != "", machine.Status.ProviderID, machine.Annotations[v1alpha5.MachineLinkedAnnotationKey]) // Machines that have no resolved providerID have no nodes mapped to them if providerID == "" { return nil, nil } nodeList := v1.NodeList{} if err := c.List(ctx, &nodeList, client.MatchingFields{"spec.providerID": providerID}); err != nil { return nil, fmt.Errorf("listing nodes, %w", err) } return lo.ToSlicePtr(nodeList.Items), nil } // New converts a node into a Machine using known values from the node and provisioner spec values // Deprecated: This Machine generator function can be removed when v1beta1 migration has completed. 
func New(node *v1.Node, provisioner *v1alpha5.Provisioner) *v1alpha5.Machine { machine := NewFromNode(node) machine.Annotations = lo.Assign(provisioner.Annotations, v1alpha5.ProviderAnnotation(provisioner.Spec.Provider)) machine.Labels = lo.Assign(provisioner.Labels, map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name}) machine.OwnerReferences = []metav1.OwnerReference{ { APIVersion: v1alpha5.SchemeGroupVersion.String(), Kind: "Provisioner", Name: provisioner.Name, UID: provisioner.UID, BlockOwnerDeletion: ptr.Bool(true), }, } machine.Spec.Kubelet = provisioner.Spec.KubeletConfiguration machine.Spec.Taints = provisioner.Spec.Taints machine.Spec.StartupTaints = provisioner.Spec.StartupTaints machine.Spec.Requirements = provisioner.Spec.Requirements machine.Spec.MachineTemplateRef = provisioner.Spec.ProviderRef return machine } // NewFromNode converts a node into a pseudo-Machine using known values from the node // Deprecated: This Machine generator function can be removed when v1beta1 migration has completed. func NewFromNode(node *v1.Node) *v1alpha5.Machine { m := &v1alpha5.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: node.Name, Annotations: node.Annotations, Labels: node.Labels, Finalizers: []string{v1alpha5.TerminationFinalizer}, }, Spec: v1alpha5.MachineSpec{ Taints: node.Spec.Taints, Requirements: scheduling.NewLabelRequirements(node.Labels).NodeSelectorRequirements(), Resources: v1alpha5.ResourceRequirements{ Requests: node.Status.Allocatable, }, }, Status: v1alpha5.MachineStatus{ ProviderID: node.Spec.ProviderID, Capacity: node.Status.Capacity, Allocatable: node.Status.Allocatable, }, } if _, ok := node.Labels[v1alpha5.LabelNodeInitialized]; ok { m.StatusConditions().MarkTrue(v1alpha5.MachineInitialized) } m.StatusConditions().MarkTrue(v1alpha5.MachineLaunched) m.StatusConditions().MarkTrue(v1alpha5.MachineRegistered) return m }
208
karpenter-core
aws
Go
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/clock"
	"knative.dev/pkg/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
	"github.com/aws/karpenter-core/pkg/utils/pod"
)

// GetNodePods gets the list of schedulable pods from a variadic list of nodes
// It ignores pods that are owned by the node, a daemonset or are in a terminal
// or terminating state
func GetNodePods(ctx context.Context, kubeClient client.Client, nodes ...*v1.Node) ([]*v1.Pod, error) {
	var pods []*v1.Pod
	for _, node := range nodes {
		var podList v1.PodList
		// Relies on a "spec.nodeName" field index registered on the client's cache
		if err := kubeClient.List(ctx, &podList, client.MatchingFields{"spec.nodeName": node.Name}); err != nil {
			return nil, fmt.Errorf("listing pods, %w", err)
		}
		for i := range podList.Items {
			// these pods don't need to be rescheduled
			if pod.IsOwnedByNode(&podList.Items[i]) ||
				pod.IsOwnedByDaemonSet(&podList.Items[i]) ||
				pod.IsTerminal(&podList.Items[i]) ||
				pod.IsTerminating(&podList.Items[i]) {
				continue
			}
			pods = append(pods, &podList.Items[i])
		}
	}
	return pods, nil
}

// GetCondition returns the node status condition matching the given type, or a
// zero-valued v1.NodeCondition if the node has no such condition.
func GetCondition(n *v1.Node, match v1.NodeConditionType) v1.NodeCondition {
	for _, condition := range n.Status.Conditions {
		if condition.Type == match {
			return condition
		}
	}
	return v1.NodeCondition{}
}

// GetExpirationTime returns the time at which the node expires based on the
// provisioner's TTLSecondsUntilExpired, measured from the node's creation.
func GetExpirationTime(node *v1.Node, provisioner *v1alpha5.Provisioner) time.Time {
	if provisioner == nil || provisioner.Spec.TTLSecondsUntilExpired == nil || node == nil {
		// If not defined, return some much larger time.
		// (time.Date normalizes the zero month/day, so this is Nov 30, 4999 UTC.)
		return time.Date(5000, 0, 0, 0, 0, 0, 0, time.UTC)
	}
	expirationTTL := time.Duration(ptr.Int64Value(provisioner.Spec.TTLSecondsUntilExpired)) * time.Second
	return node.CreationTimestamp.Add(expirationTTL)
}

// IsExpired returns true if the node's expiration time (per the provisioner's
// TTL) is in the past according to the provided clock.
func IsExpired(n *v1.Node, clock clock.Clock, provisioner *v1alpha5.Provisioner) bool {
	return clock.Now().After(GetExpirationTime(n, provisioner))
}
76
karpenter-core
aws
Go
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
	"github.com/aws/karpenter-core/pkg/scheduling"
)

// IsProvisionable returns true if the pod is a candidate for provisioning a
// new node: unscheduled, not nominated for preemption, marked unschedulable by
// the scheduler, and not owned by a DaemonSet or a Node (static pod).
func IsProvisionable(pod *v1.Pod) bool {
	return !IsScheduled(pod) &&
		!IsPreempting(pod) &&
		FailedToSchedule(pod) &&
		!IsOwnedByDaemonSet(pod) &&
		!IsOwnedByNode(pod)
}

// FailedToSchedule returns true if the scheduler has reported the pod as
// unschedulable via the PodScheduled condition.
func FailedToSchedule(pod *v1.Pod) bool {
	for _, condition := range pod.Status.Conditions {
		if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonUnschedulable {
			return true
		}
	}
	return false
}

// IsScheduled returns true if the pod has been bound to a node.
func IsScheduled(pod *v1.Pod) bool {
	return pod.Spec.NodeName != ""
}

// IsPreempting returns true if the pod has been nominated onto a node by the preemption logic.
func IsPreempting(pod *v1.Pod) bool {
	return pod.Status.NominatedNodeName != ""
}

// IsTerminal returns true if the pod has reached a terminal phase (Failed or Succeeded).
func IsTerminal(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded
}

// IsTerminating returns true if the pod has been marked for deletion.
func IsTerminating(pod *v1.Pod) bool {
	return pod.DeletionTimestamp != nil
}

// IsOwnedByDaemonSet returns true if the pod has an apps/v1 DaemonSet owner reference.
func IsOwnedByDaemonSet(pod *v1.Pod) bool {
	return IsOwnedBy(pod, []schema.GroupVersionKind{
		{Group: "apps", Version: "v1", Kind: "DaemonSet"},
	})
}

// IsOwnedByNode returns true if the pod is a static pod owned by a specific node
func IsOwnedByNode(pod *v1.Pod) bool {
	return IsOwnedBy(pod, []schema.GroupVersionKind{
		{Version: "v1", Kind: "Node"},
	})
}

// IsOwnedBy returns true if any of the pod's owner references matches one of
// the given GroupVersionKinds (compared by apiVersion and kind).
func IsOwnedBy(pod *v1.Pod, gvks []schema.GroupVersionKind) bool {
	for _, ignoredOwner := range gvks {
		for _, owner := range pod.ObjectMeta.OwnerReferences {
			if owner.APIVersion == ignoredOwner.GroupVersion().String() && owner.Kind == ignoredOwner.Kind {
				return true
			}
		}
	}
	return false
}

// HasDoNotEvict returns true if the pod carries the karpenter do-not-evict
// annotation with the exact value "true".
func HasDoNotEvict(pod *v1.Pod) bool {
	if pod.Annotations == nil {
		return false
	}
	return pod.Annotations[v1alpha5.DoNotEvictPodAnnotationKey] == "true"
}

// ToleratesUnschedulableTaint returns true if the pod tolerates the
// node.kubernetes.io/unschedulable taint.
// (NOTE: doc previously referred to this function as "HasUnschedulableToleration".)
func ToleratesUnschedulableTaint(pod *v1.Pod) bool {
	return (scheduling.Taints{{Key: v1.TaintNodeUnschedulable, Effect: v1.TaintEffectNoSchedule}}).Tolerates(pod) == nil
}

// HasRequiredPodAntiAffinity returns true if a non-empty PodAntiAffinity/RequiredDuringSchedulingIgnoredDuringExecution
// is defined in the pod spec
func HasRequiredPodAntiAffinity(pod *v1.Pod) bool {
	return HasPodAntiAffinity(pod) &&
		len(pod.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0
}

// HasPodAntiAffinity returns true if a non-empty PodAntiAffinity is defined in the pod spec
func HasPodAntiAffinity(pod *v1.Pod) bool {
	return pod.Spec.Affinity != nil && pod.Spec.Affinity.PodAntiAffinity != nil &&
		(len(pod.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 ||
			len(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 0)
}
107
karpenter-core
aws
Go
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pretty

import (
	"time"

	"github.com/mitchellh/hashstructure/v2"
	"github.com/patrickmn/go-cache"
)

// ChangeMonitor is used to reduce logging when discovering information that may change. The values recorded expire after
// 24 hours by default to prevent a value from being logged at startup only which could impede debugging if full sets
// of logs aren't available.
type ChangeMonitor struct {
	// lastSeen maps a caller-supplied key to the hash of the value most recently reported for it
	lastSeen *cache.Cache
}

// NewChangeMonitor constructs a monitor whose entries expire after 24 hours
// (purged every 12 hours).
func NewChangeMonitor() *ChangeMonitor {
	return &ChangeMonitor{
		lastSeen: cache.New(24*time.Hour, 12*time.Hour),
	}
}

// Reconfigure allows reconfiguring the change monitor with a new duration. This resets any previously recorded
// changes and should only be done at construction.
func (c *ChangeMonitor) Reconfigure(expiration time.Duration) {
	c.lastSeen = cache.New(expiration, expiration/2)
}

// HasChanged takes a key and value and returns true if the hash of the value has changed since the last time the
// change monitor was called. A true result also records the new hash under the key.
func (c *ChangeMonitor) HasChanged(key string, value any) bool {
	// Hash error is deliberately discarded; on failure hv is the zero value and
	// still participates in the comparison below.
	hv, _ := hashstructure.Hash(value, hashstructure.FormatV2, &hashstructure.HashOptions{SlicesAsSets: true})
	existing, ok := c.lastSeen.Get(key)
	var existingHash uint64
	if ok {
		existingHash = existing.(uint64)
	}
	if !ok || existingHash != hv {
		c.lastSeen.SetDefault(key, hv)
		return true
	}
	return false
}
58
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package pretty import ( "encoding/json" ) func Concise(o interface{}) string { bytes, err := json.Marshal(o) if err != nil { return err.Error() } return string(bytes) }
28
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package resources import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "github.com/aws/karpenter-core/pkg/utils/pretty" ) // RequestsForPods returns the total resources of a variadic list of podspecs. func RequestsForPods(pods ...*v1.Pod) v1.ResourceList { var resources []v1.ResourceList for _, pod := range pods { resources = append(resources, Ceiling(pod).Requests) } merged := Merge(resources...) merged[v1.ResourcePods] = *resource.NewQuantity(int64(len(pods)), resource.DecimalExponent) return merged } // LimitsForPods returns the total resources of a variadic list of podspecs func LimitsForPods(pods ...*v1.Pod) v1.ResourceList { var resources []v1.ResourceList for _, pod := range pods { resources = append(resources, Ceiling(pod).Limits) } merged := Merge(resources...) 
merged[v1.ResourcePods] = *resource.NewQuantity(int64(len(pods)), resource.DecimalExponent) return merged } // Merge the resources from the variadic into a single v1.ResourceList func Merge(resources ...v1.ResourceList) v1.ResourceList { if len(resources) == 0 { return v1.ResourceList{} } result := make(v1.ResourceList, len(resources[0])) for _, resourceList := range resources { for resourceName, quantity := range resourceList { current := result[resourceName] current.Add(quantity) result[resourceName] = current } } return result } func Subtract(lhs, rhs v1.ResourceList) v1.ResourceList { result := make(v1.ResourceList, len(lhs)) for k, v := range lhs { result[k] = v.DeepCopy() } for resourceName := range lhs { current := lhs[resourceName] if rhsValue, ok := rhs[resourceName]; ok { current.Sub(rhsValue) } result[resourceName] = current } return result } // Ceiling calculates the max between the sum of container resources and max of initContainers func Ceiling(pod *v1.Pod) v1.ResourceRequirements { var resources v1.ResourceRequirements for _, container := range pod.Spec.Containers { resources.Requests = Merge(resources.Requests, MergeResourceLimitsIntoRequests(container)) resources.Limits = Merge(resources.Limits, container.Resources.Limits) } for _, container := range pod.Spec.InitContainers { resources.Requests = MaxResources(resources.Requests, MergeResourceLimitsIntoRequests(container)) resources.Limits = MaxResources(resources.Limits, container.Resources.Limits) } return resources } // MaxResources returns the maximum quantities for a given list of resources func MaxResources(resources ...v1.ResourceList) v1.ResourceList { resourceList := v1.ResourceList{} for _, resource := range resources { for resourceName, quantity := range resource { if value, ok := resourceList[resourceName]; !ok || quantity.Cmp(value) > 0 { resourceList[resourceName] = quantity } } } return resourceList } // MergeResourceLimitsIntoRequests merges resource limits into requests if no 
request exists for the given resource func MergeResourceLimitsIntoRequests(container v1.Container) v1.ResourceList { resources := container.Resources.DeepCopy() if resources.Requests == nil { resources.Requests = v1.ResourceList{} } if resources.Limits != nil { for resourceName, quantity := range resources.Limits { if _, ok := resources.Requests[resourceName]; !ok { resources.Requests[resourceName] = quantity } } } return resources.Requests } // Quantity parses the string value into a *Quantity func Quantity(value string) *resource.Quantity { r := resource.MustParse(value) return &r } // IsZero implements r.IsZero(). This method is provided to make some code a bit cleaner as the Quantity.IsZero() takes // a pointer receiver and map index expressions aren't addressable, so it can't be called directly. func IsZero(r resource.Quantity) bool { return r.IsZero() } func Cmp(lhs resource.Quantity, rhs resource.Quantity) int { return lhs.Cmp(rhs) } // Fits returns true if the candidate set of resources is less than or equal to the total set of resources. func Fits(candidate, total v1.ResourceList) bool { // If any of the total resource values are negative then the resource will never fit for _, quantity := range total { if Cmp(resource.MustParse("0"), quantity) > 0 { return false } } for resourceName, quantity := range candidate { if Cmp(quantity, total[resourceName]) > 0 { return false } } return true } // String returns a string version of the resource list suitable for presenting in a log func String(list v1.ResourceList) string { if len(list) == 0 { return "{}" } return pretty.Concise(list) } // StringMap returns the string map representation of the resource list func StringMap(list v1.ResourceList) map[string]string { if list == nil { return nil } m := make(map[string]string) for k, v := range list { m[k.String()] = v.String() } return m }
172
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package result import ( "math" "time" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // Min returns the result that wants to requeue the soonest func Min(results ...reconcile.Result) (result reconcile.Result) { min := time.Duration(math.MaxInt64) for _, r := range results { if r.IsZero() { continue } if r.RequeueAfter < min { min = r.RequeueAfter result.RequeueAfter = min result.Requeue = true } } return }
39
karpenter-core
aws
Go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package sets type Empty struct{} // Set implements a generic implementation of k8s.io/apimachinery/pkg/util/sets // TODO: Remove our local generic implementation when migrating to a later version of // https://github.com/kubernetes/apimachinery/blob/master/pkg/util/sets/set.go type Set[T comparable] map[T]Empty func New[T comparable](items ...T) Set[T] { ss := make(Set[T], len(items)) ss.Insert(items...) return ss } // Insert adds items to the set. func (s Set[T]) Insert(items ...T) Set[T] { for _, item := range items { s[item] = Empty{} } return s } // Delete removes all items from the set. func (s Set[T]) Delete(items ...T) Set[T] { for _, item := range items { delete(s, item) } return s } // Has returns true if and only if item is contained in the set. func (s Set[T]) Has(item T) bool { _, contained := s[item] return contained } // HasAll returns true if and only if all items are contained in the set. func (s Set[T]) HasAll(items ...T) bool { for _, item := range items { if !s.Has(item) { return false } } return true } // HasAny returns true if any items are contained in the set. func (s Set[T]) HasAny(items ...T) bool { for _, item := range items { if s.Has(item) { return true } } return false } // Clone returns a new set which is a copy of the current set. 
func (s Set[T]) Clone() Set[T] { result := make(Set[T], len(s)) for key := range s { result.Insert(key) } return result } // Difference returns a set of objects that are not in s2 // For example: // s1 = {a1, a2, a3} // s2 = {a1, a2, a4, a5} // s1.Difference(s2) = {a3} // s2.Difference(s1) = {a4, a5} func (s Set[T]) Difference(s2 Set[T]) Set[T] { result := New[T]() for key := range s { if !s2.Has(key) { result.Insert(key) } } return result } // Union returns a new set which includes items in either s1 or s2. // For example: // s1 = {a1, a2} // s2 = {a3, a4} // s1.Union(s2) = {a1, a2, a3, a4} // s2.Union(s1) = {a1, a2, a3, a4} func (s Set[T]) Union(s2 Set[T]) Set[T] { result := s.Clone() for key := range s2 { result.Insert(key) } return result } // Intersection returns a new set which includes the item in BOTH s1 and s2 // For example: // s1 = {a1, a2} // s2 = {a2, a3} // s1.Intersection(s2) = {a2} func (s Set[T]) Intersection(s2 Set[T]) Set[T] { var walk, other Set[T] result := New[T]() if s.Len() < s2.Len() { walk = s other = s2 } else { walk = s2 other = s } for key := range walk { if other.Has(key) { result.Insert(key) } } return result } // IsSuperset returns true if and only if s1 is a superset of s2. func (s Set[T]) IsSuperset(s2 Set[T]) bool { for item := range s2 { if !s.Has(item) { return false } } return true } func (s Set[T]) Equal(s2 Set[T]) bool { if s.Len() != s2.Len() { return false } for item := range s2 { if !s.Has(item) { return false } } return true } // List returns the slice with contents in random order. func (s Set[T]) List() []T { res := make([]T, 0, len(s)) for key := range s { res = append(res, key) } return res } // PopAny returns a single element from the set. func (s Set[T]) PopAny() (T, bool) { for key := range s { s.Delete(key) return key, true } var zeroValue T return zeroValue, false } // Len returns the size of the set. func (s Set[T]) Len() int { return len(s) }
179
karpenter-core
aws
Go
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package webhooks

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"knative.dev/pkg/configmap"
	"knative.dev/pkg/controller"
	knativeinjection "knative.dev/pkg/injection"
	"knative.dev/pkg/logging"
	"knative.dev/pkg/webhook/certificates"
	"knative.dev/pkg/webhook/configmaps"
	"knative.dev/pkg/webhook/resourcesemantics"
	"knative.dev/pkg/webhook/resourcesemantics/validation"

	"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
)

// NewWebhooks returns the controller constructors that make up the webhook
// server: certificate management plus the CRD and ConfigMap validators below.
func NewWebhooks() []knativeinjection.ControllerConstructor {
	return []knativeinjection.ControllerConstructor{
		certificates.NewController,
		NewCRDValidationWebhook,
		NewConfigValidationWebhook,
	}
}

// NewCRDValidationWebhook constructs the admission controller that validates
// the karpenter.sh resources listed in Resources; the watcher parameter is
// unused but required by the knative ControllerConstructor signature.
func NewCRDValidationWebhook(ctx context.Context, w configmap.Watcher) *controller.Impl {
	return validation.NewAdmissionController(ctx,
		"validation.webhook.karpenter.sh",
		"/validate/karpenter.sh",
		Resources,
		// Context decorator: no extra values are injected
		func(ctx context.Context) context.Context { return ctx },
		true,
	)
}

// NewConfigValidationWebhook constructs the admission controller that
// validates the logging ConfigMap against knative's config parser.
func NewConfigValidationWebhook(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
	return configmaps.NewAdmissionController(ctx,
		"validation.webhook.config.karpenter.sh",
		"/validate/config.karpenter.sh",
		configmap.Constructors{
			logging.ConfigMapName(): logging.NewConfigFromConfigMap,
		},
	)
}

// Resources maps the GroupVersionKinds validated by the CRD webhook to the Go
// types that implement their validation logic.
var Resources = map[schema.GroupVersionKind]resourcesemantics.GenericCRD{
	v1alpha5.SchemeGroupVersion.WithKind("Provisioner"): &v1alpha5.Provisioner{},
}
64
lightsailctl
aws
Go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // Package main is where lightsailctl command begins. package main import ( "fmt" "log" "os" "regexp" "github.com/aws/lightsailctl/internal" "github.com/aws/lightsailctl/internal/plugin" ) func main() { log.SetFlags(0) pluginPattern := regexp.MustCompile(`^--?plugin$`) getverPattern := regexp.MustCompile(`^--?version$`) switch { case len(os.Args) > 1 && pluginPattern.MatchString(os.Args[1]): pluginMain(os.Args[0]+" "+os.Args[1], os.Args[2:]) case len(os.Args) > 1 && getverPattern.MatchString(os.Args[1]): fmt.Println(internal.Version) default: log.Fatalf("%s can't be used directly, it is meant to be invoked by AWS CLI", os.Args[0]) } } // May be set by tests to something else. var pluginMain = plugin.Main
35
lightsailctl
aws
Go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package main

import (
	"os"
	"reflect"
	"testing"

	"github.com/aws/lightsailctl/internal/plugin"
)

// setArgs replaces os.Args; deferred with the previous value it restores it.
func setArgs(args []string) {
	os.Args = args
}

// setPluginMain replaces the pluginMain test seam; deferred with plugin.Main
// it restores the production entry point.
func setPluginMain(f func(string, []string)) {
	pluginMain = f
}

// TestMainCallsPluginMain verifies that main forwards both the "-plugin" and
// "--plugin" spellings to pluginMain, passing "<program> <flag>" as the
// progname and the remaining arguments through untouched.
func TestMainCallsPluginMain(t *testing.T) {
	defer setArgs(os.Args)
	defer setPluginMain(plugin.Main)

	var gotProgname []string
	var gotArgs [][]string

	pluginMain = func(progname string, args []string) {
		gotProgname = append(gotProgname, progname)
		gotArgs = append(gotArgs, args)
	}

	os.Args = []string{"program", "-plugin", "--foo", "55"}
	main()
	os.Args = []string{"program", "--plugin", "--bar", "42"}
	main()

	if want := []string{"program -plugin", "program --plugin"}; !reflect.DeepEqual(gotProgname, want) {
		t.Errorf("got: %v", gotProgname)
		t.Logf("want: %v", want)
	}
	if want := [][]string{{"--foo", "55"}, {"--bar", "42"}}; !reflect.DeepEqual(gotArgs, want) {
		t.Errorf("got: %v", gotArgs)
		t.Logf("want: %v", want)
	}
}
48
lightsailctl
aws
Go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package internal

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/lightsail"
)

// ContainerAPIMetadataGetter is the subset of the Lightsail API needed to
// look up the latest published lightsailctl version.
type ContainerAPIMetadataGetter interface {
	GetContainerAPIMetadata(
		context.Context,
		*lightsail.GetContainerAPIMetadataInput,
		...func(*lightsail.Options),
	) (*lightsail.GetContainerAPIMetadataOutput, error)
}

// CheckForUpdates compares the in-use version against the latest version
// advertised by the Lightsail API and prints an upgrade warning via the
// standard logger when a newer one exists. Lookup failures go only to
// debugLog: update checking is best-effort and never fatal.
func CheckForUpdates(
	ctx context.Context,
	debugLog *log.Logger,
	g ContainerAPIMetadataGetter,
	inUse Semver,
) {
	available, err := getLatestLightsailctlVersion(ctx, g)
	if err != nil {
		debugLog.Print(err.Error())
		return
	}

	if inUse.Less(available) {
		log.Printf(`WARNING: You are using lightsailctl %s, but %s is available.
To download, visit https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-install-software`, inUse, available)
	}
}

// getLatestLightsailctlVersion extracts the "lightsailctlVersion" entry from
// the GetContainerAPIMetadata response and validates it as a semver.
func getLatestLightsailctlVersion(
	ctx context.Context,
	g ContainerAPIMetadataGetter,
) (Semver, error) {
	res, err := g.GetContainerAPIMetadata(ctx, &lightsail.GetContainerAPIMetadataInput{})
	if err != nil {
		return "", fmt.Errorf("could not get latest lightsailctl version: %w", err)
	}
	var rawSemver string
	for _, md := range res.Metadata {
		// No early break: the last matching entry wins if the name repeats.
		if md["name"] == "lightsailctlVersion" {
			rawSemver = md["value"]
		}
	}
	if rawSemver == "" {
		return "", fmt.Errorf("latest lightsailctl version was not in GetContainerAPIMetadata response")
	}
	ver := Semver(rawSemver)
	if !ver.IsValid() {
		return "", fmt.Errorf("latest lightsailctl version is not a semver: %q", rawSemver)
	}
	return ver, nil
}
68
lightsailctl
aws
Go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package internal

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"strconv"
	"strings"
	"testing"

	"github.com/aws/aws-sdk-go-v2/service/lightsail"
)

// fakeContainerAPIMetadataGetter fakes the metadata API based on its own
// string value: "" yields an empty response, a value containing "error"
// yields that error, anything else is returned as the advertised
// lightsailctl version.
type fakeContainerAPIMetadataGetter string

func (f fakeContainerAPIMetadataGetter) GetContainerAPIMetadata(
	context.Context,
	*lightsail.GetContainerAPIMetadataInput,
	...func(*lightsail.Options),
) (*lightsail.GetContainerAPIMetadataOutput, error) {
	switch {
	case f == "":
		return &lightsail.GetContainerAPIMetadataOutput{}, nil
	case strings.Contains(string(f), "error"):
		return nil, errors.New(string(f))
	default:
		return &lightsail.GetContainerAPIMetadataOutput{
			Metadata: []map[string]string{
				{
					"name":  "lightsailctlVersion",
					"value": string(f),
				},
			},
		}, nil
	}
}

func ExampleCheckForUpdates() {
	// Redirect the standard logger to stdout so the example output can be
	// checked, restoring its previous configuration when done.
	defer func(w io.Writer, flags int, p string) {
		log.SetOutput(w)
		log.SetFlags(flags)
		log.SetPrefix(p)
	}(log.Writer(), log.Flags(), log.Prefix())

	log.SetOutput(os.Stdout)
	log.SetFlags(0)
	log.SetPrefix("[logger] ")

	debugLog := log.New(log.Writer(), log.Prefix(), log.Flags())

	ctx := context.Background()

	CheckForUpdates(ctx, debugLog, fakeContainerAPIMetadataGetter("1.4.33"), "v1.4.33")
	CheckForUpdates(ctx, debugLog, fakeContainerAPIMetadataGetter("very bad error occurred"), "v1.4.33")
	fmt.Println("now we should get warnings")
	CheckForUpdates(ctx, debugLog, fakeContainerAPIMetadataGetter("v1.6.11"), "v1.4.33")
	CheckForUpdates(ctx, debugLog, fakeContainerAPIMetadataGetter("v2.7.3"), "v2.7.3-beta")

	// Output:
	// [logger] could not get latest lightsailctl version: very bad error occurred
	// now we should get warnings
	// [logger] WARNING: You are using lightsailctl v1.4.33, but v1.6.11 is available.
	// To download, visit https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-install-software
	// [logger] WARNING: You are using lightsailctl v2.7.3-beta, but v2.7.3 is available.
	// To download, visit https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-install-software
}

// TestGetLatestLightsailctlVersion covers API failure, missing metadata,
// invalid semver, and two happy-path versions.
func TestGetLatestLightsailctlVersion(t *testing.T) {
	ctx := context.Background()
	for i, c := range []struct {
		input   string
		wantVer Semver
		wantErr string
	}{
		{
			input:   "network timeout error",
			wantVer: "",
			wantErr: "could not get latest lightsailctl version: network timeout error",
		},
		{
			input:   "",
			wantVer: "",
			wantErr: "latest lightsailctl version was not in GetContainerAPIMetadata response",
		},
		{
			input:   "bogus",
			wantVer: "",
			wantErr: `latest lightsailctl version is not a semver: "bogus"`,
		},
		{
			input:   "1.4.0-beta",
			wantVer: "1.4.0-beta",
			wantErr: "",
		},
		{
			input:   "1.4.1",
			wantVer: "1.4.1",
			wantErr: "",
		},
	} {
		t.Run(strconv.Itoa(i+1), func(t *testing.T) {
			gotErr := ""
			gotVer, err := getLatestLightsailctlVersion(ctx, fakeContainerAPIMetadataGetter(c.input))
			if err != nil {
				gotErr = err.Error()
			}
			if c.wantErr != gotErr {
				t.Errorf("got error %q, want error %q", gotErr, c.wantErr)
			}
			if c.wantVer != gotVer {
				t.Errorf("got ver %q, want ver %q", gotVer, c.wantVer)
			}
		})
	}
}
123
lightsailctl
aws
Go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package internal import ( "strings" "golang.org/x/mod/semver" ) const Version Semver = "v1.0.6-fix26" type Semver string func (v Semver) IsValid() bool { return semver.IsValid(v.String()) } func (v Semver) Less(other Semver) bool { return semver.Compare(v.String(), other.String()) < 0 } func (v Semver) String() string { s := string(v) if s == "" || strings.HasPrefix(s, "v") { return semver.Canonical(s) } return semver.Canonical("v" + s) }
31
lightsailctl
aws
Go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package internal_test

import (
	"testing"

	"github.com/aws/lightsailctl/internal"
)

// TestVersionGlobalIsValid guards against shipping a build whose hardcoded
// Version constant is not a parsable semver.
func TestVersionGlobalIsValid(t *testing.T) {
	if !internal.Version.IsValid() {
		t.Errorf("internal.Version value %q is not a valid semver", string(internal.Version))
	}
}
18
lightsailctl
aws
Go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cs import ( "context" "encoding/base64" "encoding/json" "errors" "io" "log" "os" "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/client" "github.com/docker/docker/pkg/jsonmessage" "github.com/moby/term" ) // DockerEngine defines a subset of client-side // operations against local Docker Engine, relevant to lightsailctl. type DockerEngine struct { c *client.Client } // RemoteImage combines remote server auth details, address // and an image tag into a value that has everything that // one needs to push this image to a remote repo. type RemoteImage struct { registry.AuthConfig Tag string } func (r *RemoteImage) Ref() string { return r.ServerAddress + ":" + r.Tag } func NewDockerEngine(ctx context.Context) (*DockerEngine, error) { dc, err := client.NewClientWithOpts(client.FromEnv) if err != nil { return nil, err } dc.NegotiateAPIVersion(ctx) return &DockerEngine{c: dc}, nil } func (e *DockerEngine) TagImage(ctx context.Context, source, target string) error { return e.c.ImageTag(ctx, source, target) } func (e *DockerEngine) UntagImage(ctx context.Context, image string) error { _, err := e.c.ImageRemove(ctx, image, types.ImageRemoveOptions{}) return err } func (e *DockerEngine) PushImage(ctx context.Context, remoteImage RemoteImage) (digest string, err error) { authBytes, err := json.Marshal(remoteImage.AuthConfig) if err != nil { return "", err } pushRes, err := e.c.ImagePush(ctx, remoteImage.Ref(), types.ImagePushOptions{ RegistryAuth: base64.URLEncoding.EncodeToString(authBytes), }) if err != nil { return "", err } defer pushRes.Close() termFd, isTerm := term.GetFdInfo(os.Stderr) if err = jsonmessage.DisplayJSONMessagesStream( // Skip statuses that have irrelevant details such as repo address. 
skipStatuses(pushRes, remoteImage.ServerAddress, remoteImage.Tag), os.Stderr, termFd, isTerm, extractDigest(&digest)); err != nil { return "", err } if digest == "" { return "", errors.New("image push response does not contain the image digest") } return digest, nil } func skipStatuses(input io.Reader, s ...string) io.Reader { r, w := io.Pipe() go func() { defer w.Close() dec := json.NewDecoder(input) enc := json.NewEncoder(w) InputLoop: for { m := jsonmessage.JSONMessage{} if err := dec.Decode(&m); err != nil { if err != io.EOF { log.Printf("skipStatuses: %v", err) } break } for _, skip := range s { if strings.Contains(m.Status, skip) { continue InputLoop } } if err := enc.Encode(m); err != nil { log.Printf("skipStatuses: %v", err) } } }() return r } func extractDigest(p *string) func(jsonmessage.JSONMessage) { return func(m jsonmessage.JSONMessage) { aux := struct{ Digest string }{} if err := json.Unmarshal(*m.Aux, &aux); err != nil { log.Printf("extractDigest: %v", err) return } *p = aux.Digest } }
124
lightsailctl
aws
Go
package cs

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strings"
	"testing"

	"github.com/docker/docker/pkg/jsonmessage"
)

// TestExtractDigest checks that a malformed aux payload is ignored and that
// a well-formed payload stores its digest into the target string.
func TestExtractDigest(t *testing.T) {
	got := ""

	badAux := json.RawMessage("42")
	extractDigest(&got)(jsonmessage.JSONMessage{Aux: &badAux})
	if got != "" {
		t.Errorf("unexpected got: %q", got)
	}

	wantDigest := "sha256:b95cf9b496720e43b12ce435775d5e337a6648147825c0fc8fc0ff93616c69a0"
	goodAux := json.RawMessage(`{"digest": "` + wantDigest + `"}`)
	extractDigest(&got)(jsonmessage.JSONMessage{Aux: &goodAux})
	if got != wantDigest {
		t.Errorf("got: %q", got)
		t.Logf("want: %q", wantDigest)
	}
}

// Example_skipStatuses shows messages whose status matches a skip substring
// being dropped, while the rest pass through re-encoded.
func Example_skipStatuses() {
	r := skipStatuses(
		strings.NewReader(`
{"status": "keep me"}
{"status": "xyz skip1 abc"}
{"status": "also keep me!"}
{"status": "\tskip2"}`),
		"skip2", "skip1",
	)
	if _, err := io.Copy(os.Stdout, r); err != nil {
		fmt.Println(err)
		return
	}
	// Output:
	// {"status":"keep me"}
	// {"status":"also keep me!"}
}
47
lightsailctl
aws
Go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package cs implements features related to Lightsail (C)ontainer (S)ervice.
package cs

import (
	"context"
	"crypto/rand"
	"encoding/base32"
	"fmt"
	"io"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/lightsail"
	"github.com/docker/docker/api/types/registry"
)

// PushImageInput names the local image to push (Image), the target container
// service (Service), and the label under which to register it (Label).
type PushImageInput struct {
	Service string
	Image   string
	Label   string
}

// RegistryLoginCreator is the Lightsail API subset needed to obtain
// temporary registry credentials.
type RegistryLoginCreator interface {
	CreateContainerServiceRegistryLogin(
		context.Context,
		*lightsail.CreateContainerServiceRegistryLoginInput,
		...func(*lightsail.Options),
	) (*lightsail.CreateContainerServiceRegistryLoginOutput, error)
}

// LightsailImageOperator is the Lightsail API subset used by PushImage:
// registry login plus image registration.
type LightsailImageOperator interface {
	RegistryLoginCreator
	RegisterContainerImage(
		context.Context,
		*lightsail.RegisterContainerImageInput,
		...func(*lightsail.Options),
	) (*lightsail.RegisterContainerImageOutput, error)
}

// ImageOperator abstracts the local container engine operations used by
// PushImage (implemented by DockerEngine; faked in tests).
type ImageOperator interface {
	TagImage(ctx context.Context, source, target string) error
	UntagImage(ctx context.Context, image string) error
	PushImage(ctx context.Context, r RemoteImage) (digest string, err error)
}

// PushImage pushes and registers the image to Lightsail service registry.
func PushImage(ctx context.Context, in *PushImageInput, lio LightsailImageOperator, imgo ImageOperator) error {
	authConfig, err := getServiceRegistryAuth(ctx, lio)
	if err != nil {
		return err
	}

	remoteImage := RemoteImage{AuthConfig: *authConfig, Tag: generateUniqueTag()}

	err = imgo.TagImage(ctx, in.Image, remoteImage.Ref())
	if err != nil {
		return err
	}
	// The temporary remote tag is only needed for the duration of the push;
	// untag failures are logged, not fatal.
	defer tryUntagImage(ctx, imgo, remoteImage.Ref())

	digest, err := imgo.PushImage(ctx, remoteImage)
	if err != nil {
		return err
	}

	registered, err := lio.RegisterContainerImage(
		ctx,
		&lightsail.RegisterContainerImageInput{
			ServiceName: &in.Service,
			Label:       &in.Label,
			Digest:      &digest,
		},
	)
	if err != nil {
		return err
	}

	fmt.Printf("Digest: %s\nImage %q registered.\nRefer to this image as %q in deployments.\n",
		aws.ToString(registered.ContainerImage.Digest), in.Image, aws.ToString(registered.ContainerImage.Image))
	return nil
}

// getServiceRegistryAuth returns the server address and
// the temporary credentials sufficient to push images to
// Lightsail Containers service repo (aka "sr").
//
// Note that "sr" repo only retains image tags generated
// when RegisterContainerImage API is called with specific image
// digests. The purpose of this repo is to keep images that are
// strictly related to your Lightsail container service deployments.
func getServiceRegistryAuth(ctx context.Context, rlc RegistryLoginCreator) (*registry.AuthConfig, error) {
	out, err := rlc.CreateContainerServiceRegistryLogin(
		ctx, new(lightsail.CreateContainerServiceRegistryLoginInput),
	)
	if err != nil {
		return nil, err
	}
	return &registry.AuthConfig{
		Username:      aws.ToString(out.RegistryLogin.Username),
		Password:      aws.ToString(out.RegistryLogin.Password),
		ServerAddress: aws.ToString(out.RegistryLogin.Registry) + "/sr",
	}, nil
}

// tryUntagImage is the same as ImageOperator.UntagImage
// except it doesn't return error and instead logs it.
func tryUntagImage(ctx context.Context, imgo ImageOperator, image string) {
	if err := imgo.UntagImage(ctx, image); err != nil {
		log.Println(err)
	}
}

// generateUniqueTag produces a "<unix nanos>-<random suffix>" tag; the clock
// can be overridden in tests via testNow.
func generateUniqueTag() string {
	now := time.Now()
	if testNow != nil {
		now = testNow()
	}
	return fmt.Sprintf("%v-%s", now.UnixNano(), randomName13())
}

// randomName13 returns 13 base32 characters derived from 8 random bytes;
// the randomness source can be overridden in tests via testRngReader.
func randomName13() string {
	r := rand.Reader
	if testRngReader != nil {
		r = testRngReader
	}
	b := make([]byte, 8)
	if _, err := io.ReadFull(r, b); err != nil {
		panic(err)
	}
	return b32.EncodeToString(b)
}

var (
	// b32 is an unpadded lowercase base32 alphabet used for tag suffixes.
	b32 = base32.NewEncoding("0123456789abcdefghijklmnopqrstuv").WithPadding(base32.NoPadding)

	// Test seams: when non-nil they replace the real clock and RNG.
	testNow       func() time.Time
	testRngReader io.Reader
)
150
lightsailctl
aws
Go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cs

import (
	"context"
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/lightsail"
	"github.com/aws/aws-sdk-go-v2/service/lightsail/types"
	"github.com/docker/docker/api/types/registry"
)

// TestGenerateUniqueTag pins the tag format with a fixed clock and RNG.
func TestGenerateUniqueTag(t *testing.T) {
	defer func() { testNow, testRngReader = nil, nil }()
	testNow = func() time.Time { return time.Unix(0, 1593224653252075123) }
	testRngReader = strings.NewReader("abcdefgh")
	if want, got := "1593224653252075123-c5h66p35cpjmg", generateUniqueTag(); got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}

// TestGetServiceRegistryAuth checks both the login-failure path and the
// translation of a successful login into a registry.AuthConfig.
func TestGetServiceRegistryAuth(t *testing.T) {
	ctx := context.Background()

	if got, err := getServiceRegistryAuth(ctx, &fakeRegistryLoginCreator{failToCreateLogin: true}); err == nil || got != nil {
		t.Errorf("got err: %v", err)
		t.Errorf("got out: %#v", got)
	}

	want := &registry.AuthConfig{
		Username:      "gollum",
		Password:      "precious",
		ServerAddress: "123456789012.dkr.ecr.so-fake-2.amazonaws.com/sr",
	}
	if got, err := getServiceRegistryAuth(ctx, &fakeRegistryLoginCreator{}); err != nil {
		t.Errorf("got err: %v", err)
	} else if !reflect.DeepEqual(got, want) {
		t.Errorf("got: %#v", got)
		t.Logf("want: %#v", want)
	}
}

// TestPushImageErrors exercises each failure point in PushImage in turn.
func TestPushImageErrors(t *testing.T) {
	defer func() { testNow, testRngReader = nil, nil }()
	testNow = func() time.Time { return time.Unix(1611800397, 0) }
	testRngReader = strings.NewReader("abcdefgh")

	type test struct {
		ls   fakeLightsailImageOperator
		imgo fakeImageOperator
		want string
	}

	ctx := context.Background()
	in := &PushImageInput{Service: "doge", Image: "nginx:latest", Label: "www"}

	for i, test := range []test{
		{
			ls:   fakeLightsailImageOperator{fakeRegistryLoginCreator: fakeRegistryLoginCreator{failToCreateLogin: true}},
			want: "failed: create login",
		},
		{
			ls:   fakeLightsailImageOperator{failToRegister: true},
			want: "failed: register (doge, www, sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa)",
		},
		{
			imgo: fakeImageOperator{failToTag: true},
			want: `failed: tag "nginx:latest" as "123456789012.dkr.ecr.so-fake-2.amazonaws.com/sr:1611800397000000000-c5h66p35cpjmg"`,
		},
		{
			imgo: fakeImageOperator{failToUntag: true},
			want: "", // Untagging errors are ignored in current implementation.
		},
		{
			imgo: fakeImageOperator{failToPush: true},
			want: `failed: push "123456789012.dkr.ecr.so-fake-2.amazonaws.com/sr:1611800397000000000-c5h66p35cpjmg"`,
		},
	} {
		t.Run(strconv.Itoa(i+1), func(t *testing.T) {
			// Reset the fake RNG so every case generates the same tag suffix.
			testRngReader = strings.NewReader("abcdefgh")
			err := PushImage(ctx, in, &test.ls, &test.imgo)
			if err == nil && test.want == "" {
				// succeeded as expected
				return
			}
			if err == nil {
				t.Error("unexpectedly succeeded")
				return
			}
			if err.Error() != test.want {
				t.Errorf("got: %v", err)
				t.Logf("want: %v", test.want)
			}
		})
	}
}

func ExamplePushImage() {
	defer func() { testNow, testRngReader = nil, nil }()
	testNow = func() time.Time { return time.Unix(1611796436, 0) }
	testRngReader = strings.NewReader("abcdefgh")

	ctx := context.Background()
	fls := &fakeLightsailImageOperator{}
	fimgo := &fakeImageOperator{}

	if err := PushImage(ctx, &PushImageInput{Service: "doge", Image: "nginx:latest", Label: "www"}, fls, fimgo); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println("docker engine call log:")
	for _, s := range fimgo.log {
		fmt.Println(" ", s)
	}
	fmt.Println("lightsail api call log:")
	for _, s := range fls.log {
		fmt.Println(" ", s)
	}

	// Output:
	// Digest: sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa
	// Image "nginx:latest" registered.
	// Refer to this image as ":doge.www.12345" in deployments.
	// docker engine call log:
	//   tag "nginx:latest" as "123456789012.dkr.ecr.so-fake-2.amazonaws.com/sr:1611796436000000000-c5h66p35cpjmg"
	//   push "123456789012.dkr.ecr.so-fake-2.amazonaws.com/sr:1611796436000000000-c5h66p35cpjmg"
	//   untag "123456789012.dkr.ecr.so-fake-2.amazonaws.com/sr:1611796436000000000-c5h66p35cpjmg"
	// lightsail api call log:
	//   create login
	//   register (doge, www, sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa)
}

// fakeRegistryLoginCreator fakes CreateContainerServiceRegistryLogin,
// optionally failing, and records successful calls in log.
type fakeRegistryLoginCreator struct {
	failToCreateLogin bool
	log               []string
}

func (f *fakeRegistryLoginCreator) CreateContainerServiceRegistryLogin(
	context.Context,
	*lightsail.CreateContainerServiceRegistryLoginInput,
	...func(*lightsail.Options),
) (*lightsail.CreateContainerServiceRegistryLoginOutput, error) {
	op := "create login"
	if f.failToCreateLogin {
		return nil, fmt.Errorf("failed: %s", op)
	}
	f.log = append(f.log, op)
	return &lightsail.CreateContainerServiceRegistryLoginOutput{
		RegistryLogin: &types.ContainerServiceRegistryLogin{
			Username: aws.String("gollum"),
			Password: aws.String("precious"),
			Registry: aws.String("123456789012.dkr.ecr.so-fake-2.amazonaws.com"),
		},
	}, nil
}

// fakeLightsailImageOperator extends the fake login creator with a fake
// RegisterContainerImage implementation.
type fakeLightsailImageOperator struct {
	fakeRegistryLoginCreator
	failToRegister bool
}

func (f *fakeLightsailImageOperator) RegisterContainerImage(
	_ context.Context,
	in *lightsail.RegisterContainerImageInput,
	_ ...func(*lightsail.Options),
) (*lightsail.RegisterContainerImageOutput, error) {
	op := fmt.Sprintf("register (%s, %s, %s)",
		aws.ToString(in.ServiceName), aws.ToString(in.Label), aws.ToString(in.Digest))
	if f.failToRegister {
		return nil, fmt.Errorf("failed: %s", op)
	}
	f.log = append(f.log, op)
	return &lightsail.RegisterContainerImageOutput{
		ContainerImage: &types.ContainerImage{
			Digest: in.Digest,
			Image: aws.String(":" + aws.ToString(in.ServiceName) + "." +
				aws.ToString(in.Label) + ".12345"),
		},
	}, nil
}

// fakeImageOperator fakes the local engine operations, optionally failing
// each one, and records successful calls in log.
type fakeImageOperator struct {
	failToTag, failToUntag, failToPush bool
	log                                []string
}

func (f *fakeImageOperator) TagImage(_ context.Context, source, target string) error {
	op := fmt.Sprintf("tag %q as %q", source, target)
	if f.failToTag {
		return fmt.Errorf("failed: %s", op)
	}
	f.log = append(f.log, op)
	return nil
}

func (f *fakeImageOperator) UntagImage(_ context.Context, image string) error {
	op := fmt.Sprintf("untag %q", image)
	if f.failToUntag {
		return fmt.Errorf("failed: %s", op)
	}
	f.log = append(f.log, op)
	return nil
}

func (f *fakeImageOperator) PushImage(_ context.Context, remoteImage RemoteImage) (string, error) {
	op := fmt.Sprintf("push %q", remoteImage.Ref())
	if f.failToPush {
		return "", fmt.Errorf("failed: %s", op)
	}
	f.log = append(f.log, op)
	return "sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa", nil
}
223
lightsailctl
aws
Go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package plugin implements extensions for AWS CLI's lightsail subcommand.
// See: https://github.com/aws/aws-cli/tree/ce7dc9a61b/awscli/customizations/lightsail
package plugin

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"strconv"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/lightsail"
	"github.com/aws/lightsailctl/internal"
	"github.com/aws/lightsailctl/internal/cs"
	smithyMW "github.com/aws/smithy-go/middleware"
)

// Main is the plugin entry point. It reads the plugin input document either
// from the -input flag or from stdin (-input-stdin), parses it, and invokes
// the requested operation. All failures are fatal: this function is the
// whole program when invoked by the AWS CLI.
func Main(progname string, args []string) {
	input, inputStdin := "", false
	fs := flag.NewFlagSet(progname, flag.ExitOnError)
	const inputFlag = "input"
	fs.StringVar(&input, inputFlag, "", "plugin `payload`")
	const inputStdinFlag = "input-stdin"
	fs.BoolVar(&inputStdin, inputStdinFlag, false, "receive plugin payload on stdin")
	_ = fs.Parse(args)

	if input == "" && !inputStdin {
		fs.Usage()
		log.Fatalf("no plugin input: either %q or %q flag must be specified",
			fs.Lookup(inputFlag).Name, fs.Lookup(inputStdinFlag).Name)
	}

	var r io.Reader = strings.NewReader(input)
	if inputStdin {
		r = os.Stdin
	}

	in, err := parseInput(r)
	if err != nil {
		log.Fatalf("invalid plugin input: %v", err)
	}

	// This is a logger used for extra diagnostics, when the debugging mode is on.
	debugLog := log.New(log.Writer(), log.Prefix(), log.Flags())
	if !in.Configuration.Debug {
		debugLog.SetOutput(io.Discard)
	}

	if err := invokeOperation(context.Background(), in, debugLog); err != nil {
		log.Fatal(err)
	}
}

// Input is the JSON document the AWS CLI hands to the plugin.
type Input struct {
	InputVersion  string          `json:"inputVersion"`
	Operation     string          `json:"operation"`
	Payload       json.RawMessage `json:"payload"` // operation-specific; decoded later
	Configuration OperationConfig `json:"configuration"`
}

// OperationConfig carries the calling CLI's effective settings that need to
// be mirrored in this process's AWS SDK configuration.
type OperationConfig struct {
	Debug          bool   `json:"debug,omitempty"`
	Endpoint       string `json:"endpoint,omitempty"`
	Region         string `json:"region,omitempty"`
	Profile        string `json:"profile,omitempty"`
	CABundle       string `json:"caBundle,omitempty"`
	DoNotVerifySSL bool   `json:"doNotVerifySSL,omitempty"`

	// CLIVersion is the version of the calling CLI,
	// for diagnostics and logging purposes.
	CLIVersion string `json:"cliVersion"`
}

// awsConfig translates the plugin configuration into an aws.Config: region,
// custom endpoint, shared profile, debug logging, TLS verification, and an
// optional custom CA bundle, plus a lightsailctl user-agent entry.
func (c *OperationConfig) awsConfig(ctx context.Context) (aws.Config, error) {
	var opts []func(*config.LoadOptions) error

	// Tag every API call with this tool's name and version.
	opts = append(opts, config.WithAPIOptions([]func(*smithyMW.Stack) error{
		middleware.AddUserAgentKeyValue("lightsailctl", internal.Version.String()),
	}))

	if c.Region != "" {
		opts = append(opts, config.WithRegion(c.Region))
	}

	if ep := strings.TrimRight(c.Endpoint, "/"); ep != "" {
		opts = append(opts, config.WithEndpointResolverWithOptions(
			aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
				return aws.Endpoint{URL: ep}, nil
			})))
	}

	if c.Profile != "" {
		opts = append(opts, config.WithSharedConfigProfile(c.Profile))
	}

	if c.Debug {
		opts = append(opts, config.WithClientLogMode(aws.LogSigning|aws.LogRequestWithBody|aws.LogResponseWithBody))
	}

	if c.DoNotVerifySSL {
		opts = append(opts, config.WithHTTPClient(&http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			},
		}))
	}

	if c.CABundle != "" {
		b, err := os.ReadFile(c.CABundle)
		if err != nil {
			return aws.Config{}, fmt.Errorf("read CA bundle file: %w", err)
		}
		opts = append(opts, config.WithCustomCABundle(bytes.NewReader(b)))
	}

	return config.LoadDefaultConfig(ctx, opts...)
}

// parseInput decodes the plugin input JSON and validates that inputVersion
// is a non-negative integer string.
func parseInput(r io.Reader) (*Input, error) {
	in := new(Input)
	if err := json.NewDecoder(r).Decode(in); err != nil {
		return nil, fmt.Errorf("unable to unmarshal JSON input: %v", err)
	}
	if ver, err := strconv.Atoi(in.InputVersion); err != nil || ver < 0 {
		return nil, fmt.Errorf("invalid inputVersion: it must contain a non-negative number")
	}
	return in, nil
}

// invokeOperation dispatches on in.Operation. PushContainerImage is the only
// operation currently implemented; it also runs a best-effort update check.
func invokeOperation(ctx context.Context, in *Input, debugLog *log.Logger) error {
	switch in.Operation {
	case "PushContainerImage":
		cfg, err := in.Configuration.awsConfig(ctx)
		if err != nil {
			return err
		}
		ls := lightsail.NewFromConfig(cfg)
		internal.CheckForUpdates(ctx, debugLog, ls, internal.Version)
		r, err := parsePushContainerImagePayload(in.Payload)
		if err != nil {
			return fmt.Errorf("unable to parse the input's payload field: %w", err)
		}
		dc, err := cs.NewDockerEngine(ctx)
		if err != nil {
			return err
		}
		if err := cs.PushImage(ctx, r, ls, dc); err != nil {
			return err
		}
	default:
		return fmt.Errorf("unknown plugin operation: %q", in.Operation)
	}
	return nil
}

// parsePushContainerImagePayload decodes the PushContainerImage payload and
// verifies that service, image, and label are all non-empty.
func parsePushContainerImagePayload(data json.RawMessage) (*cs.PushImageInput, error) {
	p := struct {
		Service string `json:"service"`
		Image   string `json:"image"`
		Label   string `json:"label"`
	}{}
	if err := json.Unmarshal(data, &p); err != nil {
		return nil, err
	}
	for _, check := range []struct{ what, input string }{
		{"service name", p.Service},
		{"container image", p.Image},
		{"container label", p.Label},
	} {
		if len(check.input) != 0 {
			continue
		}
		return nil, fmt.Errorf("push container image: %s is not specified", check.what)
	}
	return &cs.PushImageInput{Service: p.Service, Image: p.Image, Label: p.Label}, nil
}
201
lightsailctl
aws
Go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package plugin

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"testing"
	"testing/quick"

	"github.com/aws/lightsailctl/internal/cs"
)

// TestInputVersion checks that inputVersion must be a non-negative integer
// string, with fixed cases plus a quick-check property over all ints.
func TestInputVersion(t *testing.T) {
	var tests = []struct {
		pass  bool
		input string
	}{
		{input: `{"inputVersion": ""}`},
		{input: `{"inputVersion": "v1"}`},
		{input: `{"inputVersion": "bogus"}`},
		{
			pass:  true,
			input: `{"inputVersion": "55"}`,
		},
	}
	for _, test := range tests {
		_, err := parseInput(strings.NewReader(test.input))
		if test.pass {
			if err != nil {
				t.Errorf("%s: %v", test.input, err)
			}
			continue
		}
		if err == nil {
			t.Errorf("%s: unexpected succeeded", test.input)
		}
	}

	// Property: parsing succeeds exactly for non-negative integers and
	// round-trips the version string.
	f := func(i int) bool {
		input := fmt.Sprintf(`{"inputVersion": "%v"}`, i)
		parsed, err := parseInput(strings.NewReader(input))
		if i < 0 {
			return err != nil
		}
		if err != nil {
			return false
		}
		return parsed.InputVersion == strconv.Itoa(i)
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestParseInput checks decoding of a complete, well-formed input document,
// including the raw (undecoded) payload bytes.
func TestParseInput(t *testing.T) {
	input := `{
"inputVersion": "1",
"operation": "Whatever",
"payload": 42,
"configuration": {"region": "us-west-2", "cliVersion": "2.0.47"}
}`
	got, err := parseInput(strings.NewReader(input))
	if err != nil {
		t.Fatal(err)
	}
	want := &Input{
		InputVersion:  "1",
		Operation:     "Whatever",
		Payload:       []byte{'4', '2'},
		Configuration: OperationConfig{Region: "us-west-2", CLIVersion: "2.0.47"},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %#v, want %#v", got, want)
	}
}

// TestParsePushContainerImagePayload checks that all three payload fields
// are required and that a complete payload parses into cs.PushImageInput.
func TestParsePushContainerImagePayload(t *testing.T) {
	inputf := `{
"inputVersion": "1",
"operation": "PushContainerImage",
"payload": %s,
"configuration": {"region": "us-west-2", "cliVersion": "2.0.47"}
}`
	for i, test := range []struct {
		pass                 bool
		payload, errContains string
		want                 *cs.PushImageInput
	}{
		{
			payload:     `{"service": "dyservicev3", "image": "hello:latest"}`,
			errContains: "container label",
		},
		{
			payload:     `{"service": "dyservicev3", "label": "david16"}`,
			errContains: "container image",
		},
		{
			payload:     `{"image": "hello:latest", "label": "david16"}`,
			errContains: "service name",
		},
		{
			pass:    true,
			payload: `{"service": "dyservicev3", "image": "hello:latest", "label": "david16"}`,
			want:    &cs.PushImageInput{Service: "dyservicev3", Image: "hello:latest", Label: "david16"},
		},
	} {
		t.Run(strconv.Itoa(i+1), func(t *testing.T) {
			in, err := parseInput(strings.NewReader(fmt.Sprintf(inputf, test.payload)))
			if err != nil {
				t.Error(err)
				return
			}
			got, err := parsePushContainerImagePayload(in.Payload)
			if test.pass {
				if err != nil {
					t.Error(err)
				}
				if !reflect.DeepEqual(got, test.want) {
					t.Errorf("got %#v, want %#v", got, test.want)
				}
				return
			}
			if err == nil {
				t.Error("unexpectedly succeeded")
				return
			}
			if !strings.Contains(err.Error(), test.errContains) {
				t.Errorf("got err: %v, that doesn't contain %q", err, test.errContains)
			}
		})
	}
}
143
porting-advisor-for-graviton
aws
Go
// Package main prints a fixed greeting to stdout.
package main

import "fmt"

// greeting is the exact text written on program start.
const greeting = "Hello world"

func main() {
	fmt.Println(greeting)
}
8
porting-advisor-for-graviton
aws
Go
// You can edit this code! // Click here and start typing. package main import "fmt" func main() { fmt.Println("Hello world") }
10
rolesanywhere-credential-helper
aws
Go
package aws_signing_helper

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"errors"
	"net/http"
	"runtime"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/arn"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/rolesanywhere-credential-helper/rolesanywhere"
)

// CredentialsOpts carries everything needed to call the Roles Anywhere
// CreateSession API: key/certificate material, target ARNs, and HTTP
// client tuning.
type CredentialsOpts struct {
	PrivateKeyId        string // path to the PEM-encoded private key
	CertificateId       string // path to the PEM-encoded end-entity certificate
	CertificateBundleId string // optional path to a PEM certificate chain
	RoleArn             string
	ProfileArnStr       string
	TrustAnchorArnStr   string
	SessionDuration     int // requested session duration, in seconds
	Region              string
	Endpoint            string
	NoVerifySSL         bool
	WithProxy           bool
	Debug               bool
	Version             string // helper version, reported in the User-Agent
}

// GenerateCredentials creates a Roles Anywhere session and returns the
// resulting temporary credentials in credential_process output form.
// The zero CredentialProcessOutput is returned alongside any error.
func GenerateCredentials(opts *CredentialsOpts) (CredentialProcessOutput, error) {
	// Validate the ARNs, and derive the region when one was not supplied.
	trustAnchorArn, err := arn.Parse(opts.TrustAnchorArnStr)
	if err != nil {
		return CredentialProcessOutput{}, err
	}
	profileArn, err := arn.Parse(opts.ProfileArnStr)
	if err != nil {
		return CredentialProcessOutput{}, err
	}

	if trustAnchorArn.Region != profileArn.Region {
		// BUG FIX: the original returned `err` here, which is always nil at
		// this point, so a region mismatch silently yielded empty
		// credentials with a nil error.
		return CredentialProcessOutput{}, errors.New("trust anchor and profile must be in the same region")
	}

	if opts.Region == "" {
		opts.Region = trustAnchorArn.Region
	}

	// Load the signing identity: private key, certificate, optional chain.
	privateKey, err := ReadPrivateKeyData(opts.PrivateKeyId)
	if err != nil {
		return CredentialProcessOutput{}, err
	}
	certificateData, err := ReadCertificateData(opts.CertificateId)
	if err != nil {
		return CredentialProcessOutput{}, err
	}
	certificateDerData, err := base64.StdEncoding.DecodeString(certificateData.CertificateData)
	if err != nil {
		return CredentialProcessOutput{}, err
	}
	certificate, err := x509.ParseCertificate(certificateDerData)
	if err != nil {
		return CredentialProcessOutput{}, err
	}
	var certificateChain []x509.Certificate
	if opts.CertificateBundleId != "" {
		certificateChainPointers, err := ReadCertificateBundleData(opts.CertificateBundleId)
		if err != nil {
			return CredentialProcessOutput{}, err
		}
		for _, certificate := range certificateChainPointers {
			certificateChain = append(certificateChain, *certificate)
		}
	}

	mySession := session.Must(session.NewSession())

	var logLevel aws.LogLevelType
	if opts.Debug {
		logLevel = aws.LogDebug
	} else {
		logLevel = aws.LogOff
	}

	// Both transports force TLS >= 1.2; the proxy variant additionally
	// honors the standard proxy environment variables.
	var tr *http.Transport
	if opts.WithProxy {
		tr = &http.Transport{
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12, InsecureSkipVerify: opts.NoVerifySSL},
			Proxy:           http.ProxyFromEnvironment,
		}
	} else {
		tr = &http.Transport{
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12, InsecureSkipVerify: opts.NoVerifySSL},
		}
	}
	client := &http.Client{Transport: tr}
	config := aws.NewConfig().WithRegion(opts.Region).WithHTTPClient(client).WithLogLevel(logLevel)
	if opts.Endpoint != "" {
		config.WithEndpoint(opts.Endpoint)
	}
	rolesAnywhereClient := rolesanywhere.New(mySession, config)
	// Swap the default SDK User-Agent and SigV4 signing handlers for the
	// X509 credential-helper variants.
	rolesAnywhereClient.Handlers.Build.RemoveByName("core.SDKVersionUserAgentHandler")
	rolesAnywhereClient.Handlers.Build.PushBackNamed(request.NamedHandler{Name: "v4x509.CredHelperUserAgentHandler", Fn: request.MakeAddToUserAgentHandler("CredHelper", opts.Version, runtime.Version(), runtime.GOOS, runtime.GOARCH)})
	rolesAnywhereClient.Handlers.Sign.Clear()
	rolesAnywhereClient.Handlers.Sign.PushBackNamed(request.NamedHandler{Name: "v4x509.SignRequestHandler", Fn: CreateSignFunction(privateKey, *certificate, certificateChain)})

	durationSeconds := int64(opts.SessionDuration)
	createSessionRequest := rolesanywhere.CreateSessionInput{
		Cert:               &certificateData.CertificateData,
		ProfileArn:         &opts.ProfileArnStr,
		TrustAnchorArn:     &opts.TrustAnchorArnStr,
		DurationSeconds:    &(durationSeconds),
		InstanceProperties: nil,
		RoleArn:            &opts.RoleArn,
		SessionName:        nil,
	}
	output, err := rolesAnywhereClient.CreateSession(&createSessionRequest)
	if err != nil {
		return CredentialProcessOutput{}, err
	}
	if len(output.CredentialSet) == 0 {
		msg := "unable to obtain temporary security credentials from CreateSession"
		return CredentialProcessOutput{}, errors.New(msg)
	}
	credentials := output.CredentialSet[0].Credentials
	credentialProcessOutput := CredentialProcessOutput{
		Version:         1,
		AccessKeyId:     *credentials.AccessKeyId,
		SecretAccessKey: *credentials.SecretAccessKey,
		SessionToken:    *credentials.SessionToken,
		Expiration:      *credentials.Expiration,
	}
	return credentialProcessOutput, nil
}
141
rolesanywhere-credential-helper
aws
Go
package aws_signing_helper

import (
	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws/arn"
)

// DefaultPort is the port used for the local credentials endpoint when the
// caller does not pick one.
const DefaultPort = 9911

// LocalHostAddress restricts the listener to the loopback interface.
const LocalHostAddress = "127.0.0.1"

// RefreshTime is the window before expiration within which cached
// credentials are refreshed.
var RefreshTime = time.Minute * time.Duration(5)

// RefreshableCred mirrors the JSON document served by the EC2 instance
// metadata security-credentials endpoint.
type RefreshableCred struct {
	AccessKeyId     string
	SecretAccessKey string
	Token           string
	Code            string
	Type            string
	Expiration      time.Time
	LastUpdated     time.Time
}

// Endpoint bundles the local HTTP server, its port, and the cached
// credentials it serves.
type Endpoint struct {
	PortNum int
	Server  *http.Server
	TmpCred RefreshableCred
}

// SessionToken records when a metadata-service session token expires.
type SessionToken struct {
	Expiration time.Time
}

// Paths and headers of the IMDSv2-compatible local endpoint.
const TOKEN_RESOURCE_PATH = "/latest/api/token"
const SECURITY_CREDENTIALS_RESOURCE_PATH = "/latest/meta-data/iam/security-credentials/"

const EC2_METADATA_TOKEN_HEADER = "x-aws-ec2-metadata-token"
const EC2_METADATA_TOKEN_TTL_HEADER = "x-aws-ec2-metadata-token-ttl-seconds"

const DEFAULT_TOKEN_TTL_SECONDS = "21600"

const X_FORWARDED_FOR_HEADER = "X-Forwarded-For"

const REFRESHABLE_CRED_TYPE = "AWS-HMAC"
const REFRESHABLE_CRED_CODE = "Success"

// MAX_TOKENS caps the number of live session tokens; the earliest-expiring
// token is evicted once the cap is reached.
const MAX_TOKENS = 256

// mutex guards tokenMap, which maps each session token to its expiration.
var mutex sync.Mutex
var tokenMap = make(map[string]time.Time)

// GenerateToken returns a cryptographically random base64 string of the
// requested length. length must be in [0, 128).
func GenerateToken(length int) (string, error) {
	if length < 0 || length >= 128 {
		msg := "invalid token length"
		return "", errors.New(msg)
	}
	randomBytes := make([]byte, 128)
	_, err := rand.Read(randomBytes)
	if err != nil {
		return "", err
	}
	// 128 random bytes encode to 172+ base64 characters, so the slice below
	// is always in range for the permitted lengths.
	return base64.StdEncoding.EncodeToString(randomBytes)[:length], nil
}

// InsertToken records a token and its expiration. When the map is at
// capacity, the token that expires the earliest is evicted first.
func InsertToken(token string, expirationTime time.Time) error {
	mutex.Lock()
	if len(tokenMap) == MAX_TOKENS {
		// Scan for the earliest-expiring entry and drop it.
		earliestExpirationTime := time.Unix(1<<63-1, 0)
		var earliestExpiringToken string
		for key, value := range tokenMap {
			if earliestExpirationTime.After(value) {
				earliestExpiringToken = key
				earliestExpirationTime = value
			}
		}
		delete(tokenMap, earliestExpiringToken)
		log.Printf("evicting earliest expiring token: %s", earliestExpiringToken)
	}
	tokenMap[token] = expirationTime
	mutex.Unlock()
	return nil
}

// CheckValidToken writes a 401 response and returns an error unless the
// request carries a known, unexpired session token.
func CheckValidToken(w http.ResponseWriter, r *http.Request) error {
	token := r.Header.Get(EC2_METADATA_TOKEN_HEADER)
	if token == "" {
		w.WriteHeader(http.StatusUnauthorized)
		msg := "no token provided"
		io.WriteString(w, msg)
		return errors.New(msg)
	}
	mutex.Lock()
	expiration, ok := tokenMap[token]
	mutex.Unlock()
	if ok {
		// Known token, but it may have expired since it was issued.
		if time.Now().After(expiration) {
			w.WriteHeader(http.StatusUnauthorized)
			msg := "invalid token provided"
			io.WriteString(w, msg)
			return errors.New(msg)
		}
	} else {
		w.WriteHeader(http.StatusUnauthorized)
		msg := "invalid token provided"
		io.WriteString(w, msg)
		return errors.New(msg)
	}
	return nil
}

// FindTokenTTLSeconds returns the remaining lifetime of the request's
// session token, truncated to whole seconds, as a decimal string.
func FindTokenTTLSeconds(r *http.Request) (string, error) {
	token := r.Header.Get(EC2_METADATA_TOKEN_HEADER)
	if token == "" {
		msg := "no token provided"
		return "", errors.New(msg)
	}
	mutex.Lock()
	expiration, ok := tokenMap[token]
	mutex.Unlock()
	if ok {
		tokenTTLFloat := expiration.Sub(time.Now()).Seconds()
		tokenTTLInt64 := int64(tokenTTLFloat)
		return strconv.FormatInt(tokenTTLInt64, 10), nil
	} else {
		msg := "invalid token provided"
		return "", errors.New(msg)
	}
}

// AllIssuesHandlers builds the three HTTP handlers of the local endpoint:
// token issuance (PUT /latest/api/token), role-name lookup, and credential
// retrieval. cred is shared, refreshed in place when close to expiry.
func AllIssuesHandlers(cred *RefreshableCred, roleName string, opts *CredentialsOpts) (http.HandlerFunc, http.HandlerFunc, http.HandlerFunc) {
	// Handles PUT requests to /latest/api/token/
	putTokenHandler := func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "PUT" {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}

		// Check for the presence of the X-Forwarded-For header; requests
		// that traversed a proxy are rejected, matching IMDSv2 behavior.
		xForwardedForHeader := r.Header.Get(X_FORWARDED_FOR_HEADER) // canonicalized headers are used (casing doesn't matter)
		if xForwardedForHeader != "" {
			w.WriteHeader(http.StatusBadRequest)
			io.WriteString(w, "unable to process requests with X-Forwarded-For header")
			return
		}

		// Obtain the token TTL, defaulting to 6 hours; valid range is
		// 1..21600 seconds.
		tokenTTLStr := r.Header.Get(EC2_METADATA_TOKEN_TTL_HEADER)
		if tokenTTLStr == "" {
			tokenTTLStr = DEFAULT_TOKEN_TTL_SECONDS
		}
		tokenTTL, err := strconv.Atoi(tokenTTLStr)
		if err != nil || tokenTTL < 1 || tokenTTL > 21600 {
			w.WriteHeader(http.StatusBadRequest)
			io.WriteString(w, "invalid token TTL")
			return
		}

		// Generate token and insert it into map
		token, err := GenerateToken(100)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			io.WriteString(w, "unable to generate token")
			return
		}
		expirationTime := time.Now().Add(time.Second * time.Duration(tokenTTL))
		InsertToken(token, expirationTime)

		w.Header().Set(EC2_METADATA_TOKEN_TTL_HEADER, tokenTTLStr)
		io.WriteString(w, token) // nosemgrep
	}

	// Handles requests to /latest/meta-data/iam/security-credentials/
	getRoleNameHandler := func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "GET" {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}

		err := CheckValidToken(w, r)
		if err != nil {
			return
		}

		tokenTTL, err := FindTokenTTLSeconds(r)
		if err != nil {
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		w.Header().Set(EC2_METADATA_TOKEN_TTL_HEADER, tokenTTL)
		io.WriteString(w, roleName) // nosemgrep
	}

	// Handles GET requests to /latest/meta-data/iam/security-credentials/<ROLE_NAME>
	getCredentialsHandler := func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "GET" {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}

		err := CheckValidToken(w, r)
		if err != nil {
			return
		}

		// Refresh the shared credentials in place when they are within the
		// refresh window of expiring.
		// NOTE(review): the GenerateCredentials error is discarded here, so
		// a failed refresh serves stale/zeroed fields — confirm intent.
		var nextRefreshTime = cred.Expiration.Add(-RefreshTime)
		if time.Until(nextRefreshTime) < RefreshTime {
			credentialProcessOutput, _ := GenerateCredentials(opts)
			cred.AccessKeyId = credentialProcessOutput.AccessKeyId
			cred.SecretAccessKey = credentialProcessOutput.SecretAccessKey
			cred.Token = credentialProcessOutput.SessionToken
			cred.Expiration, _ = time.Parse(time.RFC3339, credentialProcessOutput.Expiration)
			cred.Code = REFRESHABLE_CRED_CODE
			cred.LastUpdated = time.Now()
			cred.Type = REFRESHABLE_CRED_TYPE
			err := json.NewEncoder(w).Encode(cred)
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				io.WriteString(w, "failed to encode credentials")
				return
			}
		} else {
			err := json.NewEncoder(w).Encode(cred)
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				io.WriteString(w, "failed to encode credentials")
				return
			}
		}
		// NOTE(review): the TTL header is set after the body has already
		// been written by Encode above, so it likely never reaches the
		// client — verify against the intended response shape.
		tokenTTL, err := FindTokenTTLSeconds(r)
		if err != nil {
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		w.Header().Set(EC2_METADATA_TOKEN_TTL_HEADER, tokenTTL)
	}

	return putTokenHandler, getRoleNameHandler, getCredentialsHandler
}

// Serve starts a loopback-only HTTP server that emulates the EC2 instance
// metadata credentials endpoints, backed by Roles Anywhere sessions. It
// blocks until the server fails, and exits the process on startup errors.
func Serve(port int, credentialsOptions CredentialsOpts) {
	var refreshableCred = RefreshableCred{}

	roleArn, err := arn.Parse(credentialsOptions.RoleArn)
	if err != nil {
		log.Println("invalid role ARN")
		os.Exit(1)
	}

	// Prime the credential cache once before serving.
	// NOTE(review): the GenerateCredentials error is ignored — a failure
	// here starts the server with empty credentials; confirm intent.
	credentialProcessOutput, _ := GenerateCredentials(&credentialsOptions)
	refreshableCred.AccessKeyId = credentialProcessOutput.AccessKeyId
	refreshableCred.SecretAccessKey = credentialProcessOutput.SecretAccessKey
	refreshableCred.Token = credentialProcessOutput.SessionToken
	refreshableCred.Expiration, _ = time.Parse(time.RFC3339, credentialProcessOutput.Expiration)
	refreshableCred.Code = REFRESHABLE_CRED_CODE
	refreshableCred.LastUpdated = time.Now()
	refreshableCred.Type = REFRESHABLE_CRED_TYPE
	endpoint := &Endpoint{PortNum: port, TmpCred: refreshableCred}
	endpoint.Server = &http.Server{}
	roleResourceParts := strings.Split(roleArn.Resource, "/")
	roleName := roleResourceParts[len(roleResourceParts)-1] // Find role name without path

	// Handlers are registered on the default mux.
	putTokenHandler, getRoleNameHandler, getCredentialsHandler := AllIssuesHandlers(&endpoint.TmpCred, roleName, &credentialsOptions)
	http.HandleFunc(TOKEN_RESOURCE_PATH, putTokenHandler)
	http.HandleFunc(SECURITY_CREDENTIALS_RESOURCE_PATH, getRoleNameHandler)
	http.HandleFunc(SECURITY_CREDENTIALS_RESOURCE_PATH+roleName, getCredentialsHandler)

	// Background thread that cleans up expired tokens every five seconds.
	ticker := time.NewTicker(5 * time.Second)
	go func() {
		for range ticker.C {
			curTime := time.Now()
			mutex.Lock()
			for key, value := range tokenMap {
				if curTime.After(value) {
					delete(tokenMap, key)
					log.Printf("removed expired token: %s", key)
				}
			}
			mutex.Unlock()
		}
	}()

	// Start the credentials endpoint; port 0 lets the OS pick, so read the
	// actual bound port back from the listener.
	listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", LocalHostAddress, endpoint.PortNum))
	if err != nil {
		log.Println("failed to create listener")
		os.Exit(1)
	}
	endpoint.PortNum = listener.Addr().(*net.TCPAddr).Port
	log.Println("Local server started on port:", endpoint.PortNum)
	log.Println("Make it available to the sdk by running:")
	log.Printf("export AWS_EC2_METADATA_SERVICE_ENDPOINT=http://%s:%d/", LocalHostAddress, endpoint.PortNum)
	if err := endpoint.Server.Serve(listener); err != nil {
		log.Println("Httpserver: ListenAndServe() error")
		os.Exit(1)
	}
}
320
rolesanywhere-credential-helper
aws
Go
package aws_signing_helper

import (
	"bytes"
	"crypto"
	"crypto/ecdsa"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/sha512"
	"crypto/x509"
	"encoding/base64"
	"encoding/hex"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"sort"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
)

// SigningOpts selects the key and digest for a signing operation.
type SigningOpts struct {
	// Private key to use for the signing operation.
	PrivateKey crypto.PrivateKey

	// Digest to use in the signing operation. For example, SHA256
	Digest crypto.Hash
}

// Container for data that will be sent in a request to CreateSession.
type RequestOpts struct {
	// ARN of the Role to assume in the CreateSession call.
	RoleArn string

	// ARN of the Configuration to use in the CreateSession call.
	ConfigurationArn string

	// Certificate, as base64-encoded DER; used in the `x-amz-x509`
	// header in the API request.
	CertificateData string

	// Duration of the session that will be returned by CreateSession.
	DurationSeconds int
}

// RequestHeaderOpts carries the certificate used in request headers.
type RequestHeaderOpts struct {
	// Certificate, as base64-encoded DER; used in the `x-amz-x509`
	// header in the API request.
	CertificateData string
}

// RequestQueryStringOpts carries the ARNs used in the query string.
type RequestQueryStringOpts struct {
	// ARN of the Role to assume in the CreateSession call.
	RoleArn string

	// ARN of the Configuration to use in the CreateSession call.
	ConfigurationArn string
}

// SignerParams captures the date, region, service, and algorithm that scope
// a SigV4-X509 signature.
type SignerParams struct {
	OverriddenDate   time.Time
	RegionName       string
	ServiceName      string
	SigningAlgorithm string
}

// Container for data returned after performing a signing operation.
type SigningResult struct {
	// Signature encoded in hex.
	Signature string `json:"signature"`
}

// Container for certificate data returned to the SDK as JSON.
type CertificateData struct {
	// Type for the key contained in the certificate.
	// Passed back to the `sign-string` command
	KeyType string `json:"keyType"`

	// Certificate, as base64-encoded DER; used in the `x-amz-x509`
	// header in the API request.
	CertificateData string `json:"certificateData"`

	// Serial number of the certificate. Used in the credential
	// field of the Authorization header
	SerialNumber string `json:"serialNumber"`

	// Supported signing algorithms based on the KeyType
	Algorithms []string `json:"supportedAlgorithms"`
}

// Container that adheres to the format of credential_process output as specified by AWS.
type CredentialProcessOutput struct {
	// This field should be hard-coded to 1 for now.
	Version int `json:"Version"`

	// AWS Access Key ID
	AccessKeyId string `json:"AccessKeyId"`

	// AWS Secret Access Key
	SecretAccessKey string `json:"SecretAccessKey"`

	// AWS Session Token for temporary credentials
	SessionToken string `json:"SessionToken"`

	// ISO8601 timestamp for when the credentials expire
	Expiration string `json:"Expiration"`
}

// RolesAnywhereSigner holds the identity material used to sign requests.
// Keys are stored by value (see ReadPrivateKeyData), which is why the type
// assertions below use value types rather than pointer types.
type RolesAnywhereSigner struct {
	PrivateKey       crypto.PrivateKey
	Certificate      x509.Certificate
	CertificateChain []x509.Certificate
}

// Define constants used in signing
const (
	aws4_x509_rsa_sha256   = "AWS4-X509-RSA-SHA256"
	aws4_x509_ecdsa_sha256 = "AWS4-X509-ECDSA-SHA256"
	timeFormat             = "20060102T150405Z"
	shortTimeFormat        = "20060102"
	x_amz_date             = "X-Amz-Date"
	x_amz_x509             = "X-Amz-X509"
	x_amz_x509_chain       = "X-Amz-X509-Chain"
	x_amz_content_sha256   = "X-Amz-Content-Sha256"
	authorization          = "Authorization"
	host                   = "Host"
	emptyStringSHA256      = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
)

// Headers that aren't included in calculating the signature
var ignoredHeaderKeys = map[string]bool{
	"Authorization":   true,
	"User-Agent":      true,
	"X-Amzn-Trace-Id": true,
}

// Obtain the date-time, formatted as specified by SigV4
func (signerParams *SignerParams) GetFormattedSigningDateTime() string {
	return signerParams.OverriddenDate.UTC().Format(timeFormat)
}

// Obtain the short date-time, formatted as specified by SigV4
func (signerParams *SignerParams) GetFormattedShortSigningDateTime() string {
	return signerParams.OverriddenDate.UTC().Format(shortTimeFormat)
}

// Obtain the scope as part of the SigV4-X509 signature
// (date/region/service/aws4_request).
func (signerParams *SignerParams) GetScope() string {
	var scopeStringBuilder strings.Builder
	scopeStringBuilder.WriteString(signerParams.GetFormattedShortSigningDateTime())
	scopeStringBuilder.WriteString("/")
	scopeStringBuilder.WriteString(signerParams.RegionName)
	scopeStringBuilder.WriteString("/")
	scopeStringBuilder.WriteString(signerParams.ServiceName)
	scopeStringBuilder.WriteString("/")
	scopeStringBuilder.WriteString("aws4_request")
	return scopeStringBuilder.String()
}

// Convert certificate to string, so that it can be present in the HTTP request header
func certificateToString(certificate x509.Certificate) string {
	return base64.StdEncoding.EncodeToString(certificate.Raw)
}

// Convert certificate chain to a comma-separated string, so that it can be
// present in the HTTP request header
func certificateChainToString(certificateChain []x509.Certificate) string {
	var x509ChainString strings.Builder
	for i, certificate := range certificateChain {
		x509ChainString.WriteString(certificateToString(certificate))
		if i != len(certificateChain)-1 {
			x509ChainString.WriteString(",")
		}
	}
	return x509ChainString.String()
}

// Create a function that will sign requests, given the signing certificate, optional certificate chain, and the private key
func CreateSignFunction(privateKey crypto.PrivateKey, certificate x509.Certificate, certificateChain []x509.Certificate) func(*request.Request) {
	v4x509 := RolesAnywhereSigner{privateKey, certificate, certificateChain}
	return func(r *request.Request) {
		v4x509.SignWithCurrTime(r)
	}
}

// Sign the request using the current time
func (v4x509 RolesAnywhereSigner) SignWithCurrTime(req *request.Request) error {
	// Find the signing algorithm from the key type; unknown types fail.
	var signingAlgorithm string
	_, isRsaKey := v4x509.PrivateKey.(rsa.PrivateKey)
	if isRsaKey {
		signingAlgorithm = aws4_x509_rsa_sha256
	}
	_, isEcKey := v4x509.PrivateKey.(ecdsa.PrivateKey)
	if isEcKey {
		signingAlgorithm = aws4_x509_ecdsa_sha256
	}
	if signingAlgorithm == "" {
		log.Println("unsupported algorithm")
		return errors.New("unsupported algorithm")
	}

	// Prefer the client's signing region/name, falling back to the config.
	region := req.ClientInfo.SigningRegion
	if region == "" {
		region = aws.StringValue(req.Config.Region)
	}

	name := req.ClientInfo.SigningName
	if name == "" {
		name = req.ClientInfo.ServiceName
	}

	signerParams := SignerParams{time.Now(), region, name, signingAlgorithm}

	// Set headers that are necessary for signing
	req.HTTPRequest.Header.Set(host, req.HTTPRequest.URL.Host)
	req.HTTPRequest.Header.Set(x_amz_date, signerParams.GetFormattedSigningDateTime())
	req.HTTPRequest.Header.Set(x_amz_x509, certificateToString(v4x509.Certificate))
	if v4x509.CertificateChain != nil {
		req.HTTPRequest.Header.Set(x_amz_x509_chain, certificateChainToString(v4x509.CertificateChain))
	}

	contentSha256 := calculateContentHash(req.HTTPRequest, req.Body)
	// A sentinel value of "required" asks for the hash to be placed in the
	// X-Amz-Content-Sha256 header.
	if req.HTTPRequest.Header.Get(x_amz_content_sha256) == "required" {
		req.HTTPRequest.Header.Set(x_amz_content_sha256, contentSha256)
	}

	canonicalRequest, signedHeadersString := createCanonicalRequest(req.HTTPRequest, req.Body, contentSha256)

	stringToSign := CreateStringToSign(canonicalRequest, signerParams)

	// NOTE(review): the Sign error is discarded; a failed signature would
	// still be written into the Authorization header — confirm intent.
	signingResult, _ := Sign([]byte(stringToSign), SigningOpts{v4x509.PrivateKey, crypto.SHA256})

	req.HTTPRequest.Header.Set(authorization, BuildAuthorizationHeader(req.HTTPRequest, req.Body, signedHeadersString, signingResult.Signature, v4x509.Certificate, signerParams))
	req.SignedHeaderVals = req.HTTPRequest.Header
	return nil
}

// Find the SHA256 hash of the provided request body as a io.ReadSeeker;
// the reader position is restored before returning.
func makeSha256Reader(reader io.ReadSeeker) []byte {
	hash := sha256.New()
	start, _ := reader.Seek(0, 1)
	defer reader.Seek(start, 0)

	io.Copy(hash, reader)
	return hash.Sum(nil)
}

// Calculate the hash of the request body, reusing a caller-supplied
// X-Amz-Content-Sha256 header when present; a nil body hashes to the
// well-known empty-string SHA256.
func calculateContentHash(r *http.Request, body io.ReadSeeker) string {
	hash := r.Header.Get(x_amz_content_sha256)

	if hash == "" {
		if body == nil {
			hash = emptyStringSHA256
		} else {
			hash = hex.EncodeToString(makeSha256Reader(body))
		}
	}

	return hash
}

// Create the canonical query string.
func createCanonicalQueryString(r *http.Request, body io.ReadSeeker) string {
	rawQuery := strings.Replace(r.URL.Query().Encode(), "+", "%20", -1)
	return rawQuery
}

// Create the canonical header string. Returns the canonical headers plus
// the semicolon-joined list of signed header names.
func createCanonicalHeaderString(r *http.Request) (string, string) {
	var headers []string
	signedHeaderVals := make(http.Header)
	for k, v := range r.Header {
		canonicalKey := http.CanonicalHeaderKey(k)
		if ignoredHeaderKeys[canonicalKey] {
			continue
		}

		lowerCaseKey := strings.ToLower(k)
		if _, ok := signedHeaderVals[lowerCaseKey]; ok {
			// include additional values
			signedHeaderVals[lowerCaseKey] = append(signedHeaderVals[lowerCaseKey], v...)
			continue
		}

		headers = append(headers, lowerCaseKey)
		signedHeaderVals[lowerCaseKey] = v
	}
	sort.Strings(headers)

	headerValues := make([]string, len(headers))
	for i, k := range headers {
		headerValues[i] = k + ":" + strings.Join(signedHeaderVals[k], ",")
	}
	stripExcessSpaces(headerValues)
	return strings.Join(headerValues, "\n"), strings.Join(headers, ";")
}

// NOTE(review): the upstream SigV4 signer defines this constant as two
// spaces; verify this literal against the original source.
const doubleSpace = " "

// stripExcessSpaces will rewrite the passed in slice's string values to not
// contain muliple side-by-side spaces.
func stripExcessSpaces(vals []string) {
	var j, k, l, m, spaces int
	for i, str := range vals {
		// Trim trailing spaces
		for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
		}

		// Trim leading spaces
		for k = 0; k < j && str[k] == ' '; k++ {
		}
		str = str[k : j+1]

		// Strip multiple spaces.
		j = strings.Index(str, doubleSpace)
		if j < 0 {
			vals[i] = str
			continue
		}

		buf := []byte(str)
		for k, m, l = j, j, len(buf); k < l; k++ {
			if buf[k] == ' ' {
				if spaces == 0 {
					// First space.
					buf[m] = buf[k]
					m++
				}
				spaces++
			} else {
				// End of multiple spaces.
				spaces = 0
				buf[m] = buf[k]
				m++
			}
		}

		vals[i] = string(buf[:m])
	}
}

// Create the canonical request. The method and path are fixed to
// POST /sessions (the CreateSession endpoint). Returns the hex-encoded
// SHA256 of the canonical request plus the signed-headers string.
func createCanonicalRequest(r *http.Request, body io.ReadSeeker, contentSha256 string) (string, string) {
	var canonicalRequestStrBuilder strings.Builder
	canonicalHeaderString, signedHeadersString := createCanonicalHeaderString(r)
	canonicalRequestStrBuilder.WriteString("POST")
	canonicalRequestStrBuilder.WriteString("\n")
	canonicalRequestStrBuilder.WriteString("/sessions")
	canonicalRequestStrBuilder.WriteString("\n")
	canonicalRequestStrBuilder.WriteString(createCanonicalQueryString(r, body))
	canonicalRequestStrBuilder.WriteString("\n")
	canonicalRequestStrBuilder.WriteString(canonicalHeaderString)
	canonicalRequestStrBuilder.WriteString("\n\n")
	canonicalRequestStrBuilder.WriteString(signedHeadersString)
	canonicalRequestStrBuilder.WriteString("\n")
	canonicalRequestStrBuilder.WriteString(contentSha256)
	canonicalRequestString := canonicalRequestStrBuilder.String()
	canonicalRequestStringHashBytes := sha256.Sum256([]byte(canonicalRequestString))
	return hex.EncodeToString(canonicalRequestStringHashBytes[:]), signedHeadersString
}

// Create the string to sign.
func CreateStringToSign(canonicalRequest string, signerParams SignerParams) string {
	var stringToSignStrBuilder strings.Builder
	stringToSignStrBuilder.WriteString(signerParams.SigningAlgorithm)
	stringToSignStrBuilder.WriteString("\n")
	stringToSignStrBuilder.WriteString(signerParams.GetFormattedSigningDateTime())
	stringToSignStrBuilder.WriteString("\n")
	stringToSignStrBuilder.WriteString(signerParams.GetScope())
	stringToSignStrBuilder.WriteString("\n")
	stringToSignStrBuilder.WriteString(canonicalRequest)
	stringToSign := stringToSignStrBuilder.String()
	return stringToSign
}

// Builds the complete authorization header; the credential field is the
// certificate serial number followed by the SigV4 scope.
func BuildAuthorizationHeader(request *http.Request, body io.ReadSeeker, signedHeadersString string, signature string, certificate x509.Certificate, signerParams SignerParams) string {
	signingCredentials := certificate.SerialNumber.String() + "/" + signerParams.GetScope()
	credential := "Credential=" + signingCredentials
	signerHeaders := "SignedHeaders=" + signedHeadersString
	signatureHeader := "Signature=" + signature

	var authHeaderStringBuilder strings.Builder
	authHeaderStringBuilder.WriteString(signerParams.SigningAlgorithm)
	authHeaderStringBuilder.WriteString(" ")
	authHeaderStringBuilder.WriteString(credential)
	authHeaderStringBuilder.WriteString(", ")
	authHeaderStringBuilder.WriteString(signerHeaders)
	authHeaderStringBuilder.WriteString(", ")
	authHeaderStringBuilder.WriteString(signatureHeader)
	authHeaderString := authHeaderStringBuilder.String()
	return authHeaderString
}

// Sign the provided payload with the specified options. Supports ECDSA
// (ASN.1 signature) and RSA (PKCS#1 v1.5) keys stored by value; the
// signature is returned hex-encoded.
func Sign(payload []byte, opts SigningOpts) (SigningResult, error) {
	var hash []byte
	switch opts.Digest {
	case crypto.SHA256:
		sum := sha256.Sum256(payload)
		hash = sum[:]
	case crypto.SHA384:
		sum := sha512.Sum384(payload)
		hash = sum[:]
	case crypto.SHA512:
		sum := sha512.Sum512(payload)
		hash = sum[:]
	default:
		log.Println("unsupported digest")
		return SigningResult{}, errors.New("unsupported digest")
	}

	ecdsaPrivateKey, ok := opts.PrivateKey.(ecdsa.PrivateKey)
	if ok {
		sig, err := ecdsa.SignASN1(rand.Reader, &ecdsaPrivateKey, hash[:])
		if err == nil {
			return SigningResult{hex.EncodeToString(sig)}, nil
		}
	}

	rsaPrivateKey, ok := opts.PrivateKey.(rsa.PrivateKey)
	if ok {
		sig, err := rsa.SignPKCS1v15(rand.Reader, &rsaPrivateKey, opts.Digest, hash[:])
		if err == nil {
			return SigningResult{hex.EncodeToString(sig)}, nil
		}
	}

	log.Println("unsupported algorithm")
	return SigningResult{}, errors.New("unsupported algorithm")
}

// encodeDer base64-encodes raw DER bytes. The error return is always nil.
func encodeDer(der []byte) (string, error) {
	var buf bytes.Buffer
	encoder := base64.NewEncoder(base64.StdEncoding, &buf)
	encoder.Write(der)
	encoder.Close()
	return buf.String(), nil
}

// parseDERFromPEM reads the file at pemDataId and returns the first PEM
// block of the requested type.
func parseDERFromPEM(pemDataId string, blockType string) (*pem.Block, error) {
	bytes, err := os.ReadFile(pemDataId)
	if err != nil {
		log.Println(err)
		return nil, err
	}
	var block *pem.Block
	for len(bytes) > 0 {
		block, bytes = pem.Decode(bytes)
		if block == nil {
			return nil, errors.New("unable to parse PEM data")
		}
		if block.Type == blockType {
			return block, nil
		}
	}
	return nil, errors.New("requested block type could not be found")
}

// Reads certificate bundle data from a file, whose path is provided.
// Every PEM block in the file must be a CERTIFICATE block.
func ReadCertificateBundleData(certificateBundleId string) ([]*x509.Certificate, error) {
	bytes, err := os.ReadFile(certificateBundleId)
	if err != nil {
		log.Println(err)
		return nil, err
	}
	var derBytes []byte
	var block *pem.Block
	for len(bytes) > 0 {
		block, bytes = pem.Decode(bytes)
		if block == nil {
			return nil, errors.New("unable to parse PEM data")
		}
		if block.Type != "CERTIFICATE" {
			return nil, errors.New("invalid certificate chain")
		}
		blockBytes := block.Bytes
		derBytes = append(derBytes, blockBytes...)
	}
	return x509.ParseCertificates(derBytes)
}

// readECPrivateKey loads a SEC 1 "EC PRIVATE KEY" PEM file and returns the
// key by value.
func readECPrivateKey(privateKeyId string) (ecdsa.PrivateKey, error) {
	block, err := parseDERFromPEM(privateKeyId, "EC PRIVATE KEY")
	if err != nil {
		return ecdsa.PrivateKey{}, errors.New("could not parse PEM data")
	}

	privateKey, err := x509.ParseECPrivateKey(block.Bytes)
	if err != nil {
		return ecdsa.PrivateKey{}, errors.New("could not parse private key")
	}

	return *privateKey, nil
}

// readRSAPrivateKey loads a PKCS#1 "RSA PRIVATE KEY" PEM file and returns
// the key by value.
func readRSAPrivateKey(privateKeyId string) (rsa.PrivateKey, error) {
	block, err := parseDERFromPEM(privateKeyId, "RSA PRIVATE KEY")
	if err != nil {
		return rsa.PrivateKey{}, errors.New("could not parse PEM data")
	}

	privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		return rsa.PrivateKey{}, errors.New("could not parse private key")
	}

	return *privateKey, nil
}

// readPKCS8PrivateKey loads a PKCS#8 "PRIVATE KEY" PEM file and returns the
// contained RSA or EC key by value.
func readPKCS8PrivateKey(privateKeyId string) (crypto.PrivateKey, error) {
	block, err := parseDERFromPEM(privateKeyId, "PRIVATE KEY")
	if err != nil {
		return nil, errors.New("could not parse PEM data")
	}

	privateKey, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		return nil, errors.New("could not parse private key")
	}

	rsaPrivateKey, ok := privateKey.(*rsa.PrivateKey)
	if ok {
		return *rsaPrivateKey, nil
	}

	ecPrivateKey, ok := privateKey.(*ecdsa.PrivateKey)
	if ok {
		return *ecPrivateKey, nil
	}

	return nil, errors.New("could not parse PKCS8 private key")
}

// Load the private key referenced by `privateKeyId`, trying PKCS#8, then
// SEC 1 EC, then PKCS#1 RSA encodings in that order.
func ReadPrivateKeyData(privateKeyId string) (crypto.PrivateKey, error) {
	if key, err := readPKCS8PrivateKey(privateKeyId); err == nil {
		return key, nil
	}

	if key, err := readECPrivateKey(privateKeyId); err == nil {
		return key, nil
	}

	if key, err := readRSAPrivateKey(privateKeyId); err == nil {
		return key, nil
	}

	return nil, errors.New("unable to parse private key")
}

// Load the certificate referenced by `certificateId` and extract
// details required by the SDK to construct the StringToSign.
func ReadCertificateData(certificateId string) (CertificateData, error) {
	block, err := parseDERFromPEM(certificateId, "CERTIFICATE")
	if err != nil {
		return CertificateData{}, errors.New("could not parse PEM data")
	}

	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Println("could not parse certificate", err)
		return CertificateData{}, errors.New("could not parse certificate")
	}

	//extract serial number
	serialNumber := cert.SerialNumber.String()

	//encode certificate
	encodedDer, _ := encodeDer(block.Bytes)

	//extract key type
	var keyType string
	switch cert.PublicKeyAlgorithm {
	case x509.RSA:
		keyType = "RSA"
	case x509.ECDSA:
		keyType = "EC"
	default:
		keyType = ""
	}

	supportedAlgorithms := []string{
		fmt.Sprintf("%sSHA256", keyType),
		fmt.Sprintf("%sSHA384", keyType),
		fmt.Sprintf("%sSHA512", keyType),
	}

	//return struct
	return CertificateData{keyType, encodedDer, serialNumber, supportedAlgorithms}, nil
}
591
rolesanywhere-credential-helper
aws
Go
package aws_signing_helper import ( "crypto" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/rsa" "crypto/sha256" "crypto/sha512" "crypto/x509" "encoding/base64" "encoding/hex" "errors" "io/ioutil" "log" "net/http" "net/http/httptest" "os" "os/exec" "strings" "testing" "time" "unicode/utf8" "github.com/aws/aws-sdk-go/aws/request" ) const TestCredentialsFilePath = "/tmp/credentials" func setup() error { generateCertsScript := exec.Command("/bin/bash", "../generate-certs.sh") _, err := generateCertsScript.Output() if err != nil { return err } generateCredentialProcessDataScript := exec.Command("/bin/bash", "../generate-credential-process-data.sh") _, err = generateCredentialProcessDataScript.Output() return err } func TestMain(m *testing.M) { err := setup() if err != nil { log.Println(err.Error()) os.Exit(1) } code := m.Run() os.Exit(code) } // Simple struct to define fixtures type CertData struct { CertPath string KeyType string } // Certificate fixtures should be generated by the script ./generate-certs.sh // if they do not exist, or need to be updated. func TestReadCertificateData(t *testing.T) { fixtures := []CertData{ {"../tst/certs/ec-prime256v1-sha256-cert.pem", "EC"}, {"../tst/certs/rsa-2048-sha256-cert.pem", "RSA"}, } for _, fixture := range fixtures { certData, err := ReadCertificateData(fixture.CertPath) if err != nil { t.Log("Failed to read certificate data") t.Fail() } if certData.KeyType != fixture.KeyType { t.Logf("Wrong key type. 
Expected %s, got %s", fixture.KeyType, certData.KeyType) t.Fail() } } } func TestReadInvalidCertificateData(t *testing.T) { _, err := ReadCertificateData("../tst/certs/invalid-rsa-cert.pem") if err == nil || !strings.Contains(err.Error(), "could not parse certificate") { t.Log("Failed to throw a handled error") t.Fail() } } func TestReadCertificateBundleData(t *testing.T) { _, err := ReadCertificateBundleData("../tst/certs/cert-bundle.pem") if err != nil { t.Log("Failed to read certificate bundle data") t.Fail() } } func TestReadPrivateKeyData(t *testing.T) { fixtures := []string{ "../tst/certs/ec-prime256v1-key.pem", "../tst/certs/ec-prime256v1-key-pkcs8.pem", "../tst/certs/rsa-2048-key.pem", "../tst/certs/rsa-2048-key-pkcs8.pem", } for _, fixture := range fixtures { _, err := ReadPrivateKeyData(fixture) if err != nil { t.Log(fixture) t.Log(err) t.Log("Failed to read private key data") t.Fail() } } } func TestReadInvalidPrivateKeyData(t *testing.T) { _, err := ReadPrivateKeyData("../tst/certs/invalid-rsa-key.pem") if err == nil || !strings.Contains(err.Error(), "unable to parse private key") { t.Log("Failed to throw a handled error") t.Fail() } } func TestBuildAuthorizationHeader(t *testing.T) { testRequest, err := http.NewRequest("POST", "https://rolesanywhere.us-west-2.amazonaws.com", nil) if err != nil { t.Log(err) t.Fail() } privateKey, _ := ReadPrivateKeyData("../tst/certs/rsa-2048-key.pem") certificateData, _ := ReadCertificateData("../tst/certs/rsa-2048-sha256-cert.pem") certificateDerData, _ := base64.StdEncoding.DecodeString(certificateData.CertificateData) certificate, _ := x509.ParseCertificate([]byte(certificateDerData)) awsRequest := request.Request{HTTPRequest: testRequest} v4x509 := RolesAnywhereSigner{ PrivateKey: privateKey, Certificate: *certificate, } err = v4x509.SignWithCurrTime(&awsRequest) if err != nil { t.Log(err) t.Fail() } } // Verify that the provided payload was signed correctly with the provided options. 
// This function is specifically used for unit testing. func Verify(payload []byte, opts SigningOpts, sig []byte) (bool, error) { var hash []byte switch opts.Digest { case crypto.SHA256: sum := sha256.Sum256(payload) hash = sum[:] case crypto.SHA384: sum := sha512.Sum384(payload) hash = sum[:] case crypto.SHA512: sum := sha512.Sum512(payload) hash = sum[:] default: log.Fatal("Unsupported digest") return false, errors.New("Unsupported digest") } { privateKey, ok := opts.PrivateKey.(ecdsa.PrivateKey) if ok { valid := ecdsa.VerifyASN1(&privateKey.PublicKey, hash, sig) if valid { return valid, nil } } } { privateKey, ok := opts.PrivateKey.(rsa.PrivateKey) if ok { err := rsa.VerifyPKCS1v15(&privateKey.PublicKey, opts.Digest, hash, sig) if err == nil { return true, nil } } } return false, nil } func TestSign(t *testing.T) { msg := "test message" var privateKeyList [2]crypto.PrivateKey { privateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) privateKeyList[0] = *privateKey } { privateKey, _ := rsa.GenerateKey(rand.Reader, 2048) privateKeyList[1] = *privateKey } digestList := []crypto.Hash{crypto.SHA256, crypto.SHA384, crypto.SHA512} for _, privateKey := range privateKeyList { for _, digest := range digestList { signingResult, err := Sign([]byte(msg), SigningOpts{privateKey, digest}) if err != nil { t.Log("Failed to sign the input message") t.Fail() } sig, err := hex.DecodeString(signingResult.Signature) if err != nil { t.Log("Failed to decode the hex-encoded signature") t.Fail() } valid, _ := Verify([]byte(msg), SigningOpts{privateKey, digest}, sig) if !valid { t.Log("Failed to verify the signature") t.Fail() } } } } func TestCredentialProcess(t *testing.T) { testTable := []struct { name string server *httptest.Server }{ { name: "create-session-server-response", server: GetMockedCreateSessionResponseServer(), }, } for _, tc := range testTable { credentialsOpts := CredentialsOpts{ PrivateKeyId: "../credential-process-data/client-key.pem", CertificateId: 
"../credential-process-data/client-cert.pem", RoleArn: "arn:aws:iam::000000000000:role/ExampleS3WriteRole", ProfileArnStr: "arn:aws:rolesanywhere:us-east-1:000000000000:profile/41cl0bae-6783-40d4-ab20-65dc5d922e45", TrustAnchorArnStr: "arn:aws:rolesanywhere:us-east-1:000000000000:trust-anchor/41cl0bae-6783-40d4-ab20-65dc5d922e45", Endpoint: tc.server.URL, SessionDuration: 900, } t.Run(tc.name, func(t *testing.T) { defer tc.server.Close() resp, err := GenerateCredentials(&credentialsOpts) if err != nil { t.Log(err) t.Log("Unable to call credential-process") t.Fail() } if resp.AccessKeyId != "accessKeyId" { t.Log("Incorrect access key id") t.Fail() } if resp.SecretAccessKey != "secretAccessKey" { t.Log("Incorrect secret access key") t.Fail() } if resp.SessionToken != "sessionToken" { t.Log("Incorrect session token") t.Fail() } }) } } func TestUpdate(t *testing.T) { testTable := []struct { name string server *httptest.Server inputFileContents string profile string expectedFileContents string }{ { name: "test-space-separated-keys", server: GetMockedCreateSessionResponseServer(), inputFileContents: `test test test [test profile] aws_access_key_id = test [test] aws_secret_access_key = test`, profile: "test profile", expectedFileContents: `test test test [test profile] aws_access_key_id = accessKeyId aws_secret_access_key = secretAccessKey aws_session_token = sessionToken [test] aws_secret_access_key = test`, }, { name: "test-profile-with-other-keys", server: GetMockedCreateSessionResponseServer(), inputFileContents: `test test test [test profile] aws_access_key_id = test test_key = test [test] aws_secret_access_key = test`, profile: "test profile", expectedFileContents: `test test test [test profile] aws_access_key_id = accessKeyId test_key = test aws_secret_access_key = secretAccessKey aws_session_token = sessionToken [test] aws_secret_access_key = test`, }, { name: "test-commented-profile", server: GetMockedCreateSessionResponseServer(), inputFileContents: `test test 
test # [test profile] aws_access_key_id = test [test] aws_secret_access_key = test`, profile: "test profile", expectedFileContents: `test test test # [test profile] aws_access_key_id = test [test] aws_secret_access_key = test [test profile] aws_access_key_id = accessKeyId aws_secret_access_key = secretAccessKey aws_session_token = sessionToken `, }, { name: "test-profile-does-not-exist", server: GetMockedCreateSessionResponseServer(), inputFileContents: `test test test [test] aws_secret_access_key = test`, profile: "test profile", expectedFileContents: `test test test [test] aws_secret_access_key = test [test profile] aws_access_key_id = accessKeyId aws_secret_access_key = secretAccessKey aws_session_token = sessionToken `, }, { name: "test-first-word-in-profile-matches", server: GetMockedCreateSessionResponseServer(), inputFileContents: `test test test [test profile] aws_access_key_id = test [test] aws_secret_access_key = test`, profile: "test", expectedFileContents: `test test test [test profile] aws_access_key_id = test [test] aws_access_key_id = accessKeyId aws_secret_access_key = secretAccessKey aws_session_token = sessionToken`, }, { name: "test-multiple-profiles-with-same-name", server: GetMockedCreateSessionResponseServer(), inputFileContents: `test test test [test] test_key = test [test profile] aws_access_key_id = test [test] aws_secret_access_key = test`, profile: "test", expectedFileContents: `test test test [test] test_key = test aws_access_key_id = accessKeyId aws_secret_access_key = secretAccessKey aws_session_token = sessionToken [test profile] aws_access_key_id = test [test] aws_secret_access_key = test`, }, } for _, tc := range testTable { credentialsOpts := CredentialsOpts{ PrivateKeyId: "../credential-process-data/client-key.pem", CertificateId: "../credential-process-data/client-cert.pem", RoleArn: "arn:aws:iam::000000000000:role/ExampleS3WriteRole", ProfileArnStr: 
"arn:aws:rolesanywhere:us-east-1:000000000000:profile/41cl0bae-6783-40d4-ab20-65dc5d922e45", TrustAnchorArnStr: "arn:aws:rolesanywhere:us-east-1:000000000000:trust-anchor/41cl0bae-6783-40d4-ab20-65dc5d922e45", Endpoint: tc.server.URL, SessionDuration: 900, } t.Run(tc.name, func(t *testing.T) { SetupTests() defer tc.server.Close() os.Setenv(AwsSharedCredentialsFileEnvVarName, TestCredentialsFilePath) _, err := GetCredentialsFileContents() // first create the credentials file with the appropriate permissions if err != nil { t.Log("unable to create credentials file for testing") t.Fail() } writeOnlyCredentialsFile, err := GetWriteOnlyCredentialsFile() // then obtain a handle to the credentials file to perform write operations if err != nil { t.Log("unable to write to credentials file for testing") t.Fail() } defer writeOnlyCredentialsFile.Close() writeOnlyCredentialsFile.WriteString(tc.inputFileContents) Update(credentialsOpts, tc.profile, true) fileByteContents, _ := ioutil.ReadFile(TestCredentialsFilePath) fileStringContents := trimLastChar(string(fileByteContents)) if fileStringContents != tc.expectedFileContents { t.Log("unexpected file contents") t.Fail() } }) } } func TestUpdateFilePermissions(t *testing.T) { testTable := []struct { name string server *httptest.Server profile string expectedFileContents string }{ { name: "test-space-separated-keys", server: GetMockedCreateSessionResponseServer(), profile: "test profile", expectedFileContents: `[test profile] aws_access_key_id = accessKeyId aws_secret_access_key = secretAccessKey aws_session_token = sessionToken `, }, } for _, tc := range testTable { credentialsOpts := CredentialsOpts{ PrivateKeyId: "../credential-process-data/client-key.pem", CertificateId: "../credential-process-data/client-cert.pem", RoleArn: "arn:aws:iam::000000000000:role/ExampleS3WriteRole", ProfileArnStr: "arn:aws:rolesanywhere:us-east-1:000000000000:profile/41cl0bae-6783-40d4-ab20-65dc5d922e45", TrustAnchorArnStr: 
"arn:aws:rolesanywhere:us-east-1:000000000000:trust-anchor/41cl0bae-6783-40d4-ab20-65dc5d922e45", Endpoint: tc.server.URL, SessionDuration: 900, } t.Run(tc.name, func(t *testing.T) { SetupTests() defer tc.server.Close() os.Setenv(AwsSharedCredentialsFileEnvVarName, TestCredentialsFilePath) Update(credentialsOpts, tc.profile, true) fileByteContents, _ := ioutil.ReadFile(TestCredentialsFilePath) fileStringContents := trimLastChar(string(fileByteContents)) if fileStringContents != tc.expectedFileContents { t.Log("unexpected file contents") t.Fail() } info, _ := os.Stat(TestCredentialsFilePath) mode := info.Mode() if mode != ((1 << 8) | (1 << 7)) { t.Log("unexpected file mode") t.Fail() } }) } } func TestGenerateLongToken(t *testing.T) { _, err := GenerateToken(150) if err == nil { t.Log("token generation should've failed since token size is too large") t.Fail() } } func TestGenerateToken(t *testing.T) { token1, err := GenerateToken(100) if err != nil { t.Log("unexpected failure in generating token") t.Fail() } token2, err := GenerateToken(100) if err != nil { t.Log("unexpected failure in generating token") t.Fail() } if token1 == token2 { t.Log("expected two randomly generated tokens to be different") t.Fail() } } func TestStoreValidToken(t *testing.T) { token, err := GenerateToken(100) if err != nil { t.Log("unexpected failure in generating token") t.Fail() } err = InsertToken(token, time.Now().Add(time.Second*time.Duration(100))) if err != nil { t.Log("unexpected failure when inserting token") t.Fail() } httpRequest, err := http.NewRequest("GET", "http://127.0.0.1", nil) if err != nil { t.Log("unable to create test http request") t.Fail() } httpRequest.Header.Add(EC2_METADATA_TOKEN_HEADER, token) err = CheckValidToken(nil, httpRequest) if err != nil { t.Log("expected previously inserted token to be valid") t.Fail() } } func Test(t *testing.T) { httpRequest, err := http.NewRequest("GET", "http://127.0.0.1", nil) if err != nil { t.Log("unable to create test http 
request") t.Fail() } httpRequest.Header.Add("test-header", "test-header-value") headerNames := [4]string{"Test-Header", "test-header", "TEST-HEADER", "tEST-hEadeR"} for _, header := range headerNames { testHeaderValue := httpRequest.Header.Get(header) if testHeaderValue != "test-header-value" { t.Log("header name canonicalization not working as expected") t.Fail() } } } func SetupTests() { os.Remove(TestCredentialsFilePath) } func trimLastChar(s string) string { r, size := utf8.DecodeLastRuneInString(s) if r == utf8.RuneError && (size == 0 || size == 1) { size = 0 } return s[:len(s)-size] } func GetMockedCreateSessionResponseServer() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusCreated) w.Write([]byte(`{ "credentialSet":[ { "assumedRoleUser": { "arn": "arn:aws:sts::000000000000:assumed-role/ExampleS3WriteRole", "assumedRoleId": "assumedRoleId" }, "credentials":{ "accessKeyId": "accessKeyId", "expiration": "2022-07-27T04:36:55Z", "secretAccessKey": "secretAccessKey", "sessionToken": "sessionToken" }, "packedPolicySize": 10, "roleArn": "arn:aws:iam::000000000000:role/ExampleS3WriteRole", "sourceIdentity": "sourceIdentity" } ], "subjectArn": "arn:aws:rolesanywhere:us-east-1:000000000000:subject/41cl0bae-6783-40d4-ab20-65dc5d922e45" }`)) })) }
617
rolesanywhere-credential-helper
aws
Go
package aws_signing_helper

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// UpdateRefreshTime is how long before the actual expiration time
// credentials are refreshed.
const UpdateRefreshTime = time.Minute * time.Duration(5)

// AwsSharedCredentialsFileEnvVarName is the environment variable that
// overrides the default location (~/.aws/credentials) of the shared AWS
// credentials file.
const AwsSharedCredentialsFileEnvVarName = "AWS_SHARED_CREDENTIALS_FILE"

// BufferSize is the size of the buffered writer used when rewriting the
// credentials file.
const BufferSize = 49152

// TemporaryCredential contains a single set of temporary AWS credentials.
type TemporaryCredential struct {
	AccessKeyId     string
	SecretAccessKey string
	SessionToken    string
	Expiration      time.Time
}

// Update fetches credentials via GenerateCredentials and writes them to the
// credentials file under the specified profile. When once is false, it loops
// forever, refreshing the credentials UpdateRefreshTime before they expire.
// Any failure terminates the process with a non-zero exit code.
func Update(credentialsOptions CredentialsOpts, profile string, once bool) {
	var refreshableCred = TemporaryCredential{}
	var nextRefreshTime time.Time
	for {
		credentialProcessOutput, err := GenerateCredentials(&credentialsOptions)
		if err != nil {
			log.Fatal(err)
		}

		// Assign credential values
		refreshableCred.AccessKeyId = credentialProcessOutput.AccessKeyId
		refreshableCred.SecretAccessKey = credentialProcessOutput.SecretAccessKey
		refreshableCred.SessionToken = credentialProcessOutput.SessionToken // nosemgrep
		refreshableCred.Expiration, _ = time.Parse(time.RFC3339, credentialProcessOutput.Expiration)
		if (refreshableCred == TemporaryCredential{}) {
			log.Println("no credentials created")
			os.Exit(1)
		}

		// Get credentials file contents
		lines, err := GetCredentialsFileContents()
		if err != nil {
			log.Println("unable to get credentials file contents")
			os.Exit(1)
		}

		// Write to credentials file
		err = WriteTo(profile, lines, &refreshableCred)
		if err != nil {
			log.Println("unable to write to AWS credentials file")
			os.Exit(1)
		}

		if once {
			break
		}

		nextRefreshTime = refreshableCred.Expiration.Add(-UpdateRefreshTime)
		log.Println("Credentials will be refreshed at", nextRefreshTime.String())
		time.Sleep(time.Until(nextRefreshTime))
	}
}

// credentialsFilePath returns the location of the shared AWS credentials
// file, honoring the AWS_SHARED_CREDENTIALS_FILE override. Shared by the
// read-only and write-only accessors so they always resolve the same file.
func credentialsFilePath() (string, error) {
	if path := os.Getenv(AwsSharedCredentialsFileEnvVarName); path != "" {
		return path, nil
	}
	homeDir, err := os.UserHomeDir()
	if err != nil {
		log.Println("unable to locate the home directory")
		return "", err
	}
	return filepath.Join(homeDir, ".aws", "credentials"), nil
}

// GetCredentialsFileContents reads the credentials file (creating it, and its
// parent directory, if necessary) and returns its lines. Errors are returned
// to the caller rather than terminating the process, so callers (and tests)
// can decide how to react.
func GetCredentialsFileContents() ([]string, error) {
	awsCredentialsPath, err := credentialsFilePath()
	if err != nil {
		return nil, err
	}
	// 0700, not 0600: the execute bit is required to traverse a directory, so
	// a 0600 directory would make the credentials file inside it unreachable.
	if err = os.MkdirAll(filepath.Dir(awsCredentialsPath), 0700); err != nil {
		log.Println("unable to create credentials file")
		return nil, err
	}
	readOnlyCredentialsFile, err := os.OpenFile(awsCredentialsPath, os.O_RDONLY|os.O_CREATE, 0600)
	if err != nil {
		log.Println("unable to get or create read-only AWS credentials file")
		return nil, err
	}
	defer readOnlyCredentialsFile.Close()

	// Read in all profiles in the credentials file
	var lines []string
	scanner := bufio.NewScanner(readOnlyCredentialsFile)
	for scanner.Scan() {
		lines = append(lines, scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("reading credentials file: %w", err)
	}
	return lines, nil
}

// GetWriteOnlyCredentialsFile assumes that the credentials file exists
// already and opens it for write operations that will overwrite its existing
// contents.
func GetWriteOnlyCredentialsFile() (*os.File, error) {
	awsCredentialsPath, err := credentialsFilePath()
	if err != nil {
		return nil, err
	}
	return os.OpenFile(awsCredentialsPath, os.O_WRONLY|os.O_TRUNC, 0200)
}

// GetNewCredentialsFileContents computes the new contents of the credentials
// file after a refresh: existing lines are preserved, the three credential
// keys within the matching profile section are replaced (or appended if
// missing), and a new section is appended when the profile does not exist.
// Each returned element carries its own trailing newline.
func GetNewCredentialsFileContents(profileName string, readLines []string, cred *TemporaryCredential) []string {
	var profileExist = false
	var profileSection = "[" + profileName + "]"
	// Tracks whether each required credential key has been written into the
	// destination, so missing keys can be appended before the section ends.
	newCredVisit := map[string]bool{"aws_access_key_id": false, "aws_secret_access_key": false, "aws_session_token": false}
	accessKey := "aws_access_key_id = " + cred.AccessKeyId + "\n"
	secretKey := "aws_secret_access_key = " + cred.SecretAccessKey + "\n"
	sessionToken := "aws_session_token = " + cred.SessionToken + "\n"
	var writeLines = make([]string, 0)
	for readLinesIndex := 0; readLinesIndex < len(readLines); readLinesIndex++ {
		if !profileExist && readLines[readLinesIndex] == profileSection {
			writeLines = append(writeLines, profileSection+"\n")
			readLinesIndex += 1
			for ; readLinesIndex < len(readLines); readLinesIndex++ {
				// If the last line of the credentials file is reached
				// OR the next profile section is reached
				// NOTE(review): the last-line check runs before the key-prefix
				// checks, so a non-credential key on the very last line is not
				// copied through — preserved as-is since the existing tests
				// encode this behavior.
				if readLinesIndex == len(readLines)-1 || strings.HasPrefix(readLines[readLinesIndex], "[") {
					if !newCredVisit["aws_access_key_id"] {
						writeLines = append(writeLines, accessKey)
					}
					if !newCredVisit["aws_secret_access_key"] {
						writeLines = append(writeLines, secretKey)
					}
					if !newCredVisit["aws_session_token"] {
						writeLines = append(writeLines, sessionToken)
					}
					if readLinesIndex != len(readLines)-1 {
						// Re-process the section header in the outer loop.
						readLinesIndex -= 1
					}
					profileExist = true
					break
				} else if strings.HasPrefix(readLines[readLinesIndex], "aws_access_key_id") {
					// replace "aws_access_key_id"
					writeLines = append(writeLines, accessKey)
					newCredVisit["aws_access_key_id"] = true
				} else if strings.HasPrefix(readLines[readLinesIndex], "aws_secret_access_key") {
					// replace "aws_secret_access_key"
					writeLines = append(writeLines, secretKey)
					newCredVisit["aws_secret_access_key"] = true
				} else if strings.HasPrefix(readLines[readLinesIndex], "aws_session_token") {
					// replace "aws_session_token"
					writeLines = append(writeLines, sessionToken)
					newCredVisit["aws_session_token"] = true
				} else {
					// write other keys
					writeLines = append(writeLines, readLines[readLinesIndex]+"\n")
				}
			}
		} else {
			writeLines = append(writeLines, readLines[readLinesIndex]+"\n")
		}
	}
	// If the chosen profile does not exist
	if !profileExist {
		writeCredential := profileSection + "\n" + accessKey + secretKey + sessionToken
		writeLines = append(writeLines, writeCredential+"\n")
	}
	return writeLines
}

// WriteTo writes existing credentials and newly-created credentials to the
// destination credentials file. Errors are returned rather than terminating
// the process, honoring the declared error return.
func WriteTo(profileName string, readLines []string, cred *TemporaryCredential) error {
	destFile, err := GetWriteOnlyCredentialsFile()
	if err != nil {
		log.Println("unable to get write-only AWS credentials file")
		return err
	}
	defer destFile.Close()

	// Create buffered writer
	destFileWriter := bufio.NewWriterSize(destFile, BufferSize)
	for _, line := range GetNewCredentialsFileContents(profileName, readLines, cred) {
		if _, err := destFileWriter.WriteString(line); err != nil {
			return fmt.Errorf("writing to credentials file: %w", err)
		}
	}
	// Flush the contents of the buffer
	return destFileWriter.Flush()
}
199
rolesanywhere-credential-helper
aws
Go
package main

import (
	"bufio"
	"crypto"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"strings"

	helper "github.com/aws/rolesanywhere-credential-helper/aws_signing_helper"
)

// Common flags that must be contained in all flag sets
var (
	privateKeyId        string
	certificateId       string
	certificateBundleId string
	digestArg           string
	roleArnStr          string
	profileArnStr       string
	trustAnchorArnStr   string
	sessionDuration     int
	region              string
	endpoint            string
	noVerifySSL         bool
	withProxy           bool
	debug               bool
	format              string
	profile             string
	once                bool
	port                int

	credentialProcessCmd   = flag.NewFlagSet("credential-process", flag.ExitOnError)
	signStringCmd          = flag.NewFlagSet("sign-string", flag.ExitOnError)
	readCertificateDataCmd = flag.NewFlagSet("read-certificate-data", flag.ExitOnError)
	updateCmd              = flag.NewFlagSet("update", flag.ExitOnError)
	serveCmd               = flag.NewFlagSet("serve", flag.ExitOnError)
	versionCmd             = flag.NewFlagSet("version", flag.ExitOnError)
)

// Version is the CLI version string printed by the "version" command
// (typically injected externally, e.g. at link time).
var Version string

// Global options that may appear at any position on the command line.
var globalOptSet = map[string]bool{"--region": true, "--endpoint": true}

// Commands that obtain credentials and therefore share the credential flags.
var credentialCommands = map[string]struct{}{"credential-process": {}, "update": {}, "serve": {}}

// Maps each command name to a flagset
var commands = map[string]*flag.FlagSet{
	credentialProcessCmd.Name():   credentialProcessCmd,
	signStringCmd.Name():          signStringCmd,
	readCertificateDataCmd.Name(): readCertificateDataCmd,
	updateCmd.Name():              updateCmd,
	serveCmd.Name():               serveCmd,
	versionCmd.Name():             versionCmd,
}

// findGlobalVar finds global parameters that can appear in any position.
// It returns a map from each global parameter name to its value, plus the
// list of remaining arguments.
func findGlobalVar(argList []string) (map[string]string, []string) {
	globalVars := make(map[string]string)
	parseList := []string{}
	for i := 0; i < len(argList); i++ {
		if globalOptSet[argList[i]] {
			// Bounds check: a global flag as the final argument previously
			// indexed argList[i+1] out of range and panicked instead of
			// producing a usage error.
			if i+1 >= len(argList) || strings.HasPrefix(argList[i+1], "--") {
				log.Println("Invalid value for ", argList[i])
				os.Exit(1)
			}
			globalVars[argList[i]] = argList[i+1]
			i = i + 1
		} else {
			parseList = append(parseList, argList[i])
		}
	}
	return globalVars, parseList
}

// setupFlags assigns different flags to different commands.
func setupFlags() {
	for command, fs := range commands {
		// Common flags for all credential-related commands
		if _, ok := credentialCommands[command]; ok {
			fs.StringVar(&certificateId, "certificate", "", "Path to certificate file")
			fs.StringVar(&privateKeyId, "private-key", "", "Path to private key file")
			fs.StringVar(&roleArnStr, "role-arn", "", "Target role to assume")
			fs.StringVar(&profileArnStr, "profile-arn", "", "Profile to pull policies from")
			fs.StringVar(&trustAnchorArnStr, "trust-anchor-arn", "", "Trust anchor to use for authentication")
			fs.IntVar(&sessionDuration, "session-duration", 3600, "Duration, in seconds, for the resulting session")
			fs.StringVar(&region, "region", "", "Signing region")
			fs.StringVar(&endpoint, "endpoint", "", "Endpoint to retrieve session from")
			fs.StringVar(&certificateBundleId, "intermediates", "", "Path to intermediate certificate bundle")
			fs.BoolVar(&noVerifySSL, "no-verify-ssl", false, "To disable SSL verification")
			fs.BoolVar(&withProxy, "with-proxy", false, "To use credential-process with a proxy")
			fs.BoolVar(&debug, "debug", false, "To print debug output when SDK calls are made")
		}
		if command == "read-certificate-data" {
			fs.StringVar(&certificateId, "certificate", "", "Path to certificate file")
		} else if command == "sign-string" {
			fs.StringVar(&privateKeyId, "private-key", "", "Path to private key file")
			fs.StringVar(&format, "format", "json", "Output format. One of json, text, and bin")
			fs.StringVar(&digestArg, "digest", "SHA256", "One of SHA256, SHA384 and SHA512")
		} else if command == "update" {
			fs.StringVar(&profile, "profile", "default", "The aws profile to use (default 'default')")
			fs.BoolVar(&once, "once", false, "Update the credentials once")
		} else if command == "serve" {
			fs.IntVar(&port, "port", helper.DefaultPort, "The port used to run local server (default: 9911)")
		}
	}
}

func main() {
	setupFlags()

	// find and remove global variables
	globalVars, parseList := findGlobalVar(os.Args[1:])
	tmpRegion, regionDetected := globalVars["--region"]
	tmpEndpoint, endpointDetected := globalVars["--endpoint"]

	if len(parseList) == 0 || strings.HasPrefix(parseList[0], "--") {
		log.Println("No command provided")
		os.Exit(1)
	}
	command := parseList[0]
	commandFs, valid := commands[command]
	// if the command does not exist in the command list
	if !valid {
		log.Println("Unrecognized command")
		os.Exit(1)
	}
	commandFs.Parse(parseList[1:])

	// assign global variables if they have been detected
	if regionDetected {
		region = tmpRegion
	}
	if endpointDetected {
		endpoint = tmpEndpoint
	}

	credentialsOptions := helper.CredentialsOpts{
		PrivateKeyId:        privateKeyId,
		CertificateId:       certificateId,
		CertificateBundleId: certificateBundleId,
		RoleArn:             roleArnStr,
		ProfileArnStr:       profileArnStr,
		TrustAnchorArnStr:   trustAnchorArnStr,
		SessionDuration:     sessionDuration,
		Region:              region,
		Endpoint:            endpoint,
		NoVerifySSL:         noVerifySSL,
		WithProxy:           withProxy,
		Debug:               debug,
		Version:             Version,
	}

	switch command {
	case "credential-process":
		// First check whether required arguments are present
		if privateKeyId == "" || certificateId == "" || profileArnStr == "" || trustAnchorArnStr == "" || roleArnStr == "" {
			msg := `Usage: aws_signing_helper credential-process
	--private-key <value> --certificate <value> --profile-arn <value>
	--trust-anchor-arn <value> --role-arn <value> [--endpoint <value>]
	[--region <value>] [--session-duration <value>] [--with-proxy]
	[--no-verify-ssl] [--debug] [--intermediates <value>]`
			log.Println(msg)
			os.Exit(1)
		}
		credentialProcessOutput, err := helper.GenerateCredentials(&credentialsOptions)
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}
		buf, _ := json.Marshal(credentialProcessOutput)
		fmt.Print(string(buf[:]))
	case "sign-string":
		// Errors here were previously discarded, which silently produced
		// empty or bogus signatures; fail loudly instead.
		stringToSign, err := ioutil.ReadAll(bufio.NewReader(os.Stdin))
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}
		privateKey, err := helper.ReadPrivateKeyData(privateKeyId)
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}
		var digest crypto.Hash
		switch strings.ToUpper(digestArg) {
		case "SHA256":
			digest = crypto.SHA256
		case "SHA384":
			digest = crypto.SHA384
		case "SHA512":
			digest = crypto.SHA512
		default:
			digest = crypto.SHA256
		}
		signingResult, err := helper.Sign(stringToSign, helper.SigningOpts{PrivateKey: privateKey, Digest: digest})
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}
		switch strings.ToLower(format) {
		case "text":
			fmt.Print(signingResult.Signature)
		case "json":
			buf, _ := json.Marshal(signingResult)
			fmt.Print(string(buf[:]))
		case "bin":
			buf, _ := hex.DecodeString(signingResult.Signature)
			binary.Write(os.Stdout, binary.BigEndian, buf[:])
		default:
			fmt.Print(signingResult.Signature)
		}
	case "read-certificate-data":
		data, err := helper.ReadCertificateData(certificateId)
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}
		buf, _ := json.Marshal(data)
		fmt.Print(string(buf[:]))
	case "version":
		fmt.Println(Version)
	case "update":
		if privateKeyId == "" || certificateId == "" || profileArnStr == "" || trustAnchorArnStr == "" || roleArnStr == "" {
			msg := `Usage: aws_signing_helper update
	--private-key <value> --certificate <value> --profile-arn <value>
	--trust-anchor-arn <value> --role-arn <value> [--endpoint <value>]
	[--region <value>] [--session-duration <value>] [--with-proxy]
	[--no-verify-ssl] [--intermediates <value>] [--profile <value>] [--once]`
			log.Println(msg)
			os.Exit(1)
		}
		helper.Update(credentialsOptions, profile, once)
	case "serve":
		// First check whether required arguments are present
		if privateKeyId == "" || certificateId == "" || profileArnStr == "" || trustAnchorArnStr == "" || roleArnStr == "" {
			msg := `Usage: aws_signing_helper serve
	--private-key <value> --certificate <value> --profile-arn <value>
	--trust-anchor-arn <value> --role-arn <value> [--endpoint <value>]
	[--region <value>] [--session-duration <value>] [--with-proxy]
	[--no-verify-ssl] [--debug] [--intermediates <value>] [--port <value>]`
			log.Println(msg)
			os.Exit(1)
		}
		helper.Serve(port, credentialsOptions)
	case "":
		// Defensive: unreachable given the checks above, retained so a future
		// refactor of the dispatch keeps a sensible failure mode.
		log.Println("No command provided")
		os.Exit(1)
	default:
		log.Fatalf("Unrecognized command %s", command)
	}
}
281
rolesanywhere-credential-helper
aws
Go
package main

import (
	"testing"
)

// TestParseArgs verifies that flags registered by setupFlags are parsed into
// the expected package-level variables for the read-certificate-data command.
func TestParseArgs(t *testing.T) {
	args := []string{
		"read-certificate-data",
		"--certificate", "/path/to/cert.pem",
	}
	setupFlags()
	// Guard the map lookup: a missing command previously produced a nil
	// flagset and an opaque nil-pointer panic on Parse.
	command, ok := commands[args[0]]
	if !ok {
		t.Fatalf("command %q is not registered", args[0])
	}
	// flag.ExitOnError means Parse normally never returns an error, but
	// checking keeps the test honest if the error mode ever changes.
	if err := command.Parse(args[1:]); err != nil {
		t.Fatalf("failed to parse arguments: %v", err)
	}
	if certificateId != "/path/to/cert.pem" {
		t.Errorf("Expected %s, got %s", "/path/to/cert.pem", certificateId)
	}
}
21
rolesanywhere-credential-helper
aws
Go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package rolesanywhere import ( "fmt" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" ) const opCreateSession = "CreateSession" // CreateSessionRequest generates a "aws/request.Request" representing the // client's request for the CreateSession operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See CreateSession for more information on using the CreateSession // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the CreateSessionRequest method. // req, resp := client.CreateSessionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/roles-anywhere-2018-05-10/CreateSession func (c *RolesAnywhere) CreateSessionRequest(input *CreateSessionInput) (req *request.Request, output *CreateSessionOutput) { op := &request.Operation{ Name: opCreateSession, HTTPMethod: "POST", HTTPPath: "/sessions", } if input == nil { input = &CreateSessionInput{} } output = &CreateSessionOutput{} req = c.newRequest(op, input, output) return } // CreateSession API operation for RolesAnywhere Service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for RolesAnywhere Service's // API operation CreateSession for usage and error information. // // Returned Error Types: // * ValidationException // // * ResourceNotFoundException // // * AccessDeniedException // // See also, https://docs.aws.amazon.com/goto/WebAPI/roles-anywhere-2018-05-10/CreateSession func (c *RolesAnywhere) CreateSession(input *CreateSessionInput) (*CreateSessionOutput, error) { req, out := c.CreateSessionRequest(input) return out, req.Send() } // CreateSessionWithContext is the same as CreateSession with the addition of // the ability to pass a context and additional request options. // // See CreateSession for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *RolesAnywhere) CreateSessionWithContext(ctx aws.Context, input *CreateSessionInput, opts ...request.Option) (*CreateSessionOutput, error) { req, out := c.CreateSessionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } type CreateSessionInput struct { _ struct{} `type:"structure"` Cert *string `location:"header" locationName:"x-amz-x509" type:"string"` DurationSeconds *int64 `locationName:"durationSeconds" min:"900" type:"integer"` InstanceProperties map[string]*string `locationName:"instanceProperties" type:"map"` // ProfileArn is a required field ProfileArn *string `location:"querystring" locationName:"profileArn" type:"string" required:"true"` // RoleArn is a required field RoleArn *string `location:"querystring" locationName:"roleArn" type:"string" required:"true"` SessionName *string `locationName:"sessionName" min:"2" type:"string"` TrustAnchorArn *string `location:"querystring" locationName:"trustAnchorArn" type:"string"` } // String returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s CreateSessionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s CreateSessionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *CreateSessionInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateSessionInput"} if s.DurationSeconds != nil && *s.DurationSeconds < 900 { invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) } if s.ProfileArn == nil { invalidParams.Add(request.NewErrParamRequired("ProfileArn")) } if s.RoleArn == nil { invalidParams.Add(request.NewErrParamRequired("RoleArn")) } if s.SessionName != nil && len(*s.SessionName) < 2 { invalidParams.Add(request.NewErrParamMinLen("SessionName", 2)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCert sets the Cert field's value. func (s *CreateSessionInput) SetCert(v string) *CreateSessionInput { s.Cert = &v return s } // SetDurationSeconds sets the DurationSeconds field's value. func (s *CreateSessionInput) SetDurationSeconds(v int64) *CreateSessionInput { s.DurationSeconds = &v return s } // SetInstanceProperties sets the InstanceProperties field's value. func (s *CreateSessionInput) SetInstanceProperties(v map[string]*string) *CreateSessionInput { s.InstanceProperties = v return s } // SetProfileArn sets the ProfileArn field's value. func (s *CreateSessionInput) SetProfileArn(v string) *CreateSessionInput { s.ProfileArn = &v return s } // SetRoleArn sets the RoleArn field's value. func (s *CreateSessionInput) SetRoleArn(v string) *CreateSessionInput { s.RoleArn = &v return s } // SetSessionName sets the SessionName field's value. func (s *CreateSessionInput) SetSessionName(v string) *CreateSessionInput { s.SessionName = &v return s } // SetTrustAnchorArn sets the TrustAnchorArn field's value. 
func (s *CreateSessionInput) SetTrustAnchorArn(v string) *CreateSessionInput { s.TrustAnchorArn = &v return s } type CreateSessionOutput struct { _ struct{} `type:"structure"` CredentialSet []*CredentialResponse `locationName:"credentialSet" type:"list"` EnrollmentArn *string `locationName:"enrollmentArn" type:"string"` SubjectArn *string `locationName:"subjectArn" type:"string"` } // String returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s CreateSessionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s CreateSessionOutput) GoString() string { return s.String() } // SetCredentialSet sets the CredentialSet field's value. func (s *CreateSessionOutput) SetCredentialSet(v []*CredentialResponse) *CreateSessionOutput { s.CredentialSet = v return s } // SetEnrollmentArn sets the EnrollmentArn field's value. func (s *CreateSessionOutput) SetEnrollmentArn(v string) *CreateSessionOutput { s.EnrollmentArn = &v return s } // SetSubjectArn sets the SubjectArn field's value. 
func (s *CreateSessionOutput) SetSubjectArn(v string) *CreateSessionOutput { s.SubjectArn = &v return s } type CredentialResponse struct { _ struct{} `type:"structure"` AssumedRoleUser *AssumedRoleUser `locationName:"assumedRoleUser" type:"structure"` Credentials *Credentials `locationName:"credentials" type:"structure"` PackedPolicySize *int64 `locationName:"packedPolicySize" type:"integer"` RoleArn *string `locationName:"roleArn" type:"string"` SourceIdentity *string `locationName:"sourceIdentity" type:"string"` } // String returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s CredentialResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s CredentialResponse) GoString() string { return s.String() } // SetAssumedRoleUser sets the AssumedRoleUser field's value. func (s *CredentialResponse) SetAssumedRoleUser(v *AssumedRoleUser) *CredentialResponse { s.AssumedRoleUser = v return s } // SetCredentials sets the Credentials field's value. func (s *CredentialResponse) SetCredentials(v *Credentials) *CredentialResponse { s.Credentials = v return s } // SetPackedPolicySize sets the PackedPolicySize field's value. func (s *CredentialResponse) SetPackedPolicySize(v int64) *CredentialResponse { s.PackedPolicySize = &v return s } // SetRoleArn sets the RoleArn field's value. func (s *CredentialResponse) SetRoleArn(v string) *CredentialResponse { s.RoleArn = &v return s } // SetSourceIdentity sets the SourceIdentity field's value. 
func (s *CredentialResponse) SetSourceIdentity(v string) *CredentialResponse { s.SourceIdentity = &v return s } type CredentialSummary struct { _ struct{} `type:"structure"` Enabled *bool `locationName:"enabled" type:"boolean"` Failed *bool `locationName:"failed" type:"boolean"` Issuer *string `locationName:"issuer" type:"string"` SeenAt *time.Time `locationName:"seenAt" type:"timestamp" timestampFormat:"iso8601"` SerialNumber *string `locationName:"serialNumber" type:"string"` // X509Certificate is automatically base64 encoded/decoded by the SDK. X509Certificate []byte `locationName:"x509Certificate" type:"blob"` } // String returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s CredentialSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s CredentialSummary) GoString() string { return s.String() } // SetEnabled sets the Enabled field's value. func (s *CredentialSummary) SetEnabled(v bool) *CredentialSummary { s.Enabled = &v return s } // SetFailed sets the Failed field's value. func (s *CredentialSummary) SetFailed(v bool) *CredentialSummary { s.Failed = &v return s } // SetIssuer sets the Issuer field's value. func (s *CredentialSummary) SetIssuer(v string) *CredentialSummary { s.Issuer = &v return s } // SetSeenAt sets the SeenAt field's value. func (s *CredentialSummary) SetSeenAt(v time.Time) *CredentialSummary { s.SeenAt = &v return s } // SetSerialNumber sets the SerialNumber field's value. 
func (s *CredentialSummary) SetSerialNumber(v string) *CredentialSummary { s.SerialNumber = &v return s } // SetX509Certificate sets the X509Certificate field's value. func (s *CredentialSummary) SetX509Certificate(v []byte) *CredentialSummary { s.X509Certificate = v return s } type Credentials struct { _ struct{} `type:"structure"` AccessKeyId *string `locationName:"accessKeyId" type:"string"` Expiration *string `locationName:"expiration" type:"string"` // SecretAccessKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by Credentials's // String and GoString methods. SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` SessionToken *string `locationName:"sessionToken" type:"string"` } // String returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s Credentials) String() string { return awsutil.Prettify(s) } // GoString returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s Credentials) GoString() string { return s.String() } // SetAccessKeyId sets the AccessKeyId field's value. func (s *Credentials) SetAccessKeyId(v string) *Credentials { s.AccessKeyId = &v return s } // SetExpiration sets the Expiration field's value. func (s *Credentials) SetExpiration(v string) *Credentials { s.Expiration = &v return s } // SetSecretAccessKey sets the SecretAccessKey field's value. func (s *Credentials) SetSecretAccessKey(v string) *Credentials { s.SecretAccessKey = &v return s } // SetSessionToken sets the SessionToken field's value. 
func (s *Credentials) SetSessionToken(v string) *Credentials { s.SessionToken = &v return s } type AssumedRoleUser struct { _ struct{} `type:"structure"` Arn *string `locationName:"arn" type:"string"` AssumedRoleId *string `locationName:"assumedRoleId" type:"string"` } // String returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s AssumedRoleUser) String() string { return awsutil.Prettify(s) } // GoString returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s AssumedRoleUser) GoString() string { return s.String() } // SetArn sets the Arn field's value. func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { s.Arn = &v return s } // SetAssumedRoleId sets the AssumedRoleId field's value. func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { s.AssumedRoleId = &v return s } type ValidationException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s ValidationException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
func (s ValidationException) GoString() string { return s.String() } func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ RespMetadata: v, } } // String returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s AccessDeniedException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s AccessDeniedException) GoString() string { return s.String() } func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ RespMetadata: v, } } // Code returns the exception type name. func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } return "" } type AccessDeniedException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } // OrigErr always returns nil, satisfies awserr.Error interface. func (s *AccessDeniedException) OrigErr() error { return nil } func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. func (s *AccessDeniedException) StatusCode() int { return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
func (s *AccessDeniedException) RequestID() string { return s.RespMetadata.RequestID } type ResourceNotFoundException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s ResourceNotFoundException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation. // // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s ResourceNotFoundException) GoString() string { return s.String() } func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ RespMetadata: v, } } // Code returns the exception type name. func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } return "" } // OrigErr always returns nil, satisfies awserr.Error interface. func (s *ResourceNotFoundException) OrigErr() error { return nil } func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. func (s *ResourceNotFoundException) StatusCode() int { return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. func (s *ResourceNotFoundException) RequestID() string { return s.RespMetadata.RequestID } // Code returns the exception type name. 
func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } return "" } // OrigErr always returns nil, satisfies awserr.Error interface. func (s *ValidationException) OrigErr() error { return nil } func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. func (s *ValidationException) StatusCode() int { return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. func (s *ValidationException) RequestID() string { return s.RespMetadata.RequestID }
659
rolesanywhere-credential-helper
aws
Go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. // Package rolesanywhere provides the client and types for making API // requests to RolesAnywhere Service. // // See https://docs.aws.amazon.com/goto/WebAPI/roles-anywhere-2018-05-10 for more information on this service. // // See rolesanywhere package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/rolesanywhere/ // // Using the Client // // To contact RolesAnywhere Service with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. // These clients are safe to use concurrently. // // See the SDK's documentation for more information on how to use the SDK. // https://docs.aws.amazon.com/sdk-for-go/api/ // // See aws.Config documentation for more information on configuring SDK clients. // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config // // See the RolesAnywhere Service client RolesAnywhere for more // information on creating client for this service. // https://docs.aws.amazon.com/sdk-for-go/api/service/rolesanywhere/#New package rolesanywhere
27
rolesanywhere-credential-helper
aws
Go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package rolesanywhere import ( "github.com/aws/aws-sdk-go/private/protocol" ) const ( // ErrCodeAccessDeniedException for service response error code // "AccessDeniedException". ErrCodeAccessDeniedException = "AccessDeniedException" // ErrCodeResourceNotFoundException for service response error code // "ResourceNotFoundException". ErrCodeResourceNotFoundException = "ResourceNotFoundException" // ErrCodeValidationException for service response error code // "ValidationException". ErrCodeValidationException = "ValidationException" ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "AccessDeniedException": newErrorAccessDeniedException, "ResourceNotFoundException": newErrorResourceNotFoundException, "ValidationException": newErrorValidationException, }
29
rolesanywhere-credential-helper
aws
Go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package rolesanywhere import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) // RolesAnywhere provides the API operation methods for making requests to // RolesAnywhere Service. See this package's package overview docs // for details on the service. // // RolesAnywhere methods are safe to use concurrently. It is not safe to // modify mutate any of the struct's properties though. type RolesAnywhere struct { *client.Client } // Used for custom client initialization logic var initClient func(*client.Client) // Used for custom request initialization logic var initRequest func(*request.Request) // Service information constants const ( ServiceName = "Roles Anywhere" // Name of service. EndpointsID = "rolesanywhere" // ID to lookup a service endpoint with. ServiceID = "Roles Anywhere" // ServiceID is a unique identifier of a specific service. ) // New creates a new instance of the RolesAnywhere client with a session. // If additional configuration is needed for the client instance use the optional // aws.Config parameter to add your extra config. // // Example: // mySession := session.Must(session.NewSession()) // // // Create a RolesAnywhere client from just a session. // svc := rolesanywhere.New(mySession) // // // Create a RolesAnywhere client with additional configuration // svc := rolesanywhere.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *RolesAnywhere { c := p.ClientConfig(EndpointsID, cfgs...) 
if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "rolesanywhere" } return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) } // newClient creates, initializes and returns a new service client instance. func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *RolesAnywhere { svc := &RolesAnywhere{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: ServiceName, ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-05-10", ResolvedRegion: resolvedRegion, }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed( protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), ) // Run custom client initialization if present if initClient != nil { initClient(svc.Client) } return svc } // newRequest creates a new request for a RolesAnywhere operation and runs any // custom request initialization. func (c *RolesAnywhere) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) // Run custom request initialization if present if initRequest != nil { initRequest(req) } return req }
106
rolesanywhere-credential-helper
aws
Go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. // Package rolesanywhereiface provides an interface to enable mocking the RolesAnywhere Service service client // for testing your code. // // It is important to note that this interface will have breaking changes // when the service model is updated and adds new API operations, paginators, // and waiters. package rolesanywhereiface import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/rolesanywhere-credential-helper/rolesanywhere" ) // RolesAnywhereAPI provides an interface to enable mocking the // rolesanywhere.RolesAnywhere service client's API operation, // paginators, and waiters. This make unit testing your code that calls out // to the SDK's service client's calls easier. // // The best way to use this interface is so the SDK's service client's calls // can be stubbed out for unit testing your code with the SDK without needing // to inject custom request handlers into the SDK's request pipeline. // // // myFunc uses an SDK service client to make a request to // // RolesAnywhere Service. // func myFunc(svc rolesanywhereiface.RolesAnywhereAPI) bool { // // Make svc.CreateSession request // } // // func main() { // sess := session.New() // svc := rolesanywhere.New(sess) // // myFunc(svc) // } // // In your _test.go file: // // // Define a mock struct to be used in your unit tests of myFunc. 
// type mockRolesAnywhereClient struct { // rolesanywhereiface.RolesAnywhereAPI // } // func (m *mockRolesAnywhereClient) CreateSession(input *rolesanywhere.CreateSessionInput) (*rolesanywhere.CreateSessionOutput, error) { // // mock response/functionality // } // // func TestMyFunc(t *testing.T) { // // Setup Test // mockSvc := &mockRolesAnywhereClient{} // // myfunc(mockSvc) // // // Verify myFunc's functionality // } // // It is important to note that this interface will have breaking changes // when the service model is updated and adds new API operations, paginators, // and waiters. Its suggested to use the pattern above for testing, or using // tooling to generate mocks to satisfy the interfaces. type RolesAnywhereAPI interface { CreateSession(*rolesanywhere.CreateSessionInput) (*rolesanywhere.CreateSessionOutput, error) CreateSessionWithContext(aws.Context, *rolesanywhere.CreateSessionInput, ...request.Option) (*rolesanywhere.CreateSessionOutput, error) CreateSessionRequest(*rolesanywhere.CreateSessionInput) (*request.Request, *rolesanywhere.CreateSessionOutput) } var _ RolesAnywhereAPI = (*rolesanywhere.RolesAnywhere)(nil)
69
secrets-store-csi-driver-provider-aws
aws
Go
package main import ( "flag" "fmt" "net" "os" "os/signal" "syscall" "google.golang.org/grpc" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/klog/v2" csidriver "sigs.k8s.io/secrets-store-csi-driver/provider/v1alpha1" "github.com/aws/secrets-store-csi-driver-provider-aws/auth" "github.com/aws/secrets-store-csi-driver-provider-aws/provider" "github.com/aws/secrets-store-csi-driver-provider-aws/server" ) var ( endpointDir = flag.String("provider-volume", "/etc/kubernetes/secrets-store-csi-providers", "Rendezvous directory for provider socket") driverWriteSecrets = flag.Bool("driver-writes-secrets", false, "The driver will do the write instead of the plugin") ) // Main entry point for the Secret Store CSI driver AWS provider. This main // rountine starts up the gRPC server that will listen for incoming mount // requests. func main() { klog.Infof("Starting %s version %s", auth.ProviderName, server.Version) flag.Parse() // Parse command line flags //socket on which to listen to for driver calls endpoint := fmt.Sprintf("%s/aws.sock", *endpointDir) os.Remove(endpoint) // Make sure to start clean. grpcSrv := grpc.NewServer() //Gracefully terminate server on shutdown unix signals sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT) go func() { sig := <-sigs klog.Infof("received signal:%s to terminate", sig) grpcSrv.GracefulStop() }() listener, err := net.Listen("unix", endpoint) if err != nil { klog.Fatalf("Failed to listen on unix socket. error: %v", err) } cfg, err := rest.InClusterConfig() if err != nil { klog.Fatalf("Can not get cluster config. error: %v", err) } clientset, err := kubernetes.NewForConfig(cfg) if err != nil { klog.Fatalf("Can not initialize kubernetes client. 
error: %v", err) } defer func() { // Cleanup on shutdown listener.Close() os.Remove(endpoint) }() providerSrv, err := server.NewServer(provider.NewSecretProviderFactory, clientset.CoreV1(), *driverWriteSecrets) if err != nil { klog.Fatalf("Could not create server. error: %v", err) } csidriver.RegisterCSIDriverProviderServer(grpcSrv, providerSrv) klog.Infof("Listening for connections on address: %s", listener.Addr()) err = grpcSrv.Serve(listener) if err != nil { klog.Fatalf("Failure serving incoming mount requests. error: %v", err) } }
85
secrets-store-csi-driver-provider-aws
aws
Go
/* * Package responsible for returning an AWS SDK session with credentials * given an AWS region, K8s namespace, and K8s service account. * * This package requries that the K8s service account be associated with an IAM * role via IAM Roles for Service Accounts (IRSA). */ package auth import ( "context" "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sts" "github.com/aws/aws-sdk-go/service/sts/stsiface" authv1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sv1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/klog/v2" ) const ( arnAnno = "eks.amazonaws.com/role-arn" docURL = "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html" tokenAudience = "sts.amazonaws.com" ProviderName = "secrets-store-csi-driver-provider-aws" ) // Private implementation of stscreds.TokenFetcher interface to fetch a token // for use with AssumeRoleWithWebIdentity given a K8s namespace and service // account. // type authTokenFetcher struct { nameSpace, svcAcc string k8sClient k8sv1.CoreV1Interface } // Private helper to fetch a JWT token for a given namespace and service account. // // See also: https://pkg.go.dev/k8s.io/client-go/kubernetes/typed/core/v1 // func (p authTokenFetcher) FetchToken(ctx credentials.Context) ([]byte, error) { // Use the K8s API to fetch the token from the OIDC provider. tokRsp, err := p.k8sClient.ServiceAccounts(p.nameSpace).CreateToken(ctx, p.svcAcc, &authv1.TokenRequest{ Spec: authv1.TokenRequestSpec{ Audiences: []string{tokenAudience}, }, }, metav1.CreateOptions{}) if err != nil { return nil, err } return []byte(tokRsp.Status.Token), nil } // Auth is the main entry point to retrive an AWS session. 
The caller // initializes a new Auth object with NewAuth passing the region, namespace, and // K8s service account (and request context). The caller can then obtain AWS // sessions by calling GetAWSSession. // type Auth struct { region, nameSpace, svcAcc string k8sClient k8sv1.CoreV1Interface stsClient stsiface.STSAPI ctx context.Context } // Factory method to create a new Auth object for an incomming mount request. // func NewAuth( ctx context.Context, region, nameSpace, svcAcc string, k8sClient k8sv1.CoreV1Interface, ) (auth *Auth, e error) { // Get an initial session to use for STS calls. sess, err := session.NewSession(aws.NewConfig(). WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint). WithRegion(region), ) if err != nil { return nil, err } return &Auth{ region: region, nameSpace: nameSpace, svcAcc: svcAcc, k8sClient: k8sClient, stsClient: sts.New(sess), ctx: ctx, }, nil } // Private helper to lookup the role ARN for a given pod. // // This method looks up the role ARN associated with the K8s service account by // calling the K8s APIs to get the role annotation on the service account. // See also: https://pkg.go.dev/k8s.io/client-go/kubernetes/typed/core/v1 // func (p Auth) getRoleARN() (arn *string, e error) { // cli equivalent: kubectl -o yaml -n <namespace> get serviceaccount <acct> rsp, err := p.k8sClient.ServiceAccounts(p.nameSpace).Get(p.ctx, p.svcAcc, metav1.GetOptions{}) if err != nil { return nil, err } roleArn := rsp.Annotations[arnAnno] if len(roleArn) <= 0 { klog.Errorf("Need IAM role for service account %s (namespace: %s) - %s", p.svcAcc, p.nameSpace, docURL) return nil, fmt.Errorf("An IAM role must be associated with service account %s (namespace: %s)", p.svcAcc, p.nameSpace) } klog.Infof("Role ARN for %s:%s is %s", p.nameSpace, p.svcAcc, roleArn) return &roleArn, nil } // Get the AWS session credentials associated with a given pod's service account. 
// // The returned session is capable of automatically refreshing creds as needed // by using a private TokenFetcher helper. // func (p Auth) GetAWSSession() (awsSession *session.Session, e error) { roleArn, err := p.getRoleARN() if err != nil { return nil, err } fetcher := &authTokenFetcher{p.nameSpace, p.svcAcc, p.k8sClient} ar := stscreds.NewWebIdentityRoleProviderWithToken(p.stsClient, *roleArn, ProviderName, fetcher) config := aws.NewConfig(). WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint). // Use regional STS endpoint WithRegion(p.region). WithCredentials(credentials.NewCredentials(ar)) // Include the provider in the user agent string. sess, err := session.NewSession(config) if err != nil { return nil, err } sess.Handlers.Build.PushFront(func(r *request.Request) { request.AddToUserAgent(r, ProviderName) }) return session.Must(sess, err), nil }
158
secrets-store-csi-driver-provider-aws
aws
Go
package auth import ( "context" "fmt" "strings" "testing" "github.com/aws/aws-sdk-go/service/sts/stsiface" authv1 "k8s.io/api/authentication/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" k8sv1 "k8s.io/client-go/kubernetes/typed/core/v1" ) // Mock STS client type mockSTS struct { stsiface.STSAPI } // Mock K8s client for creating tokens type mockK8sV1 struct { k8sv1.CoreV1Interface k8CTOneShotError bool } func (m *mockK8sV1) ServiceAccounts(namespace string) k8sv1.ServiceAccountInterface { return &mockK8sV1SA{v1mock: m} } // Mock the K8s service account client type mockK8sV1SA struct { k8sv1.ServiceAccountInterface v1mock *mockK8sV1 } func (ma *mockK8sV1SA) CreateToken( ctx context.Context, serviceAccountName string, tokenRequest *authv1.TokenRequest, opts metav1.CreateOptions, ) (*authv1.TokenRequest, error) { if ma.v1mock.k8CTOneShotError { ma.v1mock.k8CTOneShotError = false // Reset so other tests don't fail return nil, fmt.Errorf("Fake create token error") } return &authv1.TokenRequest{ Status: authv1.TokenRequestStatus{ Token: "FAKETOKEN", }, }, nil } func newAuthWithMocks(k8SAGetError bool, roleARN string) *Auth { nameSpace := "someNamespace" accName := "someServiceAccount" region := "someRegion" sa := &corev1.ServiceAccount{} if !k8SAGetError { sa.Name = accName } sa.Namespace = nameSpace sa.Annotations = map[string]string{"eks.amazonaws.com/role-arn": roleARN} clientset := fake.NewSimpleClientset(sa) return &Auth{ region: region, nameSpace: nameSpace, svcAcc: accName, k8sClient: clientset.CoreV1(), stsClient: &mockSTS{}, } } type authTest struct { testName string k8SAGetOneShotError bool k8CTOneShotError bool roleARN string expError string } var authTests []authTest = []authTest{ {"Success", false, false, "fakeRoleARN", ""}, {"Missing Role", false, false, "", "An IAM role must"}, {"Fetch svc acc fail", true, false, "fakeRoleARN", "not found"}, } func TestAuth(t *testing.T) { for _, tstData := 
range authTests { t.Run(tstData.testName, func(t *testing.T) { tstAuth := newAuthWithMocks(tstData.k8SAGetOneShotError, tstData.roleARN) sess, err := tstAuth.GetAWSSession() if len(tstData.expError) == 0 && err != nil { t.Errorf("%s case: got unexpected auth error: %s", tstData.testName, err) } if len(tstData.expError) == 0 && sess == nil { t.Errorf("%s case: got empty session", tstData.testName) } if len(tstData.expError) != 0 && err == nil { t.Errorf("%s case: expected error but got none", tstData.testName) } if len(tstData.expError) != 0 && !strings.Contains(err.Error(), tstData.expError) { t.Errorf("%s case: expected error prefix '%s' but got '%s'", tstData.testName, tstData.expError, err.Error()) } }) } } var tokenTests []authTest = []authTest{ {"Success", false, false, "myRoleARN", ""}, {"Fetch JWT fail", false, true, "myRoleARN", "Fake create token"}, } func TestToken(t *testing.T) { for _, tstData := range tokenTests { t.Run(tstData.testName, func(t *testing.T) { tstAuth := newAuthWithMocks(tstData.k8SAGetOneShotError, tstData.roleARN) fetcher := &authTokenFetcher{tstAuth.nameSpace, tstAuth.svcAcc, &mockK8sV1{k8CTOneShotError: tstData.k8CTOneShotError}} tokenOut, err := fetcher.FetchToken(nil) if len(tstData.expError) == 0 && err != nil { t.Errorf("%s case: got unexpected error: %s", tstData.testName, err) } if len(tstData.expError) != 0 && err == nil { t.Errorf("%s case: expected error but got none", tstData.testName) } if len(tstData.expError) != 0 && !strings.HasPrefix(err.Error(), tstData.expError) { t.Errorf("%s case: expected error prefix '%s' but got '%s'", tstData.testName, tstData.expError, err.Error()) } if len(tstData.expError) == 0 && len(tokenOut) == 0 { t.Errorf("%s case: got empty token output", tstData.testName) return } if len(tstData.expError) == 0 && string(tokenOut) != "FAKETOKEN" { t.Errorf("%s case: got bad token output", tstData.testName) } }) } }
163
secrets-store-csi-driver-provider-aws
aws
Go
package provider import ( "context" "fmt" "strconv" "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ssm" "github.com/aws/aws-sdk-go/service/ssm/ssmiface" "github.com/aws/secrets-store-csi-driver-provider-aws/utils" "k8s.io/klog/v2" "sigs.k8s.io/secrets-store-csi-driver/provider/v1alpha1" ) const ( batchSize = 10 // Max parameters SSM allows in a batch. ) // Implements the provider interface for SSM Parameter Store. // // Unlike the SecretsManagerProvider, this implementation is optimized to // reduce API call rates rather than latency in order to avoid request // throttling (which would result in higher latency). // // This implementation reduces API calls by batching multiple parameter requests // together using the GetParameters call. // type ParameterStoreProvider struct { clients []ParameterStoreClient } //Parameterstore client with region type ParameterStoreClient struct { IsFailover bool Region string Client ssmiface.SSMAPI } // Get the secret from Parameter Store. // // This method iterates over the requested secrets build up batches of requests // and fetching them. As each batch is fetched, the results are saved and the // current version map (curMap) is updated with the current version information. // func (p *ParameterStoreProvider) GetSecretValues( ctx context.Context, descriptors []*SecretDescriptor, curMap map[string]*v1alpha1.ObjectVersion, ) (v []*SecretValue, e error) { // Fetch parameters in batches and build up the results in values descLen := len(descriptors) for i := 0; i < descLen; i += batchSize { end := min(i+batchSize, descLen) // Calculate slice end. batchDescriptors := descriptors[i:end] batchValues, batchErrors := p.fetchParameterStoreValue(ctx, batchDescriptors, curMap) if batchErrors != nil { return nil, batchErrors } v = append(v, batchValues...) } return v, nil } // Private helper function to fetch a batch secret. 
// // This method iterates over all available clients in the ParameterProvider. // It requests a fetch from each of them. Once a fetch succeeds it returns the // value. If a fetch fails in all clients it returns all errors. // func (p *ParameterStoreProvider) fetchParameterStoreValue( ctx context.Context, batchDescriptors []*SecretDescriptor, curMap map[string]*v1alpha1.ObjectVersion, ) (values []*SecretValue, err error) { for _, client := range p.clients { batchValues, err := p.fetchParameterStoreBatch(client, ctx, batchDescriptors, curMap) if utils.IsFatalError(err) { return nil, err } else if err != nil { klog.Warning(err) } if len(values) == 0 { values = batchValues } } if values == nil { return nil, fmt.Errorf("Failed to fetch parameters from all regions.") } return values, nil } // Private helper function to fetch batch of secrets from a single region // // This method builds batch of parameters and fetches the values. // if any parameter is failed to fetch, the parameter is returned as invalid parameter // and the version information is updated in the current version map. // func (p *ParameterStoreProvider) fetchParameterStoreBatch( client ParameterStoreClient, ctx context.Context, batchDescriptors []*SecretDescriptor, curMap map[string]*v1alpha1.ObjectVersion, ) (v []*SecretValue, err error) { var values []*SecretValue // Build up the batch of parameter names. 
var names []*string batchDesc := make(map[string]*SecretDescriptor) for _, descriptor := range batchDescriptors { // Use either version or label if specified (but not both) parameterName := descriptor.GetSecretName(client.IsFailover) if len(descriptor.GetObjectVersion(client.IsFailover)) != 0 { parameterName = fmt.Sprintf("%s:%s", parameterName, descriptor.GetObjectVersion(client.IsFailover)) } else if len(descriptor.GetObjectVersionLabel(client.IsFailover)) != 0 { parameterName = fmt.Sprintf("%s:%s", parameterName, descriptor.GetObjectVersionLabel(client.IsFailover)) } names = append(names, aws.String(parameterName)) batchDesc[descriptor.GetSecretName(client.IsFailover)] = descriptor // Needed for response } // Fetch the batch of secrets rsp, err := client.Client.GetParametersWithContext(ctx, &ssm.GetParametersInput{ Names: names, WithDecryption: aws.Bool(true), }) if err != nil { return nil, fmt.Errorf("%s: Failed fetching parameters: %w", client.Region, err) } if len(rsp.InvalidParameters) != 0 { err = awserr.NewRequestFailure(awserr.New("", fmt.Sprintf("%s: Invalid parameters: %s", client.Region, strings.Join(aws.StringValueSlice(rsp.InvalidParameters), ", ")), err), 400, "") return nil, err } // Build up the results from the batch for _, parm := range rsp.Parameters { descriptor := batchDesc[*(parm.Name)] secretValue := &SecretValue{ Value: []byte(*(parm.Value)), Descriptor: *descriptor, } values = append(values, secretValue) //Fetch individual json key value pairs if jmesPath is specified jsonSecrets, jsonErr := secretValue.getJsonSecrets() if jsonErr != nil { return nil, fmt.Errorf("%s: %s", client.Region, jsonErr) } values = append(values, jsonSecrets...) // Update the version in the current version map. 
for _, jsonSecret := range jsonSecrets { jsonDescriptor := jsonSecret.Descriptor curMap[jsonDescriptor.GetFileName()] = &v1alpha1.ObjectVersion{ Id: jsonDescriptor.GetFileName(), Version: strconv.Itoa(int(*(parm.Version))), } } curMap[descriptor.GetFileName()] = &v1alpha1.ObjectVersion{ Id: descriptor.GetFileName(), Version: strconv.Itoa(int(*(parm.Version))), } } return values, nil } // Factory methods to build a new ParameterStoreProvider // func NewParameterStoreProviderWithClients(clients ...ParameterStoreClient) *ParameterStoreProvider { return &ParameterStoreProvider{ clients: clients, } } func NewParameterStoreProvider(awsSessions []*session.Session, regions []string) *ParameterStoreProvider { var parameterStoreClients []ParameterStoreClient for i, awsSession := range awsSessions { client := ParameterStoreClient{ Region: *awsSession.Config.Region, Client: ssm.New(awsSession, aws.NewConfig().WithRegion(regions[i])), IsFailover: i > 0, } parameterStoreClients = append(parameterStoreClients, client) } return NewParameterStoreProviderWithClients(parameterStoreClients...) } // Private implementation of min using ints because math.Min uses floats only. func min(i, j int) int { if i > j { return j } return i }
215
secrets-store-csi-driver-provider-aws
aws
Go
package provider import ( "context" "fmt" "io/ioutil" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/secretsmanager" "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface" "github.com/aws/secrets-store-csi-driver-provider-aws/utils" "k8s.io/klog/v2" "sigs.k8s.io/secrets-store-csi-driver/provider/v1alpha1" ) // Implements the provider interface for Secrets Manager. // // Unlike the ParameterStoreProvider, this implementation is optimized for // latency and not reduced API call rates becuase Secrets Manager provides // higher API limits. // // When there are no existing versions of the secret (first mount), this // provider will just call GetSecretValue, update the current version map // (curMap), and return the secret in the results. When there are existing // versions (rotation reconciler case), this implementation will use the lower // latency DescribeSecret call to first determine if the secret has been // updated. // type SecretsManagerProvider struct { clients []SecretsManagerClient } //SecretsManager client with region type SecretsManagerClient struct { Region string Client secretsmanageriface.SecretsManagerAPI IsFailover bool } // Get the secret from SecretsManager. // // This method iterates over all descriptors and requests a fetch. When // sucessfully fetched, then it continues until all descriptors have been fetched. // Once an error happens, it immediately returns the error. // func (p *SecretsManagerProvider) GetSecretValues( ctx context.Context, descriptors []*SecretDescriptor, curMap map[string]*v1alpha1.ObjectVersion, ) (v []*SecretValue, errs error) { // Fetch each secret in order. If any secret fails we will return that secret's errors for _, descriptor := range descriptors { values, errs := p.fetchSecretManagerValue(ctx, descriptor, curMap) if values == nil { return nil, errs } v = append(v, values...) } return v, nil } // Private helper function to fetch a single secret. 
// // This method iterates over all available clients in the SecretsManagerProvider. // It requests a fetch from each of them. Once a fetch succeeds it returns the // value. If a fetch fails all clients it returns all errors. // func (p *SecretsManagerProvider) fetchSecretManagerValue( ctx context.Context, descriptor *SecretDescriptor, curMap map[string]*v1alpha1.ObjectVersion, ) (value []*SecretValue, err error) { for _, client := range p.clients { secretVal, err := p.fetchSecretManagerValueWithClient(ctx, client, descriptor, curMap) //check if fatal(4XX status error) exist to error out the mount if utils.IsFatalError(err) { return nil, err } else if err != nil { klog.Warning(err) } if len(secretVal) > 0 && len(value) == 0 { value = secretVal } } if len(value) == 0 { return nil, fmt.Errorf("Failed to fetch secret from all regions: %s", descriptor.ObjectName) } return value, nil } // Private helper function to fetch a single secret from a single region // // This method checks if the secret is current. If a secret is not current // (or this is the first time), the secret is fetched, added to the list of // secrets, and the version information is updated in the current version map. // func (p *SecretsManagerProvider) fetchSecretManagerValueWithClient( ctx context.Context, client SecretsManagerClient, descriptor *SecretDescriptor, curMap map[string]*v1alpha1.ObjectVersion, ) (v []*SecretValue, e error) { var values []*SecretValue // Don't re-fetch if we already have the current version. isCurrent, version, err := p.isCurrent(ctx, client, descriptor, curMap) if err != nil { return nil, err } // If version is current, read it back in, otherwise pull it down var secret *SecretValue if isCurrent { secret, err = p.reloadSecret(descriptor) if err != nil { return nil, err } } else { // Fetch the latest version. 
version, secret, err = p.fetchSecret(ctx, client, descriptor) if err != nil { return nil, err } } values = append(values, secret) // Build up the slice of values //Fetch individual json key value pairs based on jmesPath jsonSecrets, jsonError := secret.getJsonSecrets() if jsonError != nil { return nil, jsonError } values = append(values, jsonSecrets...) // Update the version in the current version map. for _, jsonSecret := range jsonSecrets { jsonDescriptor := jsonSecret.Descriptor curMap[jsonDescriptor.GetFileName()] = &v1alpha1.ObjectVersion{ Id: jsonDescriptor.GetFileName(), Version: version, } } // Update the version in the current version map. curMap[descriptor.GetFileName()] = &v1alpha1.ObjectVersion{ Id: descriptor.GetFileName(), Version: version, } return values, nil } // Private helper to check if a secret is current. // // This method looks for the given secret in the current version map, if it // does not exist (first time) it is not current. If the requsted secret uses // the objectVersion parameter, the current version is compared to the required // version to determine if it is current. Otherwise, the current vesion // information is fetched using DescribeSecret and this method checks if the // current version is labeled as current (AWSCURRENT) or has the label // sepecified via objectVersionLable (if any). // func (p *SecretsManagerProvider) isCurrent( ctx context.Context, client SecretsManagerClient, descriptor *SecretDescriptor, curMap map[string]*v1alpha1.ObjectVersion, ) (cur bool, ver string, err error) { // If we don't have this version, it is not current. curVer := curMap[descriptor.GetFileName()] if curVer == nil { return false, "", nil } // If the secret is pinned to a version see if that is what we have. if len(descriptor.GetObjectVersion(client.IsFailover)) > 0 { return curVer.Version == descriptor.GetObjectVersion(client.IsFailover), curVer.Version, nil } // Lookup the current version information. 
rsp, err := client.Client.DescribeSecretWithContext(ctx, &secretsmanager.DescribeSecretInput{SecretId: aws.String(descriptor.GetSecretName(client.IsFailover))}) if err != nil { return false, curVer.Version, fmt.Errorf("%s: Failed to describe secret %s: %w", client.Region, descriptor.ObjectName, err) } // If no label is specified use current, otherwise use the specified label. label := "AWSCURRENT" if len(descriptor.GetObjectVersionLabel(client.IsFailover)) > 0 { label = descriptor.GetObjectVersionLabel(client.IsFailover) } // Linear search for desired label in the list of labels on current version. stages := rsp.VersionIdsToStages[curVer.Version] hasLabel := false for i := 0; i < len(stages) && !hasLabel; i++ { hasLabel = *(stages[i]) == label } return hasLabel, curVer.Version, nil // If the current version has the desired label, it is current. } // Private helper to fetch a given secret. // // This method builds up the GetSecretValue request using the objectName from // the request and any objectVersion or objectVersionLabel parameters. // func (p *SecretsManagerProvider) fetchSecret( ctx context.Context, client SecretsManagerClient, descriptor *SecretDescriptor, ) (ver string, val *SecretValue, err error) { req := secretsmanager.GetSecretValueInput{SecretId: aws.String(descriptor.GetSecretName(client.IsFailover))} // Use explicit version if specified if len(descriptor.GetObjectVersion(client.IsFailover)) != 0 { req.SetVersionId(descriptor.GetObjectVersion(client.IsFailover)) } // Use stage label if specified if len(descriptor.GetObjectVersionLabel(client.IsFailover)) != 0 { req.SetVersionStage(descriptor.GetObjectVersionLabel(client.IsFailover)) } rsp, err := client.Client.GetSecretValueWithContext(ctx, &req) if err != nil { return "", nil, fmt.Errorf("%s: Failed fetching secret %s: %w", client.Region, descriptor.ObjectName, err) } // Use either secret string or secret binary. 
var sValue []byte if rsp.SecretString != nil { sValue = []byte(*rsp.SecretString) } else { sValue = rsp.SecretBinary } return *rsp.VersionId, &SecretValue{Value: sValue, Descriptor: *descriptor}, nil } // Private helper to refesh a secret from its previously stored value. // // Reads a secret back in from the file system. // func (p *SecretsManagerProvider) reloadSecret(descriptor *SecretDescriptor) (val *SecretValue, e error) { sValue, err := ioutil.ReadFile(descriptor.GetMountPath()) if err != nil { return nil, err } return &SecretValue{Value: sValue, Descriptor: *descriptor}, nil } // Factory methods to build a new SecretsManagerProvider // func NewSecretsManagerProviderWithClients(clients ...SecretsManagerClient) *SecretsManagerProvider { return &SecretsManagerProvider{ clients: clients, } } func NewSecretsManagerProvider(awsSessions []*session.Session, regions []string) *SecretsManagerProvider { var clients []SecretsManagerClient for i, awsSession := range awsSessions { client := SecretsManagerClient{ Region: *awsSession.Config.Region, Client: secretsmanager.New(awsSession, aws.NewConfig().WithRegion(regions[i])), IsFailover: i > 0, } clients = append(clients, client) } return NewSecretsManagerProviderWithClients(clients...) }
283
secrets-store-csi-driver-provider-aws
aws
Go
package provider import ( "fmt" "os" "path/filepath" "regexp" "strings" "github.com/aws/aws-sdk-go/aws/arn" "sigs.k8s.io/yaml" ) // An RE pattern to check for bad paths var badPathRE = regexp.MustCompile("(/\\.\\./)|(^\\.\\./)|(/\\.\\.$)") // An individual record from the mount request indicating the secret to be // fetched and mounted. type SecretDescriptor struct { // Name of the secret ObjectName string `json:"objectName"` // Optional base file name in which to store the secret (use ObjectName if nil). ObjectAlias string `json:"objectAlias"` // Optional version id of the secret (default to latest). ObjectVersion string `json:"objectVersion"` // Optional version/stage label of the secret (defaults to latest). ObjectVersionLabel string `json:"objectVersionLabel"` // One of secretsmanager or ssmparameter (not required when using full secrets manager ARN). ObjectType string `json:"objectType"` // Optional array to specify what json key value pairs to extract from a secret and mount as individual secrets JMESPath []JMESPathEntry `json:"jmesPath"` // Optional failover object FailoverObject FailoverObjectEntry `json:"failoverObject"` // Path translation character (not part of YAML spec). translate string `json:"-"` // Mount point directory (not part of YAML spec). mountDir string `json:"-"` } //An individual json key value pair to mount type JMESPathEntry struct { //JMES path to use for retrieval Path string `json:"path"` //File name in which to store the secret in. ObjectAlias string `json:"objectAlias"` } //An individual json key value pair to mount type FailoverObjectEntry struct { // Optional name of the failover secret ObjectName string `json:"objectName"` // Optional version id of the secret (default to latest). ObjectVersion string `json:"objectVersion"` // Optional version/stage label of the secret (defaults to latest). 
ObjectVersionLabel string `json:"objectVersionLabel"` } // Enum of supported secret types // type SecretType int const ( SSMParameter SecretType = iota SecretsManager ) func (sType SecretType) String() string { return []string{"ssmparameter", "secretsmanager"}[sType] } // Private map of allowed objectType and associated ARN type. Used for // validating and converting ARNs and objectType. var typeMap = map[string]SecretType{ "secretsmanager": SecretsManager, "ssmparameter": SSMParameter, "ssm": SSMParameter, } // Returns the file name where the secrets are to be written. // // Uses either the ObjectName or ObjectAlias to construct the file name. // func (p *SecretDescriptor) GetFileName() (path string) { fileName := p.ObjectName if len(p.ObjectAlias) != 0 { fileName = p.ObjectAlias } // Translate slashes to underscore if required. if len(p.translate) != 0 { fileName = strings.ReplaceAll(fileName, string(os.PathSeparator), p.translate) } else { fileName = strings.TrimLeft(fileName, string(os.PathSeparator)) // Strip leading slash } return fileName } // Return the mount point directory // // Return the mount point directory pass in by the driver in the mount request. // func (p *SecretDescriptor) GetMountDir() string { return p.mountDir } // Get the full path name (mount point + file) of the file where the seret is stored. // // Returns a path name composed of the mount point and the file name. // func (p *SecretDescriptor) GetMountPath() string { return filepath.Join(p.GetMountDir(), p.GetFileName()) } //Return the object type (ssmparameter, secretsmanager, or ssm) func (p *SecretDescriptor) getObjectType() (otype string) { oType := p.ObjectType if len(oType) == 0 { oType = strings.Split(p.ObjectName, ":")[2] // Other checks guarantee ARN } return oType } // Returns the secret type (ssmparameter or secretsmanager). 
// // If the ObjectType is not specified, a full ARN must be present in the // ObjectName so this method pulls the type from the ARN when ObjectType is // not specified. // func (p *SecretDescriptor) GetSecretType() (stype SecretType) { // If no objectType, use ARN (but convert ssm to ssmparameter). Note that // SSM does not actually allow ARNs but we convert anyway for other checks. sType := p.getObjectType() return typeMap[sType] } //Return a descriptor for a jmes object entry within the secret func (p *SecretDescriptor) getJmesEntrySecretDescriptor(j *JMESPathEntry) (d SecretDescriptor) { return SecretDescriptor{ ObjectAlias: j.ObjectAlias, ObjectType: p.getObjectType(), translate: p.translate, mountDir: p.mountDir, } } // Returns the secret name for the current descriptor. // // The current secret name will resolve to the ObjectName if not in failover, // and will resolve the the backup ARN if in failover. // func (p *SecretDescriptor) GetSecretName(useFailoverRegion bool) (secretName string) { if len(p.FailoverObject.ObjectName) > 0 && useFailoverRegion { return p.FailoverObject.ObjectName } return p.ObjectName } // Return the ObjectVersionLabel // func (p *SecretDescriptor) GetObjectVersionLabel(useFailoverRegion bool) (secretName string) { if len(p.FailoverObject.ObjectVersionLabel) > 0 && useFailoverRegion { return p.FailoverObject.ObjectVersionLabel } return p.ObjectVersionLabel } // Return the ObjectVersion // func (p *SecretDescriptor) GetObjectVersion(useFailoverRegion bool) (secretName string) { if len(p.FailoverObject.ObjectVersion) > 0 && useFailoverRegion { return p.FailoverObject.ObjectVersion } return p.ObjectVersion } // Private helper to validate the contents of SecretDescriptor. // // This method is used to validate input before it is used by the rest of the // plugin. 
// func (p *SecretDescriptor) validateSecretDescriptor(regions []string) error { if len(p.ObjectName) == 0 { return fmt.Errorf("Object name must be specified") } err := p.validateObjectName(p.ObjectName, p.ObjectType, regions[0]) if err != nil { return err } // Can only use objectVersion or objectVersionLabel for SSM not both if p.GetSecretType() == SSMParameter && len(p.ObjectVersion) != 0 && len(p.ObjectVersionLabel) != 0 { return fmt.Errorf("ssm parameters can not specify both objectVersion and objectVersionLabel: %s", p.ObjectName) } // Do not allow ../ in a path when translation is turned off if badPathRE.MatchString(p.GetFileName()) { return fmt.Errorf("path can not contain ../: %s", p.ObjectName) } //ensure each jmesPath entry has a path and an objectalias for _, jmesPathEntry := range p.JMESPath { if len(jmesPathEntry.Path) == 0 { return fmt.Errorf("Path must be specified for JMES object") } if len(jmesPathEntry.ObjectAlias) == 0 { return fmt.Errorf("Object alias must be specified for JMES object") } } if len(p.FailoverObject.ObjectName) > 0 { // Backup arns require object alias to be set. 
if len(p.ObjectAlias) == 0 { return fmt.Errorf("object alias must be specified for objects with failover entries: %s", p.ObjectName) } // Our regions must exist if len(regions) < 2 { return fmt.Errorf("failover object allowed only when failover region is defined: %s", p.ObjectName) } err := p.validateObjectName(p.FailoverObject.ObjectName, p.ObjectType, regions[1]) if err != nil { return err } // Can only use objectVersion or objectVersionLabel for SSM not both if p.GetSecretType() == SSMParameter && len(p.FailoverObject.ObjectVersion) != 0 && len(p.FailoverObject.ObjectVersionLabel) != 0 { return fmt.Errorf("ssm parameters can not specify both objectVersion and objectVersionLabel: %s", p.ObjectName) } if p.FailoverObject.ObjectVersion != p.ObjectVersion { return fmt.Errorf("object versions must match between primary and failover regions: %s", p.ObjectName) } } return nil } // Private helper to validate an objectname. // // This function validates the objectname string, and makes sure it matches the // corresponding 'objectType' and 'region'. 
// func (p *SecretDescriptor) validateObjectName(objectName string, objectType string, region string) (err error) { var objARN arn.ARN // Validate if ARNs hasARN := strings.HasPrefix(objectName, "arn:") if hasARN { objARN, err = arn.Parse(objectName) if err != nil { return fmt.Errorf("Invalid ARN format in object name: %s", objectName) } } // If has an ARN, validate that it matches the primary region if hasARN && objARN.Region != region { return fmt.Errorf("ARN region must match region %s: %s", region, objectName) } // Make sure either objectType is used or a full ARN is specified if len(objectType) == 0 && !hasARN { return fmt.Errorf("Must use objectType when a full ARN is not specified: %s", objectName) } // Make sure the ARN is for a supported service _, ok := typeMap[objARN.Service] if len(objectType) == 0 && !ok { return fmt.Errorf("Invalid service in ARN: %s", objARN.Service) } // Make sure objectType is one we understand _, ok = typeMap[objectType] if len(objectType) != 0 && (!ok || objectType == "ssm") { return fmt.Errorf("Invalid objectType: %s", objectType) } // If both ARN and objectType are used make sure they agree if len(objectType) != 0 && hasARN && typeMap[objectType] != typeMap[objARN.Service] { return fmt.Errorf("objectType does not match ARN: %s", objectName) } return nil } // Group requested objects by secret type and return a map (keyed by secret type) of slices of requests. // // This function will parse the objects array specified in the // SecretProviderClass passed on the mount request. All entries will be // validated. The object will be grouped into slices based on GetSecretType() // and returned in a map keyed by secret type. This is to allow batching of // requests. 
// func NewSecretDescriptorList(mountDir, translate, objectSpec string, regions []string) ( desc map[SecretType][]*SecretDescriptor, e error, ) { // See if we should substitite underscore for slash if len(translate) == 0 { translate = "_" // Use default } else if strings.ToLower(translate) == "false" { translate = "" // Turn it off. } else if len(translate) != 1 { return nil, fmt.Errorf("pathTranslation must be either 'False' or a single character string") } // Unpack the SecretProviderClass mount specification descriptors := make([]*SecretDescriptor, 0) err := yaml.Unmarshal([]byte(objectSpec), &descriptors) if err != nil { return nil, fmt.Errorf("Failed to load SecretProviderClass: %+v", err) } // Validate each record and check for duplicates groups := make(map[SecretType][]*SecretDescriptor, 0) names := make(map[string]bool) for _, descriptor := range descriptors { descriptor.translate = translate descriptor.mountDir = mountDir err = descriptor.validateSecretDescriptor(regions) if err != nil { return nil, err } // Group secrets of the same type together to allow batching requests sType := descriptor.GetSecretType() groups[sType] = append(groups[sType], descriptor) // Check for duplicate names if names[descriptor.ObjectName] { return nil, fmt.Errorf("Name already in use for objectName: %s", descriptor.ObjectName) } names[descriptor.ObjectName] = true if len(descriptor.ObjectAlias) > 0 { if names[descriptor.ObjectAlias] { return nil, fmt.Errorf("Name already in use for objectAlias: %s", descriptor.ObjectAlias) } names[descriptor.ObjectAlias] = true } if len(descriptor.JMESPath) == 0 { //jmesPath not used. No more checks continue } for _, jmesPathEntry := range descriptor.JMESPath { if names[jmesPathEntry.ObjectAlias] { return nil, fmt.Errorf("Name already in use for objectAlias: %s", jmesPathEntry.ObjectAlias) } names[jmesPathEntry.ObjectAlias] = true } } return groups, nil }
378
secrets-store-csi-driver-provider-aws
aws
Go
package provider

import (
	"fmt"
	"strings"
	"testing"
)

// Primary region used by all single-region test cases.
var singleRegion = []string{"us-west-2"}

// A Secrets Manager ARN is classified as a secretsmanager secret.
func TestGetSecretTypeSM(t *testing.T) {
	descriptor := SecretDescriptor{
		ObjectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:/feaw",
	}

	secretType := descriptor.GetSecretType()

	if secretType != SecretsManager {
		t.Fatalf("expected type secretsmanager but got type: %s", secretType)
	}
}

// An SSM ARN is classified as an ssmparameter secret.
func TestGetSecretTypeSSM(t *testing.T) {
	descriptor := SecretDescriptor{
		ObjectName: "arn:aws:ssm:us-west-2:123456789012:parameter/feaw",
	}

	secretType := descriptor.GetSecretType()

	if secretType != SSMParameter {
		t.Fatalf("expected type ssmparameter but got type: %s", secretType)
	}
}

// Helper asserting that validating descriptor fails with exactly expectedErrorMessage.
func RunDescriptorValidationTest(t *testing.T, descriptor *SecretDescriptor, expectedErrorMessage string) {
	err := descriptor.validateSecretDescriptor(singleRegion)
	if err == nil || err.Error() != expectedErrorMessage {
		t.Fatalf("Expected error: %s, got error: %v", expectedErrorMessage, err)
	}
}

// An empty descriptor must be rejected.
func TestNoNamePresent(t *testing.T) {
	descriptor := SecretDescriptor{}
	expectedErrorMessage := "Object name must be specified"
	RunDescriptorValidationTest(t, &descriptor, expectedErrorMessage)
}

// A malformed ARN must be rejected.
func TestNoTypePresent(t *testing.T) {
	objectName := "arn::"
	descriptor := SecretDescriptor{
		ObjectName: objectName,
	}
	expectedErrorMessage := fmt.Sprintf("Invalid ARN format in object name: %s", objectName)
	RunDescriptorValidationTest(t, &descriptor, expectedErrorMessage)
}

// An ARN for an unsupported service must be rejected.
func TestUnknownService(t *testing.T) {
	objectName := "arn:aws:sts:us-west-2:123456789012:parameter/feaw"
	descriptor := SecretDescriptor{
		ObjectName: objectName,
	}
	// FIX: was fmt.Sprintf with no format arguments (staticcheck S1039).
	expectedErrorMessage := "Invalid service in ARN: sts"
	RunDescriptorValidationTest(t, &descriptor, expectedErrorMessage)
}

// A full SSM ARN needs no objectType.
func TestSSMWithArn(t *testing.T) {
	objectName := "arn:aws:ssm:us-west-2:123456789012:parameter/feaw"
	descriptor := SecretDescriptor{
		ObjectName: objectName,
	}
	err := descriptor.validateSecretDescriptor(singleRegion)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
}

// A plain (non-ARN) name without objectType must be rejected.
func TestNoObjectTypeWoArn(t *testing.T) {
	objectName := "SomeSecret"
	descriptor := SecretDescriptor{
		ObjectName: objectName,
	}
	expectedErrorMessage := fmt.Sprintf("Must use objectType when a full ARN is not specified: %s", objectName)
	RunDescriptorValidationTest(t, &descriptor, expectedErrorMessage)
}

// An unsupported objectType must be rejected.
func TestInvalidObjectType(t *testing.T) {
	objectType := "sts"
	descriptor := SecretDescriptor{
		ObjectName: "SomeName",
		ObjectType: objectType,
	}
	expectedErrorMessage := fmt.Sprintf("Invalid objectType: %s", objectType)
	RunDescriptorValidationTest(t, &descriptor, expectedErrorMessage)
}

// objectType "ssm" is only valid inside an ARN, never as an explicit type.
func TestSSMObjectType(t *testing.T) {
	objectType := "ssm"
	descriptor := SecretDescriptor{
		ObjectName: "SomeName",
		ObjectType: objectType,
	}
	expectedErrorMessage := fmt.Sprintf("Invalid objectType: %s", objectType)
	RunDescriptorValidationTest(t, &descriptor, expectedErrorMessage)
}

// objectType must agree with the service named in the ARN.
func TestObjectTypeMisMatchArn(t *testing.T) {
	objectName := "arn:aws:secretsmanager:us-west-2:123456789012:secret:/feaw"
	descriptor := SecretDescriptor{
		ObjectName: objectName,
		ObjectType: "ssmparameter",
	}
	expectedErrorMessage := fmt.Sprintf("objectType does not match ARN: %s", objectName)
	RunDescriptorValidationTest(t, &descriptor, expectedErrorMessage)
}

// SSM parameters may not specify both objectVersion and objectVersionLabel.
func TestSSMBothVersionandLabel(t *testing.T) {
	objectName := "SomeParameter"
	descriptor := SecretDescriptor{
		ObjectName:         objectName,
		ObjectVersionLabel: "SomeLabel",
		ObjectVersion:      "VersionId",
		ObjectType:         "ssmparameter",
	}
	expectedErrorMessage := fmt.Sprintf("ssm parameters can not specify both objectVersion and objectVersionLabel: %s", objectName)
	RunDescriptorValidationTest(t, &descriptor, expectedErrorMessage)
}

// Duplicate objectName entries must be rejected.
func TestConflictingName(t *testing.T) {
	objects := `
      - objectName: secret1
        objectType: ssmparameter
      - objectName: secret1
        objectType: ssmparameter`
	_, err := NewSecretDescriptorList("/", "", objects, singleRegion)
	expectedErrorMessage := fmt.Sprintf("Name already in use for objectName: %s", "secret1")
	if err == nil || err.Error() != expectedErrorMessage {
		t.Fatalf("Expected error: %s, got error: %v", expectedErrorMessage, err)
	}
}

// Duplicate objectAlias entries must be rejected.
func TestConflictingAlias(t *testing.T) {
	objects := `
      - objectName: secret1
        objectType: ssmparameter
        objectAlias: aliasOne
      - objectName: secret2
        objectType: ssmparameter
        objectAlias: aliasOne`
	_, err := NewSecretDescriptorList("/", "", objects, singleRegion)
	expectedErrorMessage := fmt.Sprintf("Name already in use for objectAlias: %s", "aliasOne")
	if err == nil || err.Error() != expectedErrorMessage {
		t.Fatalf("Expected error: %s, got error: %v", expectedErrorMessage, err)
	}
}

// A JMES alias may not collide with a top-level objectAlias.
func TestConflictingAliasJMES(t *testing.T) {
	objects := `
      - objectName: secret1
        objectType: ssmparameter
        objectAlias: aliasOne
      - objectName: secret2
        objectType: ssmparameter
        jmesPath:
          - path: .username
            objectAlias: aliasOne`
	_, err := NewSecretDescriptorList("/", "", objects, singleRegion)
	expectedErrorMessage := fmt.Sprintf("Name already in use for objectAlias: %s", "aliasOne")
	if err == nil || err.Error() != expectedErrorMessage {
		t.Fatalf("Expected error: %s, got error: %v", expectedErrorMessage, err)
	}
}

// Every JMES entry requires an objectAlias.
func TestMissingAliasJMES(t *testing.T) {
	objects := `
      - objectName: secret2
        objectType: ssmparameter
        jmesPath:
          - path: .username`
	_, err := NewSecretDescriptorList("/", "", objects, singleRegion)
	// FIX: was fmt.Sprintf with no format arguments (staticcheck S1039).
	expectedErrorMessage := "Object alias must be specified for JMES object"
	if err == nil || err.Error() != expectedErrorMessage {
		t.Fatalf("Expected error: %s, got error: %v", expectedErrorMessage, err)
	}
}

// Every JMES entry requires a path.
func TestMissingPathJMES(t *testing.T) {
	objects := `
      - objectName: secret2
        objectType: ssmparameter
        jmesPath:
          - objectAlias: aliasOne`
	_, err := NewSecretDescriptorList("/", "", objects, singleRegion)
	// FIX: was fmt.Sprintf with no format arguments (staticcheck S1039).
	expectedErrorMessage := "Path must be specified for JMES object"
	if err == nil || err.Error() != expectedErrorMessage {
		t.Fatalf("Expected error: %s, got error: %v", expectedErrorMessage, err)
	}
}

// Test separation/grouping into ssm/secretsmanager with valid parameters.
func TestNewDescriptorList(t *testing.T) {
	objects := `
      - objectName: secret1
        objectType: secretsmanager
      - objectName: secret2
        objectType: ssmparameter
      - objectName: secret3
        objectType: ssmparameter
        objectAlias: myParm`
	descriptorList, err := NewSecretDescriptorList("/", "_", objects, singleRegion)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if len(descriptorList[SSMParameter]) != 2 {
		t.Fatalf("Only expected 2 ssm objects but got %d", len(descriptorList[SSMParameter]))
	}
	// FIX: the failure message said "ssm" for the SecretsManager count (copy-paste).
	if len(descriptorList[SecretsManager]) != 1 {
		t.Fatalf("Only expected 1 secretsmanager object but got %d", len(descriptorList[SecretsManager]))
	}
	if descriptorList[SSMParameter][0].GetFileName() != "secret2" {
		t.Fatalf("Bad file name %s", descriptorList[SSMParameter][0].GetFileName())
	}
	// FIX: the failure message printed element [0] instead of the failing [1].
	if descriptorList[SSMParameter][1].GetFileName() != "myParm" {
		t.Fatalf("Bad file name %s", descriptorList[SSMParameter][1].GetFileName())
	}
}

// Malformed YAML in the object specification must be rejected.
func TestBadYaml(t *testing.T) {
	objects := `
      - objectName: secret1
        objectType: secretsmanager
      - {`
	_, err := NewSecretDescriptorList("/", "", objects, singleRegion)
	if err == nil {
		t.Fatalf("Expected error but got none.")
	}
}

// Well-formed YAML whose descriptor fails validation (no objectType, not an ARN) must be rejected.
func TestErrorYaml(t *testing.T) {
	objects := `
      - objectName: secret1`
	_, err := NewSecretDescriptorList("/", "", objects, singleRegion)
	if err == nil {
		t.Fatalf("Expected error but got none.")
	}
}

// Validate enum strings are translated correctly.
func TestEnumStrings(t *testing.T) {
	if fmt.Sprint(SSMParameter) != "ssmparameter" {
		t.Fatalf("Bad enum string %s", SSMParameter)
	}
	if fmt.Sprint(SecretsManager) != "secretsmanager" {
		t.Fatalf("Bad enum string %s", SecretsManager)
	}
}

// A multi-character pathTranslation value must be rejected.
func TestBadTrans(t *testing.T) {
	objects := `
      - objectName: secret1
        objectType: secretsmanager
    `
	_, err := NewSecretDescriptorList("/", "--", objects, singleRegion)
	if err == nil || !strings.Contains(err.Error(), "must be either 'False' or a single character") {
		t.Fatalf("Unexpected error, got %v", err)
	}
}

// Mount paths combine the mount dir with the (possibly aliased) file name.
func TestGetPath(t *testing.T) {
	objects := `
    - objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:secret1"
      objectAlias: secret1
    - objectName: parm1
      objectType: ssmparameter
    `
	descriptorList, err := NewSecretDescriptorList("/mountpoint", "", objects, singleRegion)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if len(descriptorList[SSMParameter]) != 1 || len(descriptorList[SecretsManager]) != 1 {
		t.Fatalf("Missing descriptors")
	}
	if descriptorList[SSMParameter][0].GetMountPath() != "/mountpoint/parm1" {
		t.Errorf("Bad mount path for SSM parameter")
	}
	if descriptorList[SecretsManager][0].GetMountPath() != "/mountpoint/secret1" {
		t.Errorf("Bad mount path for secret")
	}
}

// Names containing "../" (path traversal) must be rejected for both services.
func TestTraversal(t *testing.T) {
	objects := []string{
		`
      - objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:/../pathTest-abc123"
    `,
		`
      - objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:mypath/../../pathTest-abc123"
    `,
		`
      - objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:mypath/.."
    `,
		`
      - objectName: "../mypath"
        objectType: secretsmanager
    `,
		`
      - objectName: "mypath/../../param"
        objectType: secretsmanager
    `,
		`
      - objectName: "mypath/.."
        objectType: secretsmanager
    `,
		`
      - objectName: "../mypath"
        objectType: ssmparameter
    `,
		`
      - objectName: "mypath/../../param"
        objectType: ssmparameter
    `,
		`
      - objectName: "mypath/.."
        objectType: ssmparameter
    `,
	}

	for _, obj := range objects {
		_, err := NewSecretDescriptorList("/", "False", obj, singleRegion)
		if err == nil || !strings.Contains(err.Error(), "path can not contain ../") {
			t.Errorf("Expected error: path can not contain ../, got error: %v\n%v", err, obj)
		}
	}
}

// Names that merely contain ".." (no traversal) must be accepted.
func TestNotTraversal(t *testing.T) {
	objects := []string{
		`
      - objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:/..pathTest-abc123"
    `,
		`
      - objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:..pathTest-abc123"
    `,
		`
      - objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:mypath../pathTest-abc123"
    `,
		`
      - objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:mypath.."
    `,
		`
      - objectName: "/..mypath"
        objectType: ssmparameter
    `,
		`
      - objectName: "..mypath"
        objectType: ssmparameter
    `,
		`
      - objectName: "mypath../param"
        objectType: ssmparameter
    `,
		`
      - objectName: "mypath.."
        objectType: ssmparameter
    `,
	}

	for _, obj := range objects {
		desc, err := NewSecretDescriptorList("/", "False", obj, singleRegion)
		if len(desc[SSMParameter]) == 0 && len(desc[SecretsManager]) == 0 {
			t.Errorf("TestNotTraversal: Missing descriptor for %v", obj)
		}
		if err != nil {
			t.Errorf("Unexpected error: %v\n%v", err, obj)
		}
	}
}

//If the failoverObject exists, then the object must have an alias.
func TestFallbackObjectRequiresAlias(t *testing.T) {
	objects := `
    - objectName: "arn:aws:secretsmanager:us-west-1:123456789012:secret:secret1"
      failoverObject:
        objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:secret1"`
	_, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-1", "us-west-2"})
	if err == nil || !strings.Contains(err.Error(), "object alias must be specified for objects with failover entries") {
		t.Fatalf("Unexpected error, got %v", err)
	}
}

//If either the main objectname or failoverObject's object name are not arns, then the objectType must be specified (failover is not ARN).
func TestFallbackNonARNStillNeedsObjectType(t *testing.T) {
	// Primary is an ARN but the failover entry is a bare name, so objectType is mandatory.
	objects := `
    - objectName: "arn:aws:secretsmanager:us-west-1:123456789012:secret:secret1"
      failoverObject: {objectName: "MySecret"}
      objectAlias: test
    `
	_, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-1", "us-west-2"})
	if err == nil || !strings.Contains(err.Error(), "Must use objectType when a full ARN is not specified") {
		t.Fatalf("Unexpected error, got %v", err)
	}
}

//If either the main objectname or failoverObject's object name are not arns, then the objectType must be specified (main objectName is not ARN).
func TestBackupArnMustBePairedWithObjectType(t *testing.T) {
	// Failover is an ARN but the primary is a bare name, so objectType is mandatory.
	objects := `
    - objectName: "MySecret"
      objectAlias: test
      failoverObject:
        objectName: "arn:aws:secretsmanager:us-west-1:123456789012:secret:secret1"`
	_, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-2", "us-west-1"})
	if err == nil || !strings.Contains(err.Error(), "Must use objectType when a full ARN is not specified") {
		t.Fatalf("Unexpected error, got %v", err)
	}
}

//If the failover descriptor is an ARN, and the objectType is specified, then they must match which provider to use.
func TestBackupArnDoesNotMatchType(t *testing.T) {
	// objectType says secretsmanager but the failover ARN names service "bad".
	objects := `
    - objectName: "arn:aws:secretsmanager:us-west-1:123456789012:secret:secret1"
      failoverObject: {objectName: "arn:aws:bad:us-west-2:123456789012:secret:secret1"}
      objectType: "secretsmanager"
      objectAlias: test
    `
	_, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-1", "us-west-2"})
	if err == nil || !strings.Contains(err.Error(), "objectType does not match ARN") {
		t.Fatalf("Unexpected error, got %v", err)
	}
}

//The failoverObject must be a valid service name.
func TestBackupArnInvalidType(t *testing.T) { objects := ` - objectName: "arn:aws:secretsmanager:us-west-1:123456789012:secret:secret1" failoverObject: {objectName: "arn:aws:bad:us-west-2:123456789012:secret:secret1"} objectAlias: test ` _, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-1", "us-west-2"}) if err == nil || !strings.Contains(err.Error(), "Invalid service in ARN") { t.Fatalf("Unexpected error, got %v", err) } } //Success case: both ARNs match. func TestBackupArnSuccess(t *testing.T) { objects := ` - objectName: "arn:aws:secretsmanager:us-west-1:123456789012:secret:secret1" failoverObject: {objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:secret1"} objectAlias: test ` _, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-1", "us-west-2"}) if err != nil { t.Errorf("Unexpected error: %v", err) } } //The main regions must now match. This main ARN is for one region, and the main region is configured for a different one. func TestPrimaryArnRequiresRegionMatch(t *testing.T) { objects := ` - objectName: "arn:aws:secretsmanager:us-west-1:123456789012:secret:secret1" objectAlias: test ` _, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-2"}) if err == nil || !strings.Contains(err.Error(), "ARN region must match region us-west-2") { t.Fatalf("Unexpected error, got %v", err) } } //The failover regions must now match. This failover ARN is for one region, and failover region is configured for a different one. 
func TestBackupArnRequiresRegionMatch(t *testing.T) {
	// Failover ARN is in us-west-2 but the failover region is configured as us-east-2.
	objects := `
    - objectName: "arn:aws:secretsmanager:us-west-1:123456789012:secret:secret1"
      failoverObject: {objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:secret1"}
      objectAlias: test
    `
	_, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-1", "us-east-2"})
	if err == nil || !strings.Contains(err.Error(), "ARN region must match region us-east-2") {
		t.Fatalf("Unexpected error, got %v", err)
	}
}

//If a failoverObject is given, then a failover region must be given.
func TestFallbackDataRequiresMultipleRegions(t *testing.T) {
	// Only one region is configured, so the failover entry is invalid.
	objects := `
    - objectName: "arn:aws:secretsmanager:us-west-1:123456789012:secret:secret1"
      failoverObject: {objectName: "arn:aws:secretsmanager:us-west-2:123456789012:secret:secret1"}
      objectAlias: test
    `
	_, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-1"})
	if err == nil || !strings.Contains(err.Error(), "failover object allowed only when failover region") {
		t.Fatalf("Unexpected error, got %v", err)
	}
}

//If using ssmparameter and a failoverObject, then using both objectVersion and objectVersionLabel is invalid
func TestObjectVersionAndLabelAreIncompatible(t *testing.T) {
	// The failover entry mixes objectVersion with objectVersionLabel.
	objects := `
    - objectName: "MySecret1"
      objectType: ssmparameter
      failoverObject:
        objectName: MySecretInAnotherRegion
        objectVersion: VersionId
        objectVersionLabel: MyLabel
      objectAlias: test
    `
	_, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-1", "us-west-2"})
	if err == nil || !strings.Contains(err.Error(), "ssm parameters can not specify both objectVersion and objectVersionLabel") {
		t.Fatalf("Unexpected error, got %v", err)
	}
}

//Validate that the mountpoint still follows the objectAlias, even if multiple regions are defined.
func TestGetPathForMultiregion(t *testing.T) {
	objects := `
    - objectName: "MySecret1"
      objectType: ssmparameter
      failoverObject:
        objectName: MySecretInAnotherRegion
      objectAlias: test
    `
	descriptorList, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-1", "us-west-2"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if len(descriptorList[SSMParameter]) != 1 {
		t.Fatalf("Missing descriptors")
	}
	// The alias, not the object name, drives the mount path.
	if descriptorList[SSMParameter][0].GetMountPath() != "/mountpoint/test" {
		t.Errorf("Bad mount path for SSM parameter")
	}
}

//A few objectVersion tests. The two must be equal.
func TestVersionIdsMustMatch(t *testing.T) {
	// Primary and failover specify different versions, which must be rejected.
	objects := `
    - objectName: "MySecret1"
      objectType: ssmparameter
      objectVersion: OldVersionId
      failoverObject:
        objectName: MySecretInAnotherRegion
        objectVersion: ADifferentVersionId
      objectAlias: test
    `
	_, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-1", "us-west-2"})
	if err == nil || !strings.Contains(err.Error(), "object versions must match between primary and failover regions") {
		t.Fatalf("Unexpected error, got %v", err)
	}
}

//Test Version Ids acceptable if they match.
func TestVersionidsMatch(t *testing.T) {
	objects := `
    - objectName: "MySecret1"
      objectType: ssmparameter
      objectVersion: VersionId
      failoverObject:
        objectName: MySecretInAnotherRegion
        objectVersion: VersionId
      objectAlias: test
    `
	descriptorList, err := NewSecretDescriptorList("/mountpoint", "", objects, []string{"us-west-1", "us-west-2"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if len(descriptorList[SSMParameter]) != 1 {
		t.Fatalf("Missing descriptors")
	}
	if descriptorList[SSMParameter][0].GetMountPath() != "/mountpoint/test" {
		t.Errorf("Bad mount path for SSM parameter")
	}
}
623
secrets-store-csi-driver-provider-aws
aws
Go
/* * Package responsible for fetching secrets from the service. * * This package defines the abstract interface used to fetch secrets, a factory * to supply the concrete implementation for a given secret type, and the * various implementations. * */ package provider import ( "context" "github.com/aws/aws-sdk-go/aws/session" "sigs.k8s.io/secrets-store-csi-driver/provider/v1alpha1" ) // Generic interface for the different secret providers. // type SecretProvider interface { GetSecretValues(ctx context.Context, descriptor []*SecretDescriptor, curMap map[string]*v1alpha1.ObjectVersion) (secret []*SecretValue, e error) } // Factory class to return singltons based on secret type (secretsmanager or ssmparameter). // type SecretProviderFactory struct { Providers map[SecretType]SecretProvider // Maps secret type to the provider. } // The prototype for the provider factory fatory // type ProviderFactoryFactory func(session []*session.Session, reigons []string) (factory *SecretProviderFactory) // Creates the provider factory. // // This factory catagorizes the request and returns the correct concrete // provider implementation using the secret type. // func NewSecretProviderFactory(sessions []*session.Session, regions []string) (factory *SecretProviderFactory) { return &SecretProviderFactory{ Providers: map[SecretType]SecretProvider{ SSMParameter: NewParameterStoreProvider(sessions, regions), SecretsManager: NewSecretsManagerProvider(sessions, regions), }, } } // Factory method to get the correct secret provider for the request type. // // This factory method uses the secret type to return the previously created // provider implementation. // func (p SecretProviderFactory) GetSecretProvider(secretType SecretType) (prov SecretProvider) { return p.Providers[secretType] }
59
secrets-store-csi-driver-provider-aws
aws
Go
package provider

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

// Contains the actual contents of the secret fetched from either Secrets
// Manager or SSM Parameter Store along with the original descriptor.
type SecretValue struct {
	Value      []byte
	Descriptor SecretDescriptor
}

// String redacts the contents so secret values never reach the logs.
func (p *SecretValue) String() string { return "<REDACTED>" }

// getJsonSecrets evaluates every configured JMES path against the secret's
// JSON content and returns one derived SecretValue per matched entry.
func (p *SecretValue) getJsonSecrets() (s []*SecretValue, e error) {

	selected := make([]*SecretValue, 0)
	if len(p.Descriptor.JMESPath) == 0 {
		return selected, nil
	}

	// The secret body must be valid JSON before any path can be evaluated.
	var document interface{}
	if err := json.Unmarshal(p.Value, &document); err != nil {
		return nil, fmt.Errorf("Invalid JSON used with jmesPath in secret: %s.", p.Descriptor.ObjectName)
	}

	for _, entry := range p.Descriptor.JMESPath {

		match, err := jmespath.Search(entry.Path, document)
		if err != nil {
			return nil, fmt.Errorf("Invalid JMES Path: %s.", entry.Path)
		}
		if match == nil {
			return nil, fmt.Errorf("JMES Path - %s for object alias - %s does not point to a valid object.", entry.Path, entry.ObjectAlias)
		}

		// Only scalar string results may be written out as files.
		text, isString := match.(string)
		if !isString {
			return nil, fmt.Errorf("Invalid JMES search result type for path:%s. Only string is allowed.", entry.Path)
		}

		selected = append(selected, &SecretValue{
			Value:      []byte(text),
			Descriptor: p.Descriptor.getJmesEntrySecretDescriptor(&entry),
		})
	}

	return selected, nil
}
63
secrets-store-csi-driver-provider-aws
aws
Go
package provider

import (
	"fmt"
	"testing"
)

// Name of the synthetic secret used by every JSON extraction test.
var TEST_OBJECT_NAME = "jsonObject"

// Helper: wraps jsonContent in a SecretValue with a single JMES entry and
// asserts that getJsonSecrets fails with exactly expectedErrorMessage.
func RunGetJsonSecretTest(t *testing.T, jsonContent string, path string, objectAlias string, expectedErrorMessage string) {
	jmesPath := []JMESPathEntry{
		{
			Path:        path,
			ObjectAlias: objectAlias,
		},
	}

	descriptor := SecretDescriptor{
		ObjectName: TEST_OBJECT_NAME,
		JMESPath:   jmesPath,
	}

	secretValue := SecretValue{
		Value:      []byte(jsonContent),
		Descriptor: descriptor,
	}

	_, err := secretValue.getJsonSecrets()
	if err == nil || err.Error() != expectedErrorMessage {
		t.Fatalf("Expected error: %s, got error: %v", expectedErrorMessage, err)
	}
}

// Non-JSON secret content must be rejected.
func TestNotValidJson(t *testing.T) {
	path := ".username"
	objectAlias := "test"
	jsonContent := "NotValidJson"
	expectedErrorMessage := fmt.Sprintf("Invalid JSON used with jmesPath in secret: %s.", TEST_OBJECT_NAME)

	RunGetJsonSecretTest(t, jsonContent, path, objectAlias, expectedErrorMessage)
}

// A path that matches nothing in the document must be rejected.
func TestJMESPathPointsToInvalidObject(t *testing.T) {
	jsonContent := `{"username": "ParameterStoreUser", "password": "PasswordForParameterStore"}`
	path := "testpath"
	objectAlias := "testAlias"
	expectedErrorMessage := fmt.Sprintf("JMES Path - %s for object alias - %s does not point to a valid object.", path, objectAlias)

	RunGetJsonSecretTest(t, jsonContent, path, objectAlias, expectedErrorMessage)
}

// A syntactically invalid JMES expression must be rejected.
func TestInvalidJMESPath(t *testing.T) {
	jsonContent := `{"username": "ParameterStoreUser", "password": "PasswordForParameterStore"}`
	path := ".testpath"
	objectAlias := "testAlias"
	expectedErrorMessage := fmt.Sprintf("Invalid JMES Path: %s.", path)

	RunGetJsonSecretTest(t, jsonContent, path, objectAlias, expectedErrorMessage)
}

// A path resolving to a non-string value must be rejected.
func TestInvalidJMESResultType(t *testing.T) {
	jsonContent := `{"username": 3}`
	path := "username"
	objectAlias := "testAlias"
	expectedErrorMessage := fmt.Sprintf("Invalid JMES search result type for path:%s. Only string is allowed.", path)

	RunGetJsonSecretTest(t, jsonContent, path, objectAlias, expectedErrorMessage)
}
73
secrets-store-csi-driver-provider-aws
aws
Go
/*
 * Package responsible for receiving incoming mount requests from the driver.
 *
 * This package acts as the high level orchestrator; unpacking the message and
 * calling the provider implementation to fetch the secrets.
 *
 */
package server

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"k8s.io/klog/v2"

	"google.golang.org/grpc"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8sv1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"sigs.k8s.io/secrets-store-csi-driver/provider/v1alpha1"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/secrets-store-csi-driver-provider-aws/auth"
	"github.com/aws/secrets-store-csi-driver-provider-aws/provider"
)

// Version filled in by Makefile during build.
var Version string

// Names of the attributes passed by the driver on each mount request.
const (
	namespaceAttrib      = "csi.storage.k8s.io/pod.namespace"
	acctAttrib           = "csi.storage.k8s.io/serviceAccount.name"
	podnameAttrib        = "csi.storage.k8s.io/pod.name"
	regionAttrib         = "region"                         // The attribute name for the region in the SecretProviderClass
	transAttrib          = "pathTranslation"                // Path translation char
	regionLabel          = "topology.kubernetes.io/region"  // The node label giving the region
	secProvAttrib        = "objects"                        // The attribute used to pass the SecretProviderClass definition (with what to mount)
	failoverRegionAttrib = "failoverRegion"                 // The attribute name for the failover region in the SecretProviderClass
)

// A Secrets Store CSI Driver provider implementation for AWS Secrets Manager and SSM Parameter Store.
//
// This server receives mount requests and then retrieves and stores the secrets
// from that request. The details of what secrets are required and where to
// store them are in the request. The secrets will be retrieved using the AWS
// credentials of the IAM role associated with the pod. If there is a failure
// during the mount of any one secret no secrets are written to the mount point.
// type CSIDriverProviderServer struct { *grpc.Server secretProviderFactory provider.ProviderFactoryFactory k8sClient k8sv1.CoreV1Interface driverWriteSecrets bool } // Factory function to create the server to handle incoming mount requests. // func NewServer( secretProviderFact provider.ProviderFactoryFactory, k8client k8sv1.CoreV1Interface, driverWriteSecrets bool, ) (srv *CSIDriverProviderServer, e error) { return &CSIDriverProviderServer{ secretProviderFactory: secretProviderFact, k8sClient: k8client, driverWriteSecrets: driverWriteSecrets, }, nil } // Mount handles each incomming mount request. // // The provider will fetch the secret value from the secret provider (Parameter // Store or Secrets Manager) and write the secrets to the mount point. The // version ids of the secrets are then returned to the driver. // func (s *CSIDriverProviderServer) Mount(ctx context.Context, req *v1alpha1.MountRequest) (response *v1alpha1.MountResponse, e error) { // Basic sanity check if len(req.GetTargetPath()) == 0 { return nil, fmt.Errorf("Missing mount path") } mountDir := req.GetTargetPath() // Unpack the request. var attrib map[string]string err := json.Unmarshal([]byte(req.GetAttributes()), &attrib) if err != nil { return nil, fmt.Errorf("failed to unmarshal attributes, error: %+v", err) } // Get the mount attributes. nameSpace := attrib[namespaceAttrib] svcAcct := attrib[acctAttrib] podName := attrib[podnameAttrib] region := attrib[regionAttrib] translate := attrib[transAttrib] failoverRegion := attrib[failoverRegionAttrib] // Make a map of the currently mounted versions (if any) curVersions := req.GetCurrentObjectVersion() curVerMap := make(map[string]*v1alpha1.ObjectVersion) for _, ver := range curVersions { curVerMap[ver.Id] = ver } // Unpack the file permission to use. 
var filePermission os.FileMode err = json.Unmarshal([]byte(req.GetPermission()), &filePermission) if err != nil { return nil, fmt.Errorf("failed to unmarshal file permission, error: %+v", err) } regions, err := s.getAwsRegions(region, failoverRegion, nameSpace, podName, ctx) if err != nil { klog.ErrorS(err, "Failed to initialize AWS session") return nil, err } klog.Infof("Servicing mount request for pod %s in namespace %s using service account %s with region(s) %s", podName, nameSpace, svcAcct, strings.Join(regions, ", ")) awsSessions, err := s.getAwsSessions(nameSpace, svcAcct, ctx, regions) if err != nil { return nil, err } if len(awsSessions) > 2 { klog.Errorf("Max number of region(s) exceeded: %s", strings.Join(regions, ", ")) return nil, err } // Get the list of secrets to mount. These will be grouped together by type // in a map of slices (map[string][]*SecretDescriptor) keyed by secret type // so that requests can be batched if the implementation allows it. descriptors, err := provider.NewSecretDescriptorList(mountDir, translate, attrib[secProvAttrib], regions) if err != nil { klog.Errorf("Failure reading descriptor list: %s", err) return nil, err } providerFactory := s.secretProviderFactory(awsSessions, regions) var fetchedSecrets []*provider.SecretValue for sType := range descriptors { // Iterate over each secret type. // Fetch all the secrets and update the curVerMap provider := providerFactory.GetSecretProvider(sType) secrets, err := provider.GetSecretValues(ctx, descriptors[sType], curVerMap) if err != nil { klog.Errorf("Failure getting secret values from provider type %s: %s", sType, err) return nil, err } fetchedSecrets = append(fetchedSecrets, secrets...) // Build up the list of all secrets } // Write out the secrets to the mount point after everything is fetched. 
var files []*v1alpha1.File for _, secret := range fetchedSecrets { file, err := s.writeFile(secret, filePermission) if err != nil { return nil, err } if file != nil { files = append(files, file) } } // Build the version response from the current version map and return it. var ov []*v1alpha1.ObjectVersion for id := range curVerMap { ov = append(ov, curVerMap[id]) } return &v1alpha1.MountResponse{Files: files, ObjectVersion: ov}, nil } // Private helper to get the aws lookup regions for a given pod. // // When a region in the mount request is available, the region is added as primary region to the lookup region list // If a region is not specified in the mount request, we must lookup the region from node label and add as primary region to the lookup region list // If both the region and node label region are not available, error will be thrown // If backupRegion is provided and is equal to region/node region, error will be thrown else backupRegion is added to the lookup region list // func (s *CSIDriverProviderServer) getAwsRegions(region, backupRegion, nameSpace, podName string, ctx context.Context) (response []string, err error) { var lookupRegionList []string // Find primary region. Fall back to region node if unavailable. if len(region) == 0 { region, err = s.getRegionFromNode(ctx, nameSpace, podName) if err != nil { return nil, fmt.Errorf("failed to retrieve region from node. error %+v", err) } } lookupRegionList = []string{region} // Find backup region if len(backupRegion) > 0 { if region == backupRegion { return nil, fmt.Errorf("%v: failover region cannot be the same as the primary region", region) } lookupRegionList = append(lookupRegionList, backupRegion) } return lookupRegionList, nil } // Private helper to get the aws sessions for all the lookup regions for a given pod. 
//
// Gets the pod's AWS creds for each lookup region.
// Establishes the connection using the AWS creds for each lookup region.
// If at least one session can not be created, an error is returned.
//
func (s *CSIDriverProviderServer) getAwsSessions(nameSpace, svcAcct string, ctx context.Context, lookupRegionList []string) (response []*session.Session, err error) {
	// Get the pod's AWS creds for each lookup region.
	var awsSessionsList []*session.Session
	for _, region := range lookupRegionList {
		oidcAuth, err := auth.NewAuth(ctx, region, nameSpace, svcAcct, s.k8sClient)
		if err != nil {
			return nil, fmt.Errorf("%s: %s", region, err)
		}
		awsSession, err := oidcAuth.GetAWSSession()
		if err != nil {
			return nil, fmt.Errorf("%s: %s", region, err)
		}
		awsSessionsList = append(awsSessionsList, awsSession)
	}
	return awsSessionsList, nil
}

// Return the provider plugin version information to the driver.
func (s *CSIDriverProviderServer) Version(ctx context.Context, req *v1alpha1.VersionRequest) (*v1alpha1.VersionResponse, error) {
	return &v1alpha1.VersionResponse{
		Version:        "v1alpha1",
		RuntimeName:    auth.ProviderName,
		RuntimeVersion: Version,
	}, nil
}

// Private helper to get the region information for a given pod.
//
// When a region is not specified in the mount request, we must look up the
// region of the requesting pod by first describing the pod to find the node and
// then describing the node to get the region label.
//
// See also: https://pkg.go.dev/k8s.io/client-go/kubernetes/typed/core/v1
func (s *CSIDriverProviderServer) getRegionFromNode(ctx context.Context, namespace string, podName string) (reg string, err error) {

	// Describe the pod to find the node: kubectl -o yaml -n <namespace> get pod <podid>
	pod, err := s.k8sClient.Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	// Describe node to get region: kubectl -o yaml -n <namespace> get node <nodeid>
	nodeName := pod.Spec.NodeName
	node, err := s.k8sClient.Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	labels := node.ObjectMeta.Labels
	region := labels[regionLabel]

	if len(region) == 0 {
		return "", fmt.Errorf("Region not found")
	}

	return region, nil
}

// Private helper to write a new secret or perform an update on a previously mounted secret.
//
// If the driver writes the secrets just return the driver data. Otherwise,
// we write the secret to a temp file and then rename in order to get as close
// to an atomic update as the file system supports. This is to avoid having
// pod applications inadvertently reading an empty or partial file as it is
// being updated.
func (s *CSIDriverProviderServer) writeFile(secret *provider.SecretValue, mode os.FileMode) (*v1alpha1.File, error) {

	// Don't write if the driver is supposed to do it.
	if s.driverWriteSecrets {
		return &v1alpha1.File{
			Path:     secret.Descriptor.GetFileName(),
			Mode:     int32(mode),
			Contents: secret.Value,
		}, nil
	}

	// Write to a tempfile first
	tmpFile, err := ioutil.TempFile(secret.Descriptor.GetMountDir(), secret.Descriptor.GetFileName())
	if err != nil {
		return nil, err
	}
	defer os.Remove(tmpFile.Name()) // Cleanup on fail
	defer tmpFile.Close()           // Don't leak file descriptors

	err = tmpFile.Chmod(mode) // Set correct permissions
	if err != nil {
		return nil, err
	}

	_, err = tmpFile.Write(secret.Value) // Write the secret
	if err != nil {
		return nil, err
	}

	err = tmpFile.Sync() // Make sure to flush to disk
	if err != nil {
		return nil, err
	}

	// Swap out the old secret for the new
	err = os.Rename(tmpFile.Name(), secret.Descriptor.GetMountPath())
	if err != nil {
		return nil, err
	}

	return nil, nil
}
329
secrets-store-csi-driver-provider-aws
aws
Go
package server import ( "context" "encoding/json" "fmt" "io/ioutil" "os" "path/filepath" "regexp" "strconv" "strings" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/secretsmanager" "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface" "github.com/aws/aws-sdk-go/service/ssm" "github.com/aws/aws-sdk-go/service/ssm/ssmiface" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/fake" "sigs.k8s.io/secrets-store-csi-driver/provider/v1alpha1" "sigs.k8s.io/yaml" "github.com/aws/secrets-store-csi-driver-provider-aws/auth" "github.com/aws/secrets-store-csi-driver-provider-aws/provider" ) type MockParameterStoreClient struct { ssmiface.SSMAPI rspCnt int rsp []*ssm.GetParametersOutput reqErr error } func (m *MockParameterStoreClient) GetParametersWithContext( ctx context.Context, input *ssm.GetParametersInput, options ...request.Option, ) (*ssm.GetParametersOutput, error) { if m.rspCnt >= len(m.rsp) { panic(fmt.Sprintf("Got unexpected request: %+v", input)) } rsp := m.rsp[m.rspCnt] m.rspCnt += 1 if m.reqErr != nil { return nil, m.reqErr } if rsp == nil { return nil, fmt.Errorf("Error in GetParameters") } failed := make([]*string, 0) for _, name := range input.Names { if strings.Contains(*name, "Fail") { failed = append(failed, name) } } rsp.InvalidParameters = append(rsp.InvalidParameters, failed...) 
return rsp, nil } type MockSecretsManagerClient struct { secretsmanageriface.SecretsManagerAPI getCnt int getRsp []*secretsmanager.GetSecretValueOutput descCnt int descRsp []*secretsmanager.DescribeSecretOutput reqErr error } func (m *MockSecretsManagerClient) GetSecretValueWithContext( ctx context.Context, input *secretsmanager.GetSecretValueInput, options ...request.Option, ) (*secretsmanager.GetSecretValueOutput, error) { if m.getCnt >= len(m.getRsp) { panic(fmt.Sprintf("Got unexpected request: %+v", input)) } rsp := m.getRsp[m.getCnt] m.getCnt += 1 if m.reqErr != nil { return nil, m.reqErr } if rsp == nil { return nil, fmt.Errorf("Error in GetSecretValue") } return rsp, nil } func (m *MockSecretsManagerClient) DescribeSecretWithContext( ctx context.Context, input *secretsmanager.DescribeSecretInput, options ...request.Option, ) (*secretsmanager.DescribeSecretOutput, error) { if m.descCnt >= len(m.descRsp) { panic(fmt.Sprintf("Got unexpected request: %+v", input)) } rsp := m.descRsp[m.descCnt] m.descCnt += 1 if m.reqErr != nil { return nil, m.reqErr } if rsp == nil { return nil, fmt.Errorf("Error in DescribeSecret") } return rsp, nil } func newServerWithMocks(tstData *testCase, driverWrites bool) *CSIDriverProviderServer { var ssmRsp, backupRegionSsmRsp []*ssm.GetParametersOutput var gsvRsp, backupRegionGsvRsp []*secretsmanager.GetSecretValueOutput var descRsp, backupRegionDescRsp []*secretsmanager.DescribeSecretOutput var reqErr, brReqErr, ssmReqErr, ssmBrReqErr error if tstData != nil { ssmRsp = tstData.ssmRsp gsvRsp = tstData.gsvRsp descRsp = tstData.descRsp backupRegionGsvRsp = tstData.brGsvRsp backupRegionDescRsp = tstData.brDescRsp backupRegionSsmRsp = tstData.brSsmRsp reqErr = tstData.reqErr brReqErr = tstData.brReqErr ssmReqErr = tstData.ssmReqErr ssmBrReqErr = tstData.ssmBrReqErr } // Get the test attributes. 
attributes := map[string]string{} if tstData != nil { attributes = tstData.attributes } region := attributes["region"] nodeName := attributes["nodeName"] roleARN := attributes["roleARN"] namespace := attributes["namespace"] accName := attributes["accName"] podName := attributes["podName"] failoverRegion := attributes["failoverRegion"] nodeRegion := region if len(nodeRegion) == 0 { nodeRegion = "fakeRegion" } factory := func(session []*session.Session, regions []string) (factory *provider.SecretProviderFactory) { if len(region) == 0 { region = nodeRegion } ssmClients := []provider.SecretsManagerClient{} if gsvRsp != nil || descRsp != nil || reqErr != nil { ssmClients = append(ssmClients, provider.SecretsManagerClient{ Region: region, Client: &MockSecretsManagerClient{getRsp: gsvRsp, descRsp: descRsp, reqErr: reqErr}, }) } if backupRegionGsvRsp != nil || backupRegionDescRsp != nil || brReqErr != nil { ssmClients = append(ssmClients, provider.SecretsManagerClient{ Region: failoverRegion, Client: &MockSecretsManagerClient{getRsp: backupRegionGsvRsp, descRsp: backupRegionDescRsp, reqErr: brReqErr}, }) } paramClients := []provider.ParameterStoreClient{} if ssmRsp != nil || ssmReqErr != nil { paramClients = append(paramClients, provider.ParameterStoreClient{ Region: region, Client: &MockParameterStoreClient{rsp: ssmRsp, reqErr: ssmReqErr}, }) } if backupRegionSsmRsp != nil || ssmBrReqErr != nil { paramClients = append(paramClients, provider.ParameterStoreClient{ Region: failoverRegion, Client: &MockParameterStoreClient{rsp: backupRegionSsmRsp, reqErr: ssmBrReqErr}, IsFailover: true, }) } return &provider.SecretProviderFactory{ Providers: map[provider.SecretType]provider.SecretProvider{ provider.SSMParameter: provider.NewParameterStoreProviderWithClients(paramClients...), provider.SecretsManager: provider.NewSecretsManagerProviderWithClients(ssmClients...), }, } } sa := &corev1.ServiceAccount{} if !strings.Contains(accName, "Fail") { sa.Name = accName } sa.Namespace = 
namespace sa.Annotations = map[string]string{"eks.amazonaws.com/role-arn": roleARN} pod := &corev1.Pod{} if !strings.Contains(podName, "Fail") { pod.Name = podName } pod.Namespace = namespace pod.Spec.NodeName = nodeName node := &corev1.Node{} if !strings.Contains(nodeName, "Fail") { node.Name = nodeName } if !strings.Contains(region, "Fail") { node.ObjectMeta.Labels = map[string]string{"topology.kubernetes.io/region": nodeRegion} } clientset := fake.NewSimpleClientset(sa, pod, node) return &CSIDriverProviderServer{ secretProviderFactory: factory, k8sClient: clientset.CoreV1(), driverWriteSecrets: driverWrites, } } type testCase struct { testName string attributes map[string]string mountObjs []map[string]interface{} ssmRsp []*ssm.GetParametersOutput brSsmRsp []*ssm.GetParametersOutput gsvRsp []*secretsmanager.GetSecretValueOutput brGsvRsp []*secretsmanager.GetSecretValueOutput descRsp []*secretsmanager.DescribeSecretOutput brDescRsp []*secretsmanager.DescribeSecretOutput ssmReqErr error ssmBrReqErr error reqErr error brReqErr error expErr string brExpErr string expSecrets map[string]string perms string } func buildMountReq(dir string, tst testCase, curState []*v1alpha1.ObjectVersion) *v1alpha1.MountRequest { attrMap := make(map[string]string) attrMap["csi.storage.k8s.io/pod.name"] = tst.attributes["podName"] attrMap["csi.storage.k8s.io/pod.namespace"] = tst.attributes["namespace"] attrMap["csi.storage.k8s.io/serviceAccount.name"] = tst.attributes["accName"] region := tst.attributes["region"] if len(region) > 0 && !strings.Contains(region, "Fail") { attrMap["region"] = region } failoverRegion := tst.attributes["failoverRegion"] if len(failoverRegion) > 0 { attrMap["failoverRegion"] = failoverRegion } translate := tst.attributes["pathTranslation"] if len(translate) > 0 { attrMap["pathTranslation"] = translate } objs, err := yaml.Marshal(tst.mountObjs) if err != nil { panic(err) } attrMap["objects"] = string(objs) attr, err := json.Marshal(attrMap) if err != nil { 
panic(err) } return &v1alpha1.MountRequest{ Attributes: string(attr), TargetPath: dir, Permission: tst.perms, CurrentObjectVersion: curState, } } func validateMounts(t *testing.T, dir string, tst testCase, rsp *v1alpha1.MountResponse) bool { // Make sure the mount response does not contain the Files attribute if rsp != nil && rsp.Files != nil && len(rsp.Files) > 0 { t.Errorf("%s: Mount response can not contain Files attribute when driverWriteSecrets is false", tst.testName) return false } // Check for the expected secrets for file, val := range tst.expSecrets { secretVal, err := ioutil.ReadFile(filepath.Join(dir, file)) if err != nil { t.Errorf("%s: Can not read file %s", tst.testName, file) return false } if string(secretVal) != val { t.Errorf("%s: Expected secret value %s got %s", tst.testName, val, string(secretVal)) return false } } return true } func validateResponse(t *testing.T, dir string, tst testCase, rsp *v1alpha1.MountResponse) bool { if rsp == nil { // Nothing to validate return false } // Make sure there is a file response if rsp.Files == nil || len(rsp.Files) <= 0 { t.Errorf("%s: Mount response must contain Files attribute when driverWriteSecrets is true", tst.testName) return false } // Map response by pathname fileRsp := make(map[string][]byte) for _, file := range rsp.Files { fileRsp[file.Path] = file.Contents } // Check for the expected secrets perm, err := strconv.Atoi(tst.perms) if err != nil { panic(err) } for file, val := range tst.expSecrets { secretVal := fileRsp[file] if string(secretVal) != val { t.Errorf("%s: Expected secret value %s got %s", tst.testName, val, string(secretVal)) return false } // Simulate the driver wrting the files fullPath := filepath.Join(dir, file) baseDir, _ := filepath.Split(fullPath) if err := os.MkdirAll(baseDir, os.FileMode(0777)); err != nil { t.Errorf("%s: could not create base directory: %v", tst.testName, err) return false } if err := ioutil.WriteFile(fullPath, secretVal, os.FileMode(perm)); err != nil { 
t.Errorf("%s: could not write secret: %v", tst.testName, err) return false } } return true } var stdAttributes map[string]string = map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN": "fakeRole", } var mountTests []testCase = []testCase{ { // Vanila success case. testName: "New Mount Success", attributes: stdAttributes, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", "TestParm1": "parm1", }, perms: "420", }, { // Multi-region success case. 
testName: "Multi Region Success", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN": "fakeRole", "failoverRegion": "fakeBackupRegion", }, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ nil, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, brGsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, brDescRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", "TestParm1": "parm1", }, perms: "420", }, { // Mount a json secret testName: "Mount Json Success", attributes: stdAttributes, mountObjs: []map[string]interface{}{ { "objectName": "TestSecret1", "objectType": "secretsmanager", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "username", }, { "path": "dbUser.password", "objectAlias": "password", }, }, }, { "objectName": "TestParm1", "objectType": "ssmparameter", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "ssmUsername", }, { "path": "dbUser.password", "objectAlias": "ssmPassword", }, }, }, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String(`{"dbUser": {"username": "ParameterStoreUser", "password" : "ParameterStorePassword"}}`), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String(`{"dbUser": {"username": "SecretsManagerUser", "password": "SecretsManagerPassword"}}`), VersionId: aws.String("1")}, }, descRsp: 
[]*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "TestSecret1": `{"dbUser": {"username": "SecretsManagerUser", "password": "SecretsManagerPassword"}}`, "TestParm1": `{"dbUser": {"username": "ParameterStoreUser", "password" : "ParameterStorePassword"}}`, "username": "SecretsManagerUser", "password": "SecretsManagerPassword", "ssmUsername": "ParameterStoreUser", "ssmPassword": "ParameterStorePassword", }, perms: "420", }, { // Mount a json secret and specify secret arn testName: "Mount Json Success-specify ARN", attributes: stdAttributes, mountObjs: []map[string]interface{}{ { "objectName": "arn:aws:secretsmanager:fakeRegion:123456789012:secret:geheimnis-ABc123", "objectAlias": "TestSecret1", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "username", }, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String(`{"dbUser": {"username": "SecretsManagerUser"}}`), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "TestSecret1": `{"dbUser": {"username": "SecretsManagerUser"}}`, "username": "SecretsManagerUser", }, perms: "420", }, { // Mount a binary secret testName: "New Mount Binary Success", attributes: stdAttributes, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretBinary: []byte("BinarySecret"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "TestSecret1": "BinarySecret", "TestParm1": "parm1", }, perms: "420", }, { // Test multiple SSM batches testName: "Big Batch Success", attributes: stdAttributes, 
mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "BinarySecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, {"objectName": "TestParm2", "objectType": "ssmparameter"}, {"objectName": "TestParm3", "objectType": "ssmparameter"}, {"objectName": "TestParm4", "objectType": "ssmparameter"}, {"objectName": "TestParm5", "objectType": "ssmparameter"}, {"objectName": "TestParm6", "objectType": "ssmparameter"}, {"objectName": "TestParm7", "objectType": "ssmparameter"}, {"objectName": "TestParm8", "objectType": "ssmparameter"}, {"objectName": "TestParm9", "objectType": "ssmparameter"}, {"objectName": "TestParm10", "objectType": "ssmparameter"}, {"objectName": "TestParm11", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3"), Version: aws.Int64(1)}, // Validate out of order. 
{Name: aws.String("TestParm2"), Value: aws.String("parm2"), Version: aws.Int64(1)}, {Name: aws.String("TestParm4"), Value: aws.String("parm4"), Version: aws.Int64(1)}, {Name: aws.String("TestParm5"), Value: aws.String("parm5"), Version: aws.Int64(1)}, {Name: aws.String("TestParm6"), Value: aws.String("parm6"), Version: aws.Int64(1)}, {Name: aws.String("TestParm7"), Value: aws.String("parm7"), Version: aws.Int64(1)}, {Name: aws.String("TestParm8"), Value: aws.String("parm8"), Version: aws.Int64(1)}, {Name: aws.String("TestParm9"), Value: aws.String("parm9"), Version: aws.Int64(1)}, {Name: aws.String("TestParm10"), Value: aws.String("parm10"), Version: aws.Int64(1)}, }, }, { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm11"), Value: aws.String("parm11"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, {SecretBinary: []byte("BinarySecret"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", "BinarySecret1": "BinarySecret", "TestParm1": "parm1", "TestParm2": "parm2", "TestParm3": "parm3", "TestParm4": "parm4", "TestParm5": "parm5", "TestParm6": "parm6", "TestParm7": "parm7", "TestParm8": "parm8", "TestParm9": "parm9", "TestParm10": "parm10", "TestParm11": "parm11", }, perms: "420", }, { // Verify failure if we can not find the pod testName: "Fail Pod Retrieval", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "FailPod", "nodeName": "fakeNode", "region": "", "roleARN": "fakeRole", }, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{}, gsvRsp: []*secretsmanager.GetSecretValueOutput{}, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "failed to retrieve region", 
expSecrets: map[string]string{}, perms: "420", }, { // Verify failure if we can not find the node testName: "Fail Node Retrieval", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "FailNode", "region": "", "roleARN": "fakeRole", }, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{}, gsvRsp: []*secretsmanager.GetSecretValueOutput{}, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "failed to retrieve region", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure if we can not find the region testName: "Fail Region Retrieval", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "FailRegion", "roleARN": "fakeRole", }, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{}, gsvRsp: []*secretsmanager.GetSecretValueOutput{}, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "failed to retrieve region", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure if we can not parse the file permissions. testName: "Fail File Perms", attributes: stdAttributes, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{}, gsvRsp: []*secretsmanager.GetSecretValueOutput{}, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "failed to unmarshal file permission", expSecrets: map[string]string{}, perms: "", }, { // Verify failure when we can not initialize the auth session (no role). 
testName: "Fail Session", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN": "", }, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{}, gsvRsp: []*secretsmanager.GetSecretValueOutput{}, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "An IAM role must be associated", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when there is an error in the descriptors testName: "Fail Descriptors", attributes: stdAttributes, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{}, gsvRsp: []*secretsmanager.GetSecretValueOutput{}, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "Object name must be specified", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when we the API call (GetSecretValue) fails testName: "Fail Fetch Secret", attributes: stdAttributes, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ nil, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "Failed to fetch secret", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when we the API call (GetParameters) fails testName: "Fail Fetch Parm", attributes: stdAttributes, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: 
[]*ssm.GetParametersOutput{ nil, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "Failed to fetch parameters from all regions", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when parameters in the batch fails testName: "Fail Fetch Parms", attributes: stdAttributes, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, {"objectName": "FailParm2", "objectType": "ssmparameter"}, {"objectName": "TestParm3", "objectType": "ssmparameter"}, {"objectName": "FailParm4", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3"), Version: aws.Int64(1)}, }, InvalidParameters: []*string{aws.String("FailParm2"), aws.String("FailParm4")}, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "Invalid parameters", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when we try to use a path name in a parameter (prevent traversal) testName: "Fail Write Param", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN": "fakeRole", "pathTranslation": "False", }, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "../TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: 
[]*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "(contains path separator)|(path can not contain)", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when we try to use a path name in a parameter (prevent traversal) testName: "Fail Write Secret", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN": "fakeRole", "pathTranslation": "False", }, mountObjs: []map[string]interface{}{ {"objectName": "./../TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("../TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "(contains path separator)|(path can not contain)", expSecrets: map[string]string{}, perms: "420", }, { // Verify success when slashes are translated in the path name testName: "Success With Slash", attributes: stdAttributes, mountObjs: []map[string]interface{}{ {"objectName": "mypath/TestSecret1", "objectType": "secretsmanager"}, {"objectName": "mypath/TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("mypath/TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "mypath_TestSecret1": "secret1", "mypath_TestParm1": "parm1", }, perms: "420", }, { // Verify success when slashes are 
translated to a custom character testName: "Slash to dash", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN": "fakeRole", "pathTranslation": "-", }, mountObjs: []map[string]interface{}{ {"objectName": "mypath/TestSecret1", "objectType": "secretsmanager"}, {"objectName": "mypath/TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("mypath/TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "mypath-TestSecret1": "secret1", "mypath-TestParm1": "parm1", }, perms: "420", }, { // Verify failure if we use a bad path translation string testName: "Fail pathTranslation", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN": "fakeRole", "pathTranslation": "--", }, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{}, gsvRsp: []*secretsmanager.GetSecretValueOutput{}, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "pathTranslation must be", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when we try to use a path name in a secret testName: "Leading Slash OK", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN": "fakeRole", "pathTranslation": "False", }, mountObjs: []map[string]interface{}{ {"objectName": "/TestSecret1", "objectType": "secretsmanager"}, {"objectName": "/TestParm1", "objectType": 
"ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ &ssm.Parameter{Name: aws.String("/TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", "TestParm1": "parm1", }, perms: "420", }, } /* stdAttributesWithBackupRegion is the common mount-attribute map shared by the multi-region test cases below. It mirrors the standard single-region attributes but adds "failoverRegion", so the provider is exercised with both a primary and a backup region client; "region" is left empty so it is resolved from the (fake) node. */ var stdAttributesWithBackupRegion map[string]string = map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN": "fakeRole", "failoverRegion": "fakeBackupRegion", } /* mountTestsForMultiRegion is the table of primary/failover-region test cases. NOTE(review): field semantics are inferred from naming and the values used here — gsvRsp/descRsp/ssmRsp appear to be the mocked primary-region SecretsManager GetSecretValue / DescribeSecret and SSM GetParameters responses, reqErr/ssmReqErr the primary-region request errors, and the br*-prefixed fields the corresponding backup-region mocks; confirm against the testCase struct definition. The table covers: failover on 5XX server errors, preferring the primary region when it succeeds, hard failure on 4XX or when both regions fail, invalid/partial SSM parameter batches, backupArn handling, failoverObject descriptors, and multi-batch (>10 parameter) SSM requests. */ var mountTestsForMultiRegion []testCase = []testCase{ { // Mount secret manager secrets from the fallback region Success. testName: "Multi Region Secrets Manager Fallback Success", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{nil}, descRsp: []*secretsmanager.DescribeSecretOutput{nil}, reqErr: awserr.NewRequestFailure( awserr.New(secretsmanager.ErrCodeInternalServiceError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), brGsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, expErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", }, perms: "420", }, { // Mount parameter secrets from the fallback region Success.
testName: "Multi Region Parameter Store Fallback Success", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{}, InvalidParameters: []*string{aws.String("TestParm1")}, }, }, ssmReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInternalServerError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), brSsmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, expErr: "", expSecrets: map[string]string{ "TestParm1": "parm1", }, perms: "420", }, { // Mount secrets from the fallback region Success. testName: "Multi Region Fallback Success", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{nil}, ssmReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInternalServerError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), gsvRsp: []*secretsmanager.GetSecretValueOutput{nil}, reqErr: awserr.NewRequestFailure( awserr.New(secretsmanager.ErrCodeInternalServiceError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), descRsp: []*secretsmanager.DescribeSecretOutput{}, brSsmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, brGsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, brDescRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", "TestParm1": "parm1", }, perms: "420", }, { // Mount secrets from the primary region Success.
testName: "Multi Region Prefers Primary", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, brSsmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("wrongSecret"), Version: aws.Int64(1)}, }, }, }, brGsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("wrongSecret"), VersionId: aws.String("1")}, }, brDescRsp: []*secretsmanager.DescribeSecretOutput{ {VersionIdsToStages: map[string][]*string{"TestSecret1": {aws.String("wrongSecret")}}}, }, expErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", "TestParm1": "parm1", }, perms: "420", }, { // Verify failure when the API call (GetSecretValue) fails for all the regions testName: "Multi Region Secret Manager Api Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret2", "objectType": "secretsmanager"}, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{nil}, descRsp: []*secretsmanager.DescribeSecretOutput{nil}, reqErr: awserr.NewRequestFailure( awserr.New(secretsmanager.ErrCodeInternalServiceError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), brGsvRsp: []*secretsmanager.GetSecretValueOutput{nil}, brDescRsp: []*secretsmanager.DescribeSecretOutput{nil}, brReqErr: awserr.NewRequestFailure( awserr.New(secretsmanager.ErrCodeInternalServiceError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), expErr: "Failed to fetch secret from all regions", brExpErr:
"Failed to fetch secret from all regions:", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when API call (GetParameters) fails for all the regions testName: "Multi Region Parameter Store Api Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestParm2", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{nil}, ssmReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInternalServerError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), brSsmRsp: []*ssm.GetParametersOutput{nil}, ssmBrReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInternalServerError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), expErr: "Failed to fetch parameters from all regions.", brExpErr: "Failed to fetch parameters from all regions.", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure to get region if region and node label is not available but failover region is available testName: "Multi Region Fallback Region Fail", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "FailNode", "region": "FailRegion", "roleARN": "fakeRole", "failoverRegion": "fakeBackupRegion", }, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, }, brGsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, brDescRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "failed to retrieve region from node", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when region label is equal to backup region testName: "Region Equals FallbackRegion Fail", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "fakeRegion", "roleARN": "fakeRole", "failoverRegion": "fakeRegion", }, mountObjs:
[]map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, }, expErr: "failover region cannot be the same as the primary region", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when we can not initialize the auth session (no role) in region and failoverRegion. testName: "Multi Region Session Fail", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "fakeRegion", "roleARN": "", "failoverRegion": "fakeBackupRegion", }, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, expErr: "fakeRegion: An IAM role must be associated", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when params partially exists in primary and secondary region. testName: "Multi Region Param Partial Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestParm1", "objectType": "ssmparameter"}, {"objectName": "TestParm2", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm2"), Value: aws.String("parm2"), Version: aws.Int64(1)}, }, InvalidParameters: []*string{aws.String("TestParm1")}, }, }, brSsmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, InvalidParameters: []*string{aws.String("TestParm2")}, }, }, expErr: "Invalid parameters", expSecrets: map[string]string{}, perms: "420", }, { // SecretsManager Primary Region 4XX Fail.
testName: "SecretsManager Primary Region 4XX Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{nil}, descRsp: []*secretsmanager.DescribeSecretOutput{nil}, reqErr: awserr.NewRequestFailure( awserr.New(secretsmanager.ErrCodeResourceNotFoundException, "Secrets Manager can't find the specified secret", fmt.Errorf("")), 400, ""), brGsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, brDescRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "Failed fetching secret TestSecret1", expSecrets: map[string]string{}, perms: "420", }, { // SecretsManager Primary Region 5XX Fail. testName: "SecretsManager Primary Region 5XX Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{nil}, descRsp: []*secretsmanager.DescribeSecretOutput{nil}, reqErr: awserr.NewRequestFailure( awserr.New(secretsmanager.ErrCodeInternalServiceError, "An error occurred on the server side", fmt.Errorf("")), 500, ""), brGsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, brDescRsp: []*secretsmanager.DescribeSecretOutput{nil}, expErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", }, perms: "420", }, { // SecretsManager Primary Region 5XX and Secondary 4XX Fail.
testName: "SecretsManager Primary Region 5XX And Secondary Region 4XX Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{nil}, descRsp: []*secretsmanager.DescribeSecretOutput{nil}, reqErr: awserr.NewRequestFailure( awserr.New(secretsmanager.ErrCodeInternalServiceError, "An error occurred on the server side", fmt.Errorf("")), 500, ""), brGsvRsp: []*secretsmanager.GetSecretValueOutput{nil}, brDescRsp: []*secretsmanager.DescribeSecretOutput{nil}, brReqErr: awserr.NewRequestFailure( awserr.New(secretsmanager.ErrCodeResourceNotFoundException, "Secrets Manager can't find the specified secret", fmt.Errorf("")), 400, ""), expErr: "fakeBackupRegion: Failed fetching secret TestSecret1: ResourceNotFoundException: Secrets Manager can't find the specified secret", expSecrets: map[string]string{}, perms: "420", }, { // ParameterStore Primary Region 4XX Fail. testName: "ParameterStore Primary Region 4XX Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm2"), Value: aws.String("parm2"), Version: aws.Int64(1)}, }, }, }, ssmReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInvalidKeyId, "The query key ID isn't valid.", fmt.Errorf("")), 400, ""), brSsmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, expErr: "InvalidKeyId: The query key ID isn't valid.", expSecrets: map[string]string{}, perms: "420", }, { // ParameterStore Primary Region 5XX Fail.
testName: "ParameterStore Primary Region 5XX Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{nil}, ssmReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInternalServerError, "An error occurred on the server side", fmt.Errorf("")), 500, ""), brSsmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, expErr: "", expSecrets: map[string]string{ "TestParm1": "parm1", }, perms: "420", }, { // ParameterStore Primary Region 5XX and Secondary region 4XX Fail. testName: "ParameterStore Primary Region 5XX And Secondary Region 4XX Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{nil}, ssmReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInternalServerError, "An error occurred on the server side", fmt.Errorf("")), 500, ""), brSsmRsp: []*ssm.GetParametersOutput{nil}, ssmBrReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInvalidKeyId, "The query key ID isn't valid.", fmt.Errorf("")), 400, ""), expErr: "InvalidKeyId: The query key ID isn't valid.", expSecrets: map[string]string{}, perms: "420", }, { // Multi Region params Fail due to invalid params in fallback region.
testName: "Multi Region Param Fallback Invalid Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestParm1", "objectType": "ssmparameter"}, {"objectName": "TestParm2", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm2"), Value: aws.String("parm2"), Version: aws.Int64(1)}, {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, brSsmRsp: []*ssm.GetParametersOutput{ { InvalidParameters: []*string{aws.String("TestParm1"), aws.String("TestParm2")}, }, }, expErr: "Invalid parameters: TestParm1, TestParm2", expSecrets: map[string]string{}, perms: "420", }, { // Multi Region params Fail due to 4XX error in fallback region. testName: "Multi Region Param Fallback 4XX Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestParm1", "objectType": "ssmparameter"}, {"objectName": "TestParm2", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm2"), Value: aws.String("parm2"), Version: aws.Int64(1)}, {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, brSsmRsp: []*ssm.GetParametersOutput{nil}, ssmBrReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInvalidKeyId, "Failed due to Invalid KeyId", fmt.Errorf("")), 400, ""), expErr: "InvalidKeyId: Failed due to Invalid KeyId", expSecrets: map[string]string{}, perms: "420", }, { // Multi Region Secrets fail due to 4XX in Fallback.
testName: "Multi Region Secrets Fallback 4XX Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestSecret2", "objectType": "secretsmanager"}, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, {SecretString: aws.String("secret2"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{nil}, brGsvRsp: []*secretsmanager.GetSecretValueOutput{nil}, brDescRsp: []*secretsmanager.DescribeSecretOutput{nil}, brReqErr: awserr.NewRequestFailure( awserr.New(secretsmanager.ErrCodeResourceNotFoundException, "Secrets Manager can't find the specified secret", fmt.Errorf("")), 400, ""), expErr: "Failed to describe secret", expSecrets: map[string]string{}, perms: "420", }, { // Mount secret manager secrets from the fallback region Success. testName: "Multi Region Secrets Manager Backup Arn Success", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ { "objectName": "arn:aws:secretsmanager:fakeRegion:123456789012:secret:geheimnis-ABc123", "backupArn": "arn:aws:secretsmanager:fakeBackupRegion:123456789012:secret:backupArn-12345", "objectType": "secretsmanager", "objectAlias": "TestSecret1", }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{nil}, descRsp: []*secretsmanager.DescribeSecretOutput{nil}, reqErr: awserr.NewRequestFailure( awserr.New(secretsmanager.ErrCodeInternalServiceError, "An error occurred on the server side", fmt.Errorf("")), 500, ""), brGsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, brDescRsp: []*secretsmanager.DescribeSecretOutput{nil}, expErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", }, perms: "420", }, { // Test multiple SSM batches for Multi Region Fail testName: "Multi Region Big Batch Fail", attributes: stdAttributesWithBackupRegion,
mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "BinarySecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, {"objectName": "TestParm2", "objectType": "ssmparameter"}, {"objectName": "TestParm3", "objectType": "ssmparameter"}, {"objectName": "TestParm4", "objectType": "ssmparameter"}, {"objectName": "TestParm5", "objectType": "ssmparameter"}, {"objectName": "TestParm6", "objectType": "ssmparameter"}, {"objectName": "TestParm7", "objectType": "ssmparameter"}, {"objectName": "TestParm8", "objectType": "ssmparameter"}, {"objectName": "TestParm9", "objectType": "ssmparameter"}, {"objectName": "TestParm10", "objectType": "ssmparameter"}, {"objectName": "TestParm11", "objectType": "ssmparameter"}, {"objectName": "TestParm12", "objectType": "ssmparameter"}, {"objectName": "TestParm13", "objectType": "ssmparameter"}, {"objectName": "TestParm14", "objectType": "ssmparameter"}, {"objectName": "TestParm15", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3"), Version: aws.Int64(1)}, {Name: aws.String("TestParm2"), Value: aws.String("parm2"), Version: aws.Int64(1)}, {Name: aws.String("TestParm4"), Value: aws.String("parm4"), Version: aws.Int64(1)}, {Name: aws.String("TestParm5"), Value: aws.String("parm5"), Version: aws.Int64(1)}, }, InvalidParameters: []*string{aws.String("TestParm6"), aws.String("TestParm7"), aws.String("TestParm8"), aws.String("TestParm9"), aws.String("TestParm10")}, }, { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm11"), Value: aws.String("parm11"), Version: aws.Int64(1)}, }, }, }, ssmReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInternalServerError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), gsvRsp:
[]*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, {SecretBinary: []byte("BinarySecret"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, brSsmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3"), Version: aws.Int64(1)}, {Name: aws.String("TestParm2"), Value: aws.String("parm2"), Version: aws.Int64(1)}, {Name: aws.String("TestParm4"), Value: aws.String("parm4"), Version: aws.Int64(1)}, {Name: aws.String("TestParm5"), Value: aws.String("parm5"), Version: aws.Int64(1)}, }, InvalidParameters: []*string{aws.String("TestParm6"), aws.String("TestParm7"), aws.String("TestParm8"), aws.String("TestParm9"), aws.String("TestParm10")}, }, }, expErr: "Invalid parameters: TestParm6, TestParm7, TestParm8, TestParm9, TestParm10", expSecrets: map[string]string{}, perms: "420", }, { // Test multiple SSM batches for Multi Region success testName: "Multi Region Big Batch Success", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "BinarySecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, {"objectName": "TestParm2", "objectType": "ssmparameter"}, {"objectName": "TestParm3", "objectType": "ssmparameter"}, {"objectName": "TestParm4", "objectType": "ssmparameter"}, {"objectName": "TestParm5", "objectType": "ssmparameter"}, {"objectName": "TestParm6", "objectType": "ssmparameter"}, {"objectName": "TestParm7", "objectType": "ssmparameter"}, {"objectName": "TestParm8", "objectType": "ssmparameter"}, {"objectName": "TestParm9", "objectType": "ssmparameter"}, {"objectName": "TestParm10", "objectType": "ssmparameter"}, {"objectName": "TestParm11", "objectType": "ssmparameter"}, {"objectName":
"TestParm12", "objectType": "ssmparameter"}, {"objectName": "TestParm13", "objectType": "ssmparameter"}, {"objectName": "TestParm14", "objectType": "ssmparameter"}, {"objectName": "TestParm15", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{}, InvalidParameters: []*string{}, }, { Parameters: []*ssm.Parameter{}, InvalidParameters: []*string{}, }, }, ssmReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInternalServerError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, {SecretBinary: []byte("BinarySecret"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, brSsmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3"), Version: aws.Int64(1)}, {Name: aws.String("TestParm2"), Value: aws.String("parm2"), Version: aws.Int64(1)}, {Name: aws.String("TestParm4"), Value: aws.String("parm4"), Version: aws.Int64(1)}, {Name: aws.String("TestParm5"), Value: aws.String("parm5"), Version: aws.Int64(1)}, {Name: aws.String("TestParm6"), Value: aws.String("parm6"), Version: aws.Int64(1)}, {Name: aws.String("TestParm7"), Value: aws.String("parm7"), Version: aws.Int64(1)}, {Name: aws.String("TestParm8"), Value: aws.String("parm8"), Version: aws.Int64(1)}, {Name: aws.String("TestParm9"), Value: aws.String("parm9"), Version: aws.Int64(1)}, {Name: aws.String("TestParm10"), Value: aws.String("parm10"), Version: aws.Int64(1)}, }}, { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm11"), Value: aws.String("parm11"), Version: aws.Int64(1)}, {Name: aws.String("TestParm12"), Value: aws.String("parm12"), Version: aws.Int64(1)}, {Name: aws.String("TestParm13"), Value: aws.String("parm13"), Version: aws.Int64(1)},
{Name: aws.String("TestParm14"), Value: aws.String("parm14"), Version: aws.Int64(1)}, {Name: aws.String("TestParm15"), Value: aws.String("parm15"), Version: aws.Int64(1)}, }, }, }, expErr: "", brExpErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", "BinarySecret1": "BinarySecret", "TestParm1": "parm1", "TestParm2": "parm2", "TestParm3": "parm3", "TestParm4": "parm4", "TestParm5": "parm5", "TestParm6": "parm6", "TestParm7": "parm7", "TestParm8": "parm8", "TestParm9": "parm9", "TestParm10": "parm10", "TestParm11": "parm11", "TestParm12": "parm12", "TestParm13": "parm13", "TestParm14": "parm14", "TestParm15": "parm15", }, perms: "420", }, { // Test partial SSM batches for Multi Region Fail testName: "Multi Region Partial Big Batch Fail", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "BinarySecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, {"objectName": "TestParm2", "objectType": "ssmparameter"}, {"objectName": "TestParm3", "objectType": "ssmparameter"}, {"objectName": "TestParm4", "objectType": "ssmparameter"}, {"objectName": "TestParm5", "objectType": "ssmparameter"}, {"objectName": "TestParm6", "objectType": "ssmparameter"}, {"objectName": "TestParm7", "objectType": "ssmparameter"}, {"objectName": "TestParm8", "objectType": "ssmparameter"}, {"objectName": "TestParm9", "objectType": "ssmparameter"}, {"objectName": "TestParm10", "objectType": "ssmparameter"}, {"objectName": "TestParm11", "objectType": "ssmparameter"}, {"objectName": "TestParm12", "objectType": "ssmparameter"}, {"objectName": "TestParm13", "objectType": "ssmparameter"}, {"objectName": "TestParm14", "objectType": "ssmparameter"}, {"objectName": "TestParm15", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version:
aws.Int64(1)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3"), Version: aws.Int64(1)}, {Name: aws.String("TestParm2"), Value: aws.String("parm2"), Version: aws.Int64(1)}, {Name: aws.String("TestParm4"), Value: aws.String("parm4"), Version: aws.Int64(1)}, {Name: aws.String("TestParm5"), Value: aws.String("parm5"), Version: aws.Int64(1)}, }, InvalidParameters: []*string{aws.String("TestParm6"), aws.String("TestParm7"), aws.String("TestParm8"), aws.String("TestParm9"), aws.String("TestParm10")}, }, { InvalidParameters: []*string{aws.String("TestParm11"), aws.String("TestParm12"), aws.String("TestParm13"), aws.String("TestParm14"), aws.String("TestParm15")}, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, {SecretBinary: []byte("BinarySecret"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, brSsmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1-sec"), Version: aws.Int64(1)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3-sec"), Version: aws.Int64(1)}, {Name: aws.String("TestParm2"), Value: aws.String("parm2-sec"), Version: aws.Int64(1)}, {Name: aws.String("TestParm8"), Value: aws.String("parm8"), Version: aws.Int64(1)}, {Name: aws.String("TestParm9"), Value: aws.String("parm9"), Version: aws.Int64(1)}, {Name: aws.String("TestParm10"), Value: aws.String("parm10"), Version: aws.Int64(1)}, }, InvalidParameters: []*string{aws.String("TestParm4"), aws.String("TestParm5"), aws.String("TestParm6"), aws.String("TestParm7")}, }, { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm11"), Value: aws.String("parm11"), Version: aws.Int64(1)}, {Name: aws.String("TestParm12"), Value: aws.String("parm12"), Version: aws.Int64(1)}, {Name: aws.String("TestParm13"), Value: aws.String("parm13"), Version: aws.Int64(1)}, {Name: aws.String("TestParm14"), Value: aws.String("parm14"),
Version: aws.Int64(1)}, {Name: aws.String("TestParm15"), Value: aws.String("parm15"), Version: aws.Int64(1)}, }, }, }, expErr: "Invalid parameters: TestParm6, TestParm7, TestParm8, TestParm9, TestParm10", expSecrets: map[string]string{}, perms: "420", }, { // Test partial SSM batches for Multi Region with Failover Descriptor success testName: "Multi Region Failover Descriptor Batch Success", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "BinarySecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm13", "objectType": "ssmparameter"}, {"objectName": "TestParm14", "objectType": "ssmparameter"}, { "objectName": "TestParm15", "objectType": "ssmparameter", "objectVersion": "VersionId", "failoverObject": map[string]string{ "objectName": "TestParm15AnotherRegion", "objectVersion": "VersionId", }, "inFallback": "true", "objectAlias": "TestParm15Alias", }, }, ssmRsp: []*ssm.GetParametersOutput{nil}, ssmReqErr: awserr.NewRequestFailure( awserr.New(secretsmanager.ErrCodeInternalServiceError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, {SecretBinary: []byte("BinarySecret"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, brSsmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm13"), Value: aws.String("parm13"), Version: aws.Int64(1)}, {Name: aws.String("TestParm14"), Value: aws.String("parm14"), Version: aws.Int64(1)}, {Name: aws.String("TestParm15AnotherRegion"), Value: aws.String("parm15"), Version: aws.Int64(1)}, }, }, }, expErr: "", brExpErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", "BinarySecret1": "BinarySecret", "TestParm13": "parm13", "TestParm14": "parm14", "TestParm15Alias": "parm15", }, perms: "420", }, { // Test
Json SSM batches for Multi Region success testName: "Multi Region Json SSM batches success", attributes: stdAttributesWithBackupRegion, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "BinarySecret1", "objectType": "secretsmanager"}, { "objectName": "TestParm1", "objectType": "ssmparameter", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "ssm1Username", }, }, }, { "objectName": "TestParm2", "objectType": "ssmparameter", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "ssm2Username", }, }, }, { "objectName": "TestParm3", "objectType": "ssmparameter", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "ssm3Username", }, }, }, { "objectName": "TestParm4", "objectType": "ssmparameter", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "ssm4Username", }, }, }, { "objectName": "TestParm5", "objectType": "ssmparameter", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "ssm5Username", }, }, }, { "objectName": "TestParm6", "objectType": "ssmparameter", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "ssm6Username", }, }, }, { "objectName": "TestParm7", "objectType": "ssmparameter", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "ssm7Username", }, }, }, { "objectName": "TestParm8", "objectType": "ssmparameter", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "ssm8Username", }, }, }, { "objectName": "TestParm9", "objectType": "ssmparameter", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "ssm9Username", }, }, }, { "objectName": "TestParm10", "objectType": "ssmparameter", "jmesPath": []map[string]string{ { "path": "dbUser.username", "objectAlias": "ssm10Username", }, }, }, {"objectName": "TestParm11", "objectType": "ssmparameter"}, {"objectName":
"TestParm12", "objectType": "ssmparameter"}, {"objectName": "TestParm13", "objectType": "ssmparameter"}, {"objectName": "TestParm14", "objectType": "ssmparameter"}, { "objectName": "TestParm15", "objectType": "ssmparameter", "objectVersion": "VersionId", "failoverObject": map[string]string{ "objectName": "TestParm15AnotherRegion", "objectVersion": "VersionId", }, "inFallback": "true", "objectAlias": "TestParm15Alias", }, {"objectName": "TestParm16", "objectType": "ssmparameter"}, {"objectName": "TestParm17", "objectType": "ssmparameter"}, {"objectName": "TestParm18", "objectType": "ssmparameter"}, {"objectName": "TestParm19", "objectType": "ssmparameter"}, {"objectName": "TestParm20", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{nil, nil}, ssmReqErr: awserr.NewRequestFailure( awserr.New(ssm.ErrCodeInternalServerError, "An error occurred on the server side.", fmt.Errorf("")), 500, ""), gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, {SecretBinary: []byte("BinarySecret"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{nil}, brSsmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String(`{"dbUser": {"username": "ParameterStoreUser1", "password" : "ParameterStorePassword1"}}`), Version: aws.Int64(1)}, {Name: aws.String("TestParm2"), Value: aws.String(`{"dbUser": {"username": "ParameterStoreUser2", "password" : "ParameterStorePassword2"}}`), Version: aws.Int64(1)}, {Name: aws.String("TestParm3"), Value: aws.String(`{"dbUser": {"username": "ParameterStoreUser3", "password" : "ParameterStorePassword3"}}`), Version: aws.Int64(1)}, {Name: aws.String("TestParm4"), Value: aws.String(`{"dbUser": {"username": "ParameterStoreUser4", "password" : "ParameterStorePassword4"}}`), Version: aws.Int64(1)}, {Name: aws.String("TestParm5"), Value: aws.String(`{"dbUser": {"username": "ParameterStoreUser5",
"password" : "ParameterStorePassword5"}}`), Version: aws.Int64(1)}, {Name: aws.String("TestParm6"), Value: aws.String(`{"dbUser": {"username": "ParameterStoreUser6", "password" : "ParameterStorePassword6"}}`), Version: aws.Int64(1)}, {Name: aws.String("TestParm7"), Value: aws.String(`{"dbUser": {"username": "ParameterStoreUser7", "password" : "ParameterStorePassword7"}}`), Version: aws.Int64(1)}, {Name: aws.String("TestParm8"), Value: aws.String(`{"dbUser": {"username": "ParameterStoreUser8", "password" : "ParameterStorePassword8"}}`), Version: aws.Int64(1)}, {Name: aws.String("TestParm9"), Value: aws.String(`{"dbUser": {"username": "ParameterStoreUser9", "password" : "ParameterStorePassword9"}}`), Version: aws.Int64(1)}, {Name: aws.String("TestParm10"), Value: aws.String(`{"dbUser": {"username": "ParameterStoreUser10", "password" : "ParameterStorePassword10"}}`), Version: aws.Int64(1)}, }, }, { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm11"), Value: aws.String("parm11"), Version: aws.Int64(1)}, {Name: aws.String("TestParm12"), Value: aws.String("parm12"), Version: aws.Int64(1)}, {Name: aws.String("TestParm13"), Value: aws.String("parm13"), Version: aws.Int64(1)}, {Name: aws.String("TestParm14"), Value: aws.String("parm14"), Version: aws.Int64(1)}, {Name: aws.String("TestParm15AnotherRegion"), Value: aws.String("parm15"), Version: aws.Int64(1)}, {Name: aws.String("TestParm16"), Value: aws.String("parm16"), Version: aws.Int64(1)}, {Name: aws.String("TestParm17"), Value: aws.String("parm17"), Version: aws.Int64(1)}, {Name: aws.String("TestParm18"), Value: aws.String("parm18"), Version: aws.Int64(1)}, {Name: aws.String("TestParm19"), Value: aws.String("parm19"), Version: aws.Int64(1)}, {Name: aws.String("TestParm20"), Value: aws.String("parm20"), Version: aws.Int64(1)}, }, }, }, expErr: "", brExpErr: "", expSecrets: map[string]string{ "TestSecret1": "secret1", "BinarySecret1": "BinarySecret", "TestParm1": `{"dbUser": {"username": "ParameterStoreUser1",
"password" : "ParameterStorePassword1"}}`, "TestParm2": `{"dbUser": {"username": "ParameterStoreUser2", "password" : "ParameterStorePassword2"}}`, "TestParm3": `{"dbUser": {"username": "ParameterStoreUser3", "password" : "ParameterStorePassword3"}}`, "TestParm4": `{"dbUser": {"username": "ParameterStoreUser4", "password" : "ParameterStorePassword4"}}`, "TestParm5": `{"dbUser": {"username": "ParameterStoreUser5", "password" : "ParameterStorePassword5"}}`, "TestParm6": `{"dbUser": {"username": "ParameterStoreUser6", "password" : "ParameterStorePassword6"}}`, "TestParm7": `{"dbUser": {"username": "ParameterStoreUser7", "password" : "ParameterStorePassword7"}}`, "TestParm8": `{"dbUser": {"username": "ParameterStoreUser8", "password" : "ParameterStorePassword8"}}`, "TestParm9": `{"dbUser": {"username": "ParameterStoreUser9", "password" : "ParameterStorePassword9"}}`, "TestParm10": `{"dbUser": {"username": "ParameterStoreUser10", "password" : "ParameterStorePassword10"}}`, "ssm1Username": "ParameterStoreUser1", "ssm2Username": "ParameterStoreUser2", "ssm3Username": "ParameterStoreUser3", "ssm4Username": "ParameterStoreUser4", "ssm5Username": "ParameterStoreUser5", "ssm6Username": "ParameterStoreUser6", "ssm7Username": "ParameterStoreUser7", "ssm8Username": "ParameterStoreUser8", "ssm9Username": "ParameterStoreUser9", "ssm10Username": "ParameterStoreUser10", "TestParm11": "parm11", "TestParm12": "parm12", "TestParm13": "parm13", "TestParm14": "parm14", "TestParm15Alias": "parm15", "TestParm16": "parm16", "TestParm17": "parm17", "TestParm18": "parm18", "TestParm19": "parm19", "TestParm20": "parm20", }, perms: "420", }, } /* writeOnlyMountTests run only when driverWriteSecrets = false (the provider writes the mounted files itself); they verify that object names containing a path separator are rejected in that mode. */ // Test that only run with driverWriteSecrets = false var writeOnlyMountTests []testCase = []testCase{ { // Verify failure when we try to use a path name in a secret testName: "Fail Write Path Secret", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN":
"fakeRole", "pathTranslation": "False", }, mountObjs: []map[string]interface{}{ {"objectName": "mypath/TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ &ssm.Parameter{Name: aws.String("TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "contains path separator", expSecrets: map[string]string{}, perms: "420", }, { // Verify failure when we try to use a path name in a secret testName: "Fail Write Path Parm", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN": "fakeRole", "pathTranslation": "False", }, mountObjs: []map[string]interface{}{ {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "mypath/TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ &ssm.Parameter{Name: aws.String("mypath/TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "contains path separator", expSecrets: map[string]string{}, perms: "420", }, } /* noWriteMountTests run only when driverWriteSecrets = true (the CSI driver writes the files), so path-like object names are permitted. NOTE(review): this table continues past the end of this chunk. */ // Test that only run with driverWriteSecrets = true var noWriteMountTests []testCase = []testCase{ { // Verify success when using leading slashes with driver write testName: "Full path OK", attributes: map[string]string{ "namespace": "fakeNS", "accName": "fakeSvcAcc", "podName": "fakePod", "nodeName": "fakeNode", "region": "", "roleARN": "fakeRole", "pathTranslation": "False", }, mountObjs: []map[string]interface{}{ {"objectName": "/mypath/TestSecret1",
"objectType": "secretsmanager"}, {"objectName": "/mypath/TestParm1", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ &ssm.Parameter{Name: aws.String("/mypath/TestParm1"), Value: aws.String("parm1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("secret1"), VersionId: aws.String("1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "mypath/TestSecret1": "secret1", "mypath/TestParm1": "parm1", }, perms: "420", }, } // Map test name for use as a directory var nameCharMap map[rune]bool = map[rune]bool{filepath.Separator: true, ' ': true} func nameMapper(c rune) rune { if nameCharMap[c] { return '_' } return c } func TestMounts(t *testing.T) { testCases := append(mountTests, mountTestsForMultiRegion...) allTests := append(testCases, writeOnlyMountTests...) for _, tst := range allTests { t.Run(tst.testName, func(t *testing.T) { dir, err := ioutil.TempDir("", strings.Map(nameMapper, tst.testName)) if err != nil { panic(err) } defer os.RemoveAll(dir) // Cleanup svr := newServerWithMocks(&tst, false) // Do the mount req := buildMountReq(dir, tst, []*v1alpha1.ObjectVersion{}) rsp, err := svr.Mount(nil, req) if len(tst.expErr) == 0 && err != nil { t.Fatalf("%s: Got unexpected error: %s", tst.testName, err) } if len(tst.expErr) != 0 && err == nil { t.Fatalf("%s: Expected error but got none", tst.testName) } if len(tst.expErr) == 0 && rsp == nil { t.Fatalf("%s: Got empty response", tst.testName) } if len(tst.expErr) != 0 && !regexp.MustCompile(tst.expErr).MatchString(err.Error()) { t.Fatalf("%s: Expected error %s got %s", tst.testName, tst.expErr, err.Error()) } if len(tst.brExpErr) != 0 && !regexp.MustCompile(tst.brExpErr).MatchString(err.Error()) { t.Fatalf("%s: Expected error %s got %s", tst.testName, tst.expErr, err.Error()) } validateMounts(t, req.TargetPath, tst, rsp) }) } } func TestMountsNoWrite(t 
*testing.T) { testCases := append(mountTests, mountTestsForMultiRegion...) allTests := append(testCases, noWriteMountTests...) for _, tst := range allTests { t.Run(tst.testName, func(t *testing.T) { dir, err := ioutil.TempDir("", strings.Map(nameMapper, tst.testName)) if err != nil { panic(err) } defer os.RemoveAll(dir) // Cleanup svr := newServerWithMocks(&tst, true) // Do the mount req := buildMountReq(dir, tst, []*v1alpha1.ObjectVersion{}) rsp, err := svr.Mount(nil, req) if len(tst.expErr) == 0 && err != nil { t.Fatalf("%s: Got unexpected error: %s", tst.testName, err) } if len(tst.expErr) != 0 && err == nil { t.Fatalf("%s: Expected error but got none", tst.testName) } if len(tst.expErr) == 0 && rsp == nil { t.Fatalf("%s: Got empty response", tst.testName) } if len(tst.expErr) != 0 && !regexp.MustCompile(tst.expErr).MatchString(err.Error()) { t.Fatalf("%s: Expected error %s got %s", tst.testName, tst.expErr, err.Error()) } if len(tst.brExpErr) != 0 && !regexp.MustCompile(tst.brExpErr).MatchString(err.Error()) { t.Fatalf("%s: Expected error %s got %s", tst.testName, tst.expErr, err.Error()) } validateResponse(t, req.TargetPath, tst, rsp) }) } } var remountTests []testCase = []testCase{ { // Test multiple SSM batches testName: "Initial Mount Success", attributes: stdAttributes, mountObjs: []map[string]interface{}{ // Secrets with and without lables and versions {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestSecret2", "objectType": "secretsmanager", "objectVersionLabel": "custom"}, {"objectName": "TestSecret3", "objectType": "secretsmanager", "objectVersion": "TestSecret3-1"}, {"objectName": "TestSecretJSON", "objectType": "secretsmanager", "jmesPath": []map[string]string{{"path": "username", "objectAlias": "username"}}}, // SSM parameters with and without lables and versions {"objectName": "TestParm1", "objectType": "ssmparameter", "objectVersionLabel": "current"}, {"objectName": "TestParm2", "objectType": "ssmparameter", 
"objectVersion": "1"}, {"objectName": "TestParm3", "objectType": "ssmparameter"}, {"objectName": "TestParm4", "objectType": "ssmparameter"}, {"objectName": "TestParm5", "objectType": "ssmparameter"}, {"objectName": "TestParm6", "objectType": "ssmparameter"}, {"objectName": "TestParm7", "objectType": "ssmparameter"}, {"objectName": "TestParm8", "objectType": "ssmparameter"}, {"objectName": "TestParm9", "objectType": "ssmparameter"}, {"objectName": "TestParm10", "objectType": "ssmparameter"}, {"objectName": "TestParm11", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm2"), Value: aws.String("parm2 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm4"), Value: aws.String("parm4 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm5"), Value: aws.String("parm5 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm6"), Value: aws.String("parm6 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm7"), Value: aws.String("parm7 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm8"), Value: aws.String("parm8 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm9"), Value: aws.String("parm9 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm10"), Value: aws.String("parm10 v1"), Version: aws.Int64(1)}, }, }, { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm11"), Value: aws.String("parm11 v1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretBinary: []byte("TestSecret1 v1"), VersionId: aws.String("TestSecret1-1")}, // Binary secret {SecretString: aws.String("TestSecret2 v1"), VersionId: aws.String("TestSecret2-1")}, {SecretString: aws.String("TestSecret3 v1"), VersionId: aws.String("TestSecret3-1")}, {SecretString: aws.String(`{"username": 
"SecretsManagerUser", "password": "SecretsManagerPassword"}`), VersionId: aws.String("TestSecretJSON-1")}, }, descRsp: []*secretsmanager.DescribeSecretOutput{}, expErr: "", expSecrets: map[string]string{ "TestSecret1": "TestSecret1 v1", "TestSecret2": "TestSecret2 v1", "TestSecret3": "TestSecret3 v1", "TestSecretJSON": `{"username": "SecretsManagerUser", "password": "SecretsManagerPassword"}`, "username": "SecretsManagerUser", "TestParm1": "parm1 v1", "TestParm2": "parm2 v1", "TestParm3": "parm3 v1", "TestParm4": "parm4 v1", "TestParm5": "parm5 v1", "TestParm6": "parm6 v1", "TestParm7": "parm7 v1", "TestParm8": "parm8 v1", "TestParm9": "parm9 v1", "TestParm10": "parm10 v1", "TestParm11": "parm11 v1", }, perms: "420", }, { // Test remount with no changes. testName: "No Change Success", attributes: stdAttributes, mountObjs: []map[string]interface{}{ // Secrets with and without lables and versions {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestSecret2", "objectType": "secretsmanager", "objectVersionLabel": "custom"}, {"objectName": "TestSecret3", "objectType": "secretsmanager", "objectVersion": "TestSecret3-1"}, {"objectName": "TestSecretJSON", "objectType": "secretsmanager", "jmesPath": []map[string]string{{"path": "username", "objectAlias": "username"}}}, // SSM parameters with and without lables and versions {"objectName": "TestParm1", "objectType": "ssmparameter", "objectVersionLabel": "current"}, {"objectName": "TestParm2", "objectType": "ssmparameter", "objectVersion": "1"}, {"objectName": "TestParm3", "objectType": "ssmparameter"}, {"objectName": "TestParm4", "objectType": "ssmparameter"}, {"objectName": "TestParm5", "objectType": "ssmparameter"}, {"objectName": "TestParm6", "objectType": "ssmparameter"}, {"objectName": "TestParm7", "objectType": "ssmparameter"}, {"objectName": "TestParm8", "objectType": "ssmparameter"}, {"objectName": "TestParm9", "objectType": "ssmparameter"}, {"objectName": "TestParm10", "objectType": 
"ssmparameter"}, {"objectName": "TestParm11", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm2"), Value: aws.String("parm2 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm4"), Value: aws.String("parm4 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm5"), Value: aws.String("parm5 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm6"), Value: aws.String("parm6 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm7"), Value: aws.String("parm7 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm8"), Value: aws.String("parm8 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm9"), Value: aws.String("parm9 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm10"), Value: aws.String("parm10 v1"), Version: aws.Int64(1)}, }, }, { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm11"), Value: aws.String("parm11 v1"), Version: aws.Int64(1)}, }, }, }, gsvRsp: []*secretsmanager.GetSecretValueOutput{}, // Should be describe only descRsp: []*secretsmanager.DescribeSecretOutput{ {VersionIdsToStages: map[string][]*string{"TestSecret1-1": {aws.String("AWSPENDING"), aws.String("AWSCURRENT")}}}, {VersionIdsToStages: map[string][]*string{"TestSecret2-1": {aws.String("custom"), aws.String("AWSCURRENT")}}}, {VersionIdsToStages: map[string][]*string{"TestSecretJSON-1": {aws.String("AWSCURRENT")}}}, }, expErr: "", expSecrets: map[string]string{ "TestSecret1": "TestSecret1 v1", "TestSecret2": "TestSecret2 v1", "TestSecret3": "TestSecret3 v1", "TestSecretJSON": `{"username": "SecretsManagerUser", "password": "SecretsManagerPassword"}`, "username": "SecretsManagerUser", "TestParm1": "parm1 v1", "TestParm2": "parm2 v1", "TestParm3": "parm3 v1", "TestParm4": "parm4 v1", "TestParm5": "parm5 v1", 
"TestParm6": "parm6 v1", "TestParm7": "parm7 v1", "TestParm8": "parm8 v1", "TestParm9": "parm9 v1", "TestParm10": "parm10 v1", "TestParm11": "parm11 v1", }, perms: "420", }, { // Make sure we see changes unless we use a fixed version testName: "Rotation1 Success", attributes: stdAttributes, mountObjs: []map[string]interface{}{ // Secrets with and without lables and versions {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestSecret2", "objectType": "secretsmanager", "objectVersionLabel": "custom"}, {"objectName": "TestSecret3", "objectType": "secretsmanager", "objectVersion": "TestSecret3-1"}, {"objectName": "TestSecretJSON", "objectType": "secretsmanager", "jmesPath": []map[string]string{{"path": "username", "objectAlias": "username"}}}, // SSM parameters with and without lables and versions {"objectName": "TestParm1", "objectType": "ssmparameter", "objectVersionLabel": "current"}, {"objectName": "TestParm2", "objectType": "ssmparameter", "objectVersion": "1"}, {"objectName": "TestParm3", "objectType": "ssmparameter"}, {"objectName": "TestParm4", "objectType": "ssmparameter"}, {"objectName": "TestParm5", "objectType": "ssmparameter"}, {"objectName": "TestParm6", "objectType": "ssmparameter"}, {"objectName": "TestParm7", "objectType": "ssmparameter"}, {"objectName": "TestParm8", "objectType": "ssmparameter"}, {"objectName": "TestParm9", "objectType": "ssmparameter"}, {"objectName": "TestParm10", "objectType": "ssmparameter"}, {"objectName": "TestParm11", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm2"), Value: aws.String("parm2 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm4"), Value: aws.String("parm4 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm5"), Value: 
aws.String("parm5 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm6"), Value: aws.String("parm6 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm7"), Value: aws.String("parm7 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm8"), Value: aws.String("parm8 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm9"), Value: aws.String("parm9 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm10"), Value: aws.String("parm10 v2"), Version: aws.Int64(2)}, }, }, { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm11"), Value: aws.String("parm11 v2"), Version: aws.Int64(2)}, }, }, }, descRsp: []*secretsmanager.DescribeSecretOutput{ {VersionIdsToStages: map[string][]*string{ "TestSecret1-1": {aws.String("AWSPREVIOUS")}, "TestSecret1-2": {aws.String("AWSCURRENT"), aws.String("AWSPENDING")}, }}, {VersionIdsToStages: map[string][]*string{ "TestSecret2-1": {aws.String("custom"), aws.String("AWSPREVIOUS")}, "TestSecret2-2": {aws.String("AWSCURRENT")}, }}, {VersionIdsToStages: map[string][]*string{"TestSecretJSON-1": {aws.String("AWSPREVIOUS")}}}, }, // Only should retrive TestSecret1 gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretBinary: []byte("TestSecret1 v2"), VersionId: aws.String("TestSecret1-2")}, // Binary secret {SecretString: aws.String(`{"username": "SecretsManagerUser2", "password": "SecretsManagerPassword"}`), VersionId: aws.String("TestSecretJSON-2")}, }, expErr: "", expSecrets: map[string]string{ "TestSecret1": "TestSecret1 v2", "TestSecret2": "TestSecret2 v1", "TestSecret3": "TestSecret3 v1", "TestSecretJSON": `{"username": "SecretsManagerUser2", "password": "SecretsManagerPassword"}`, "username": "SecretsManagerUser2", "TestParm1": "parm1 v2", "TestParm2": "parm2 v1", "TestParm3": "parm3 v1", "TestParm4": "parm4 v2", "TestParm5": "parm5 v2", "TestParm6": "parm6 v2", "TestParm7": "parm7 v2", "TestParm8": "parm8 v2", "TestParm9": "parm9 v2", "TestParm10": "parm10 v2", "TestParm11": "parm11 v2", }, perms: "420", }, { // 
Make sure we see changes when labels are moved testName: "Move Labels1 Success", attributes: stdAttributes, mountObjs: []map[string]interface{}{ // Secrets with and without lables and versions {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestSecret2", "objectType": "secretsmanager", "objectVersionLabel": "custom"}, {"objectName": "TestSecret3", "objectType": "secretsmanager", "objectVersion": "TestSecret3-1"}, {"objectName": "TestSecretJSON", "objectType": "secretsmanager", "jmesPath": []map[string]string{{"path": "username", "objectAlias": "username"}}}, // SSM parameters with and without lables and versions {"objectName": "TestParm1", "objectType": "ssmparameter", "objectVersionLabel": "current"}, {"objectName": "TestParm2", "objectType": "ssmparameter", "objectVersion": "1"}, {"objectName": "TestParm3", "objectType": "ssmparameter"}, {"objectName": "TestParm4", "objectType": "ssmparameter"}, {"objectName": "TestParm5", "objectType": "ssmparameter"}, {"objectName": "TestParm6", "objectType": "ssmparameter"}, {"objectName": "TestParm7", "objectType": "ssmparameter"}, {"objectName": "TestParm8", "objectType": "ssmparameter"}, {"objectName": "TestParm9", "objectType": "ssmparameter"}, {"objectName": "TestParm10", "objectType": "ssmparameter"}, {"objectName": "TestParm11", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm2"), Value: aws.String("parm2 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3 v1"), Version: aws.Int64(1)}, {Name: aws.String("TestParm4"), Value: aws.String("parm4 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm5"), Value: aws.String("parm5 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm6"), Value: aws.String("parm6 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm7"), Value: 
aws.String("parm7 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm8"), Value: aws.String("parm8 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm9"), Value: aws.String("parm9 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm10"), Value: aws.String("parm10 v2"), Version: aws.Int64(2)}, }, }, { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm11"), Value: aws.String("parm11 v2"), Version: aws.Int64(2)}, }, }, }, descRsp: []*secretsmanager.DescribeSecretOutput{ {VersionIdsToStages: map[string][]*string{ "TestSecret1-1": {aws.String("AWSPREVIOUS")}, "TestSecret1-2": {aws.String("AWSCURRENT"), aws.String("AWSPENDING")}, }}, {VersionIdsToStages: map[string][]*string{ "TestSecret2-1": {aws.String("AWSPREVIOUS")}, "TestSecret2-2": {aws.String("custom"), aws.String("AWSCURRENT")}, }}, {VersionIdsToStages: map[string][]*string{"TestSecretJSON-2": {aws.String("AWSCURRENT")}}}, }, // Only should retrive TestSecret1 gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("TestSecret2 v2"), VersionId: aws.String("TestSecret2-2")}, }, expErr: "", expSecrets: map[string]string{ "TestSecret1": "TestSecret1 v2", "TestSecret2": "TestSecret2 v2", "TestSecret3": "TestSecret3 v1", "TestSecretJSON": `{"username": "SecretsManagerUser2", "password": "SecretsManagerPassword"}`, "username": "SecretsManagerUser2", "TestParm1": "parm1 v2", "TestParm2": "parm2 v2", "TestParm3": "parm3 v1", "TestParm4": "parm4 v2", "TestParm5": "parm5 v2", "TestParm6": "parm6 v2", "TestParm7": "parm7 v2", "TestParm8": "parm8 v2", "TestParm9": "parm9 v2", "TestParm10": "parm10 v2", "TestParm11": "parm11 v2", }, perms: "420", }, { // Make sure we see changes when we change hard coded version in the request testName: "Move Version Success", attributes: stdAttributes, mountObjs: []map[string]interface{}{ // Secrets with and without lables and versions {"objectName": "TestSecret1", "objectType": "secretsmanager"}, {"objectName": "TestSecret2", "objectType": 
"secretsmanager", "objectVersionLabel": "custom"}, {"objectName": "TestSecret3", "objectType": "secretsmanager", "objectVersion": "TestSecret3-2"}, {"objectName": "TestSecretJSON", "objectType": "secretsmanager", "jmesPath": []map[string]string{{"path": "username", "objectAlias": "username"}}}, // SSM parameters with and without lables and versions {"objectName": "TestParm1", "objectType": "ssmparameter", "objectVersionLabel": "current"}, {"objectName": "TestParm2", "objectType": "ssmparameter", "objectVersion": "2"}, {"objectName": "TestParm3", "objectType": "ssmparameter"}, {"objectName": "TestParm4", "objectType": "ssmparameter"}, {"objectName": "TestParm5", "objectType": "ssmparameter"}, {"objectName": "TestParm6", "objectType": "ssmparameter"}, {"objectName": "TestParm7", "objectType": "ssmparameter"}, {"objectName": "TestParm8", "objectType": "ssmparameter"}, {"objectName": "TestParm9", "objectType": "ssmparameter"}, {"objectName": "TestParm10", "objectType": "ssmparameter"}, {"objectName": "TestParm11", "objectType": "ssmparameter"}, }, ssmRsp: []*ssm.GetParametersOutput{ { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm1"), Value: aws.String("parm1 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm2"), Value: aws.String("parm2 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm3"), Value: aws.String("parm3 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm4"), Value: aws.String("parm4 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm5"), Value: aws.String("parm5 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm6"), Value: aws.String("parm6 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm7"), Value: aws.String("parm7 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm8"), Value: aws.String("parm8 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm9"), Value: aws.String("parm9 v2"), Version: aws.Int64(2)}, {Name: aws.String("TestParm10"), Value: aws.String("parm10 v2"), Version: aws.Int64(2)}, 
}, }, { Parameters: []*ssm.Parameter{ {Name: aws.String("TestParm11"), Value: aws.String("parm11 v2"), Version: aws.Int64(2)}, }, }, }, descRsp: []*secretsmanager.DescribeSecretOutput{ {VersionIdsToStages: map[string][]*string{ "TestSecret1-1": {aws.String("AWSPREVIOUS")}, "TestSecret1-2": {aws.String("AWSCURRENT"), aws.String("AWSPENDING")}, }}, {VersionIdsToStages: map[string][]*string{ "TestSecret2-1": {aws.String("AWSPREVIOUS")}, "TestSecret2-2": {aws.String("custom"), aws.String("AWSCURRENT")}, }}, {VersionIdsToStages: map[string][]*string{"TestSecretJSON-2": {aws.String("AWSCURRENT")}}}, }, // Only should retrive TestSecret1 gsvRsp: []*secretsmanager.GetSecretValueOutput{ {SecretString: aws.String("TestSecret3 v2"), VersionId: aws.String("TestSecret3-2")}, }, expErr: "", expSecrets: map[string]string{ "TestSecret1": "TestSecret1 v2", "TestSecret2": "TestSecret2 v2", "TestSecret3": "TestSecret3 v2", "TestSecretJSON": `{"username": "SecretsManagerUser2", "password": "SecretsManagerPassword"}`, "username": "SecretsManagerUser2", "TestParm1": "parm1 v2", "TestParm2": "parm2 v2", "TestParm3": "parm3 v2", "TestParm4": "parm4 v2", "TestParm5": "parm5 v2", "TestParm6": "parm6 v2", "TestParm7": "parm7 v2", "TestParm8": "parm8 v2", "TestParm9": "parm9 v2", "TestParm10": "parm10 v2", "TestParm11": "parm11 v2", }, perms: "420", }, } // Validate rotation func TestReMounts(t *testing.T) { dir, err := ioutil.TempDir("", "TestReMounts") if err != nil { panic(err) } defer os.RemoveAll(dir) // Cleanup curState := []*v1alpha1.ObjectVersion{} for _, tst := range remountTests { t.Run(tst.testName, func(t *testing.T) { svr := newServerWithMocks(&tst, false) // Do the mount req := buildMountReq(dir, tst, curState) rsp, err := svr.Mount(nil, req) if len(tst.expErr) == 0 && err != nil { t.Fatalf("%s: Got unexpected error: %s", tst.testName, err) } if len(tst.expErr) != 0 && !regexp.MustCompile(tst.expErr).MatchString(err.Error()) { t.Fatalf("%s: Expected error %s got %s", 
tst.testName, tst.expErr, err.Error()) } if len(tst.expErr) == 0 && rsp == nil { t.Fatalf("%s: Got empty response", tst.testName) } if rsp != nil { curState = rsp.ObjectVersion // Mount state for next iteration } validateMounts(t, req.TargetPath, tst, rsp) }) } } // Validate rotation func TestNoWriteReMounts(t *testing.T) { dir, err := ioutil.TempDir("", "TestReMounts") if err != nil { panic(err) } defer os.RemoveAll(dir) // Cleanup curState := []*v1alpha1.ObjectVersion{} for _, tst := range remountTests { t.Run(tst.testName, func(t *testing.T) { svr := newServerWithMocks(&tst, true) // Do the mount req := buildMountReq(dir, tst, curState) rsp, err := svr.Mount(nil, req) if len(tst.expErr) == 0 && err != nil { t.Fatalf("%s: Got unexpected error: %s", tst.testName, err) } if len(tst.expErr) != 0 && !regexp.MustCompile(tst.expErr).MatchString(err.Error()) { t.Fatalf("%s: Expected error %s got %s", tst.testName, tst.expErr, err.Error()) } if len(tst.expErr) == 0 && rsp == nil { t.Fatalf("%s: Got empty response", tst.testName) } // Simulate the driver behaviour of only keeping updated secrets. 
if err := os.RemoveAll(req.TargetPath); err != nil { t.Fatalf("%s: could not clean directory - %v", tst.testName, err) } if rsp != nil { curState = rsp.ObjectVersion // Mount state for next iteration } validateResponse(t, req.TargetPath, tst, rsp) }) } } func TestEmptyAttributes(t *testing.T) { svr := newServerWithMocks(nil, false) req := &v1alpha1.MountRequest{ Attributes: "", // Should error TargetPath: "/tmp", Permission: "420", CurrentObjectVersion: []*v1alpha1.ObjectVersion{}, } rsp, err := svr.Mount(nil, req) if rsp != nil { t.Fatalf("TestEmptyAttributes: got unexpected response") } else if err == nil { t.Fatalf("TestEmptyAttributes: did not get error") } else if !strings.Contains(err.Error(), "failed to unmarshal attributes") { t.Fatalf("TestEmptyAttributes: Unexpected error %s", err.Error()) } } func TestNoPath(t *testing.T) { svr := newServerWithMocks(nil, false) req := &v1alpha1.MountRequest{ // Missing TargetPath Attributes: "{}", Permission: "420", CurrentObjectVersion: []*v1alpha1.ObjectVersion{}, } rsp, err := svr.Mount(nil, req) if rsp != nil { t.Fatalf("TestNoPath: got unexpected response") } else if err == nil { t.Fatalf("TestNoPath: did not get error") } else if !strings.Contains(err.Error(), "Missing mount path") { t.Fatalf("TestNoPath: Unexpected error %s", err.Error()) } } // Make sure the Version call works func TestDriverVersion(t *testing.T) { svr, err := NewServer(nil, nil, true) if err != nil { t.Fatalf("TestDriverVersion: got unexpected server error %s", err.Error()) } if svr == nil { t.Fatalf("TestDriverVersion: got empty server") } ver, err := svr.Version(nil, &v1alpha1.VersionRequest{}) if err != nil { t.Fatalf("TestDriverVersion: got unexpected error %s", err.Error()) } if ver == nil { t.Fatalf("TestDriverVersion: got empty response") } if ver.RuntimeName != auth.ProviderName { t.Fatalf("TestDriverVersion: wrong RuntimeName: %s", ver.RuntimeName) } }
2,554
secrets-store-csi-driver-provider-aws
aws
Go
package utils

import (
	"errors"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// IsFatalError reports whether the given error represents a fatal
// (non-retryable) failure: an AWS request that failed with a 4XX
// client-side status code. Nested errors are inspected recursively,
// first through awserr.Error.OrigErr and then through the standard
// errors.Unwrap chain.
func IsFatalError(errMsg error) bool {
	// A request failure carrying a 4XX status is a client-side error
	// and will not succeed on retry.
	if reqFailure, ok := errMsg.(awserr.RequestFailure); ok {
		if code := reqFailure.StatusCode(); code >= 400 && code < 500 {
			return true
		}
	}

	// Descend into the original error wrapped by an awserr.Error, if any.
	if awsErr, ok := errMsg.(awserr.Error); ok && awsErr.OrigErr() != nil {
		return IsFatalError(awsErr.OrigErr())
	}

	// Fall back to the standard library's unwrapping chain.
	if inner := errors.Unwrap(errMsg); inner != nil {
		return IsFatalError(inner)
	}

	return false
}
28
secrets-store-csi-driver-provider-aws
aws
Go
package utils

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/stretchr/testify/assert"
)

type awsError awserr.Error

// WrapAwsError is a minimal awserr.Error implementation used to build
// nested error chains for exercising IsFatalError. Only Error and
// OrigErr are implemented; the embedded interface supplies the rest of
// the method set.
type WrapAwsError struct {
	awsError

	code    string
	message string
	err     error
}

// Error renders the error in the standard awserr format.
func (w WrapAwsError) Error() string {
	return awserr.SprintError(w.code, w.message, "", w.OrigErr())
}

// OrigErr exposes the wrapped inner error, if any.
func (w WrapAwsError) OrigErr() error {
	return w.err
}

// A 403 request failure buried inside a credential wrapper must be
// treated as fatal.
func TestIsFatalError_CannotAssumeRoleWithWebIdentity_isFatal(t *testing.T) {
	accessDenied := WrapAwsError{code: "AccessDenied", message: "Not authorized to perform sts:AssumeRoleWithWebIdentity", err: nil}
	requestFailure := awserr.NewRequestFailure(accessDenied, 403, "someId")
	wrapped := WrapAwsError{code: "WebIdentityErr", message: "failed to retrieve credentials", err: requestFailure}

	assert.True(t, IsFatalError(wrapped))
}

// A wrapper with no underlying request failure carries no status code
// and must not be treated as fatal.
func TestIsFatalError_WrapperWithoutOriginError_nonFatal(t *testing.T) {
	wrapped := WrapAwsError{code: "WebIdentityErr", message: "failed to retrieve credentials", err: nil}

	assert.False(t, IsFatalError(wrapped))
}
46
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// Package communicator implements the base communicator for network connections.
package communicator

import (
	"errors"
	"sync"
	"time"

	"github.com/aws/session-manager-plugin/src/config"
	"github.com/aws/session-manager-plugin/src/log"
	"github.com/aws/session-manager-plugin/src/websocketutil"
	"github.com/gorilla/websocket"
)

// IWebSocketChannel is the interface for DataChannel.
type IWebSocketChannel interface {
	// Initialize stores the stream URL and channel token on the channel.
	Initialize(log log.T, channelUrl string, channelToken string)
	// Open upgrades the HTTP connection to a websocket connection.
	Open(log log.T) error
	// Close closes the underlying websocket connection.
	Close(log log.T) error
	// SendMessage writes a message of the given websocket type
	// (e.g. websocket.TextMessage) to the connection.
	SendMessage(log log.T, input []byte, inputType int) error
	// StartPings starts a keepalive ping loop at the given interval.
	StartPings(log log.T, pingInterval time.Duration)
	// GetChannelToken returns the current channel token.
	GetChannelToken() string
	// GetStreamUrl returns the stream URL for this channel.
	GetStreamUrl() string
	// SetChannelToken replaces the channel token.
	SetChannelToken(string)
	// SetOnError registers the handler invoked on channel errors.
	SetOnError(onErrorHandler func(error))
	// SetOnMessage registers the handler invoked for each received message.
	SetOnMessage(onMessageHandler func([]byte))
}

// WebSocketChannel is the parent struct for DataChannel.
type WebSocketChannel struct { IWebSocketChannel Url string OnMessage func([]byte) OnError func(error) IsOpen bool writeLock *sync.Mutex Connection *websocket.Conn ChannelToken string } // GetChannelToken gets the channel token func (webSocketChannel *WebSocketChannel) GetChannelToken() string { return webSocketChannel.ChannelToken } // SetChannelToken sets the channel token func (webSocketChannel *WebSocketChannel) SetChannelToken(channelToken string) { webSocketChannel.ChannelToken = channelToken } // GetStreamUrl gets stream url func (webSocketChannel *WebSocketChannel) GetStreamUrl() string { return webSocketChannel.Url } // SetOnError sets OnError field of websocket channel func (webSocketChannel *WebSocketChannel) SetOnError(onErrorHandler func(error)) { webSocketChannel.OnError = onErrorHandler } // SetOnMessage sets OnMessage field of websocket channel func (webSocketChannel *WebSocketChannel) SetOnMessage(onMessageHandler func([]byte)) { webSocketChannel.OnMessage = onMessageHandler } // Initialize initializes websocket channel fields func (webSocketChannel *WebSocketChannel) Initialize(log log.T, channelUrl string, channelToken string) { webSocketChannel.ChannelToken = channelToken webSocketChannel.Url = channelUrl } // StartPings starts the pinging process to keep the websocket channel alive. func (webSocketChannel *WebSocketChannel) StartPings(log log.T, pingInterval time.Duration) { go func() { for { if webSocketChannel.IsOpen == false { return } log.Debug("WebsocketChannel: Send ping. Message.") webSocketChannel.writeLock.Lock() err := webSocketChannel.Connection.WriteMessage(websocket.PingMessage, []byte("keepalive")) webSocketChannel.writeLock.Unlock() if err != nil { log.Errorf("Error while sending websocket ping: %v", err) return } time.Sleep(pingInterval) } }() } // SendMessage sends a byte message through the websocket connection. 
// Examples of message type are websocket.TextMessage or websocket.Binary func (webSocketChannel *WebSocketChannel) SendMessage(log log.T, input []byte, inputType int) error { if webSocketChannel.IsOpen == false { return errors.New("Can't send message: Connection is closed.") } if len(input) < 1 { return errors.New("Can't send message: Empty input.") } webSocketChannel.writeLock.Lock() err := webSocketChannel.Connection.WriteMessage(inputType, input) webSocketChannel.writeLock.Unlock() return err } // Close closes the corresponding connection. func (webSocketChannel *WebSocketChannel) Close(log log.T) error { log.Info("Closing websocket channel connection to: " + webSocketChannel.Url) if webSocketChannel.IsOpen == true { // Send signal to stop receiving message webSocketChannel.IsOpen = false return websocketutil.NewWebsocketUtil(log, nil).CloseConnection(webSocketChannel.Connection) } log.Info("Websocket channel connection to: " + webSocketChannel.Url + " is already Closed!") return nil } // Open upgrades the http connection to a websocket connection. 
func (webSocketChannel *WebSocketChannel) Open(log log.T) error { // initialize the write mutex webSocketChannel.writeLock = &sync.Mutex{} ws, err := websocketutil.NewWebsocketUtil(log, nil).OpenConnection(webSocketChannel.Url) if err != nil { return err } webSocketChannel.Connection = ws webSocketChannel.IsOpen = true webSocketChannel.StartPings(log, config.PingTimeInterval) // spin up a different routine to listen to the incoming traffic go func() { defer func() { if msg := recover(); msg != nil { log.Errorf("WebsocketChannel listener run panic: %v", msg) } }() retryCount := 0 for { if webSocketChannel.IsOpen == false { log.Debugf("Ending the channel listening routine since the channel is closed: %s", webSocketChannel.Url) break } messageType, rawMessage, err := webSocketChannel.Connection.ReadMessage() if err != nil { retryCount++ if retryCount >= config.RetryAttempt { log.Errorf("Reach the retry limit %v for receive messages.", config.RetryAttempt) webSocketChannel.OnError(err) break } log.Debugf("An error happened when receiving the message. Retried times: %v, Error: %v, Messagetype: %v", retryCount, err.Error(), messageType) } else if messageType != websocket.TextMessage && messageType != websocket.BinaryMessage { // We only accept text messages which are interpreted as UTF-8 or binary encoded text. log.Errorf("Invalid message type. We only accept UTF-8 or binary encoded text. Message type: %v", messageType) } else { retryCount = 0 webSocketChannel.OnMessage(rawMessage) } } }() return nil }
191
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// this package implement base communicator for network connections.
package communicator

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"sync"
	"testing"

	"github.com/aws/session-manager-plugin/src/log"
	"github.com/gorilla/websocket"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// Shared fixtures used across the websocket channel tests.
var (
	mockLogger          = log.NewMockLog()
	defaultChannelToken = "channelToken"
	defaultStreamUrl    = "streamUrl"
	defaultError        = errors.New("Default Error")
	defaultMessage      = []byte("Default Message")
)

// ErrorCallbackWrapper records invocations of the OnError callback and
// captures the last error it was handed, so tests can assert on both.
type ErrorCallbackWrapper struct {
	mock.Mock
	err error
}

func (mock *ErrorCallbackWrapper) defaultErrorHandler(err error) {
	mock.Called(err)
	mock.err = err
}

// MessageCallbackWrapper records invocations of the OnMessage callback and
// captures the last message payload it was handed.
type MessageCallbackWrapper struct {
	mock.Mock
	message []byte
}

func (mock *MessageCallbackWrapper) defaultMessageHandler(msg []byte) {
	mock.Called(msg)
	mock.message = msg
}

// upgrader turns the test server's HTTP connections into websockets.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
}

// handlerToBeTested echos all incoming input from a websocket connection back to the client while
// adding the word "echo".
func handlerToBeTested(w http.ResponseWriter, req *http.Request) { var log = log.NewMockLog() conn, err := upgrader.Upgrade(w, req, nil) if err != nil { http.Error(w, fmt.Sprintf("cannot upgrade: %v", err), http.StatusInternalServerError) } for { mt, p, err := conn.ReadMessage() if err != nil { log.Errorf("error: %v", err) return } //echo back the same sent string from the client while adding "echo" at the beginning conn.WriteMessage(mt, []byte("echo "+string(p))) } } func TestWebSocketChannel_GetChannelToken(t *testing.T) { t.Log("Starting test: webSocketChannel.GetChannelToken") channel := &WebSocketChannel{ ChannelToken: defaultChannelToken, } token := channel.GetChannelToken() assert.Equal(t, defaultChannelToken, token) } func TestWebSocketChannel_SetChannelToken(t *testing.T) { t.Log("Starting test: webSocketChannel.SetChannelToken") channel := &WebSocketChannel{} channel.SetChannelToken(defaultChannelToken) assert.Equal(t, defaultChannelToken, channel.ChannelToken) } func TestWebSocketChannel_GetStreamUrl(t *testing.T) { t.Log("Starting test: webSocketChannel.GetStreamUrl") channel := &WebSocketChannel{ Url: defaultStreamUrl, } url := channel.GetStreamUrl() assert.Equal(t, defaultStreamUrl, url) } func TestWebSocketChannel_SetOnError(t *testing.T) { t.Log("Starting test: webSocketChannel.SetOnError") channel := &WebSocketChannel{} errorCallbackWrapper := &ErrorCallbackWrapper{} errorCallbackWrapper.On("defaultErrorHandler", defaultError).Return() channel.SetOnError((*errorCallbackWrapper).defaultErrorHandler) channel.OnError(defaultError) errorCallbackWrapper.AssertCalled(t, "defaultErrorHandler", defaultError) assert.Equal(t, defaultError.Error(), errorCallbackWrapper.err.Error()) } func TestWebsocketChannel_SetOnMessage(t *testing.T) { t.Log("Starting test: webSocketChannel.SetOnMessage") channel := &WebSocketChannel{} messageCallbackWrapper := &MessageCallbackWrapper{} messageCallbackWrapper.On("defaultMessageHandler", defaultMessage).Return() 
channel.SetOnMessage((*messageCallbackWrapper).defaultMessageHandler) channel.OnMessage(defaultMessage) messageCallbackWrapper.AssertCalled(t, "defaultMessageHandler", defaultMessage) assert.Equal(t, defaultMessage, messageCallbackWrapper.message) } func TestWebsocketchannel_Initialize(t *testing.T) { t.Log("Starting test: webSocketChannel.Initialize") channel := &WebSocketChannel{} channel.Initialize(mockLogger, defaultStreamUrl, defaultChannelToken) assert.Equal(t, defaultStreamUrl, channel.Url) assert.Equal(t, defaultChannelToken, channel.ChannelToken) } func TestOpenCloseWebSocketChannel(t *testing.T) { t.Log("Starting test: TestOpenCloseWebSocketChannel") srv := httptest.NewServer(http.HandlerFunc(handlerToBeTested)) u, _ := url.Parse(srv.URL) u.Scheme = "ws" var log = log.NewMockLog() websocketchannel := WebSocketChannel{ Url: u.String(), } err := websocketchannel.Open(log) assert.Nil(t, err, "Error opening the websocket connection.") assert.NotNil(t, websocketchannel.Connection, "Open connection failed.") assert.True(t, websocketchannel.IsOpen, "IsOpen is not set to true.") err = websocketchannel.Close(log) assert.Nil(t, err, "Error closing the websocket connection.") assert.False(t, websocketchannel.IsOpen, "IsOpen is not set to false.") t.Log("Ending test: TestOpenCloseWebSocketChannel") } func TestReadWriteTextToWebSocketChannel(t *testing.T) { t.Log("Starting test: TestReadWriteWebSocketChannel ") srv := httptest.NewServer(http.HandlerFunc(handlerToBeTested)) u, _ := url.Parse(srv.URL) u.Scheme = "ws" var log = log.NewMockLog() var wg sync.WaitGroup wg.Add(1) onMessage := func(input []byte) { defer wg.Done() t.Log(input) // Verify read from websocket server assert.Equal(t, string(input), "echo channelreadwrite") } websocketchannel := WebSocketChannel{ Url: u.String(), OnMessage: onMessage, } // Open the websocket connection err := websocketchannel.Open(log) assert.Nil(t, err, "Error opening the websocket connection.") assert.NotNil(t, 
websocketchannel.Connection, "Open connection failed.") // Verify write to websocket server websocketchannel.SendMessage(log, []byte("channelreadwrite"), websocket.TextMessage) wg.Wait() err = websocketchannel.Close(log) assert.Nil(t, err, "Error closing the websocket connection.") assert.False(t, websocketchannel.IsOpen, "IsOpen is not set to false.") t.Log("Ending test: TestReadWriteWebSocketChannel ") } func TestReadWriteBinaryToWebSocketChannel(t *testing.T) { t.Log("Starting test: TestReadWriteWebSocketChannel ") srv := httptest.NewServer(http.HandlerFunc(handlerToBeTested)) u, _ := url.Parse(srv.URL) u.Scheme = "ws" var log = log.NewMockLog() var wg sync.WaitGroup wg.Add(1) onMessage := func(input []byte) { defer wg.Done() t.Log(input) // Verify read from websocket server assert.Equal(t, string(input), "echo channelreadwrite") } websocketchannel := WebSocketChannel{ Url: u.String(), OnMessage: onMessage, } // Open the websocket connection err := websocketchannel.Open(log) assert.Nil(t, err, "Error opening the websocket connection.") assert.NotNil(t, websocketchannel.Connection, "Open connection failed.") // Verify write to websocket server websocketchannel.SendMessage(log, []byte("channelreadwrite"), websocket.BinaryMessage) wg.Wait() err = websocketchannel.Close(log) assert.Nil(t, err, "Error closing the websocket connection.") assert.False(t, websocketchannel.IsOpen, "IsOpen is not set to false.") t.Log("Ending test: TestReadWriteWebSocketChannel ") } func TestMultipleReadWriteWebSocketChannel(t *testing.T) { t.Log("Starting test: TestMultipleReadWriteWebSocketChannel") srv := httptest.NewServer(http.HandlerFunc(handlerToBeTested)) u, _ := url.Parse(srv.URL) u.Scheme = "ws" var log = log.NewMockLog() read1 := make(chan bool) read2 := make(chan bool) onMessage := func(input []byte) { t.Log(input) // Verify reads from websocket server if string(input) == "echo channelreadwrite1" { read1 <- true } if string(input) == "echo channelreadwrite2" { read2 <- true } 
} websocketchannel := WebSocketChannel{ Url: u.String(), OnMessage: onMessage, } // Open the websocket connection err := websocketchannel.Open(log) assert.Nil(t, err, "Error opening the websocket connection.") assert.NotNil(t, websocketchannel.Connection, "Open connection failed.") // Verify writes to websocket server websocketchannel.SendMessage(log, []byte("channelreadwrite1"), websocket.TextMessage) websocketchannel.SendMessage(log, []byte("channelreadwrite2"), websocket.TextMessage) assert.True(t, <-read1, "Didn't read value 1 correctly") assert.True(t, <-read2, "Didn't ready value 2 correctly") err = websocketchannel.Close(log) assert.Nil(t, err, "Error closing the websocket connection.") assert.False(t, websocketchannel.IsOpen, "IsOpen is not set to false.") t.Log("Ending test: TestMultipleReadWriteWebSocketChannel") }
289
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.

// Code generated by mockery v1.0.0. DO NOT EDIT.
// NOTE(review): regenerate with mockery rather than editing by hand; this
// mock must stay in sync with the communicator.IWebSocketChannel interface.

package mocks

import (
	log "github.com/aws/session-manager-plugin/src/log"
	mock "github.com/stretchr/testify/mock"

	time "time"
)

// IWebSocketChannel is an autogenerated mock type for the IWebSocketChannel type
type IWebSocketChannel struct {
	mock.Mock
}

// Close provides a mock function with given fields: _a0
func (_m *IWebSocketChannel) Close(_a0 log.T) error {
	ret := _m.Called(_a0)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// GetChannelToken provides a mock function with given fields:
func (_m *IWebSocketChannel) GetChannelToken() string {
	ret := _m.Called()

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

// GetStreamUrl provides a mock function with given fields:
func (_m *IWebSocketChannel) GetStreamUrl() string {
	ret := _m.Called()

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

// Initialize provides a mock function with given fields: _a0, channelUrl, channelToken
func (_m *IWebSocketChannel) Initialize(_a0 log.T, channelUrl string, channelToken string) {
	_m.Called(_a0, channelUrl, channelToken)
}

// Open provides a mock function with given fields: _a0
func (_m *IWebSocketChannel) Open(_a0 log.T) error {
	ret := _m.Called(_a0)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SendMessage provides a mock function with given fields: _a0, input, inputType
func (_m *IWebSocketChannel) SendMessage(_a0 log.T, input []byte, inputType int) error {
	ret := _m.Called(_a0, input, inputType)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T, []byte, int) error); ok {
		r0 = rf(_a0, input, inputType)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SetChannelToken provides a mock function with given fields: _a0
func (_m *IWebSocketChannel) SetChannelToken(_a0 string) {
	_m.Called(_a0)
}

// SetOnError provides a mock function with given fields: onErrorHandler
func (_m *IWebSocketChannel) SetOnError(onErrorHandler func(error)) {
	_m.Called(onErrorHandler)
}

// SetOnMessage provides a mock function with given fields: onMessageHandler
func (_m *IWebSocketChannel) SetOnMessage(onMessageHandler func([]byte)) {
	_m.Called(onMessageHandler)
}

// StartPings provides a mock function with given fields: _a0, pingInterval
func (_m *IWebSocketChannel) StartPings(_a0 log.T, pingInterval time.Duration) {
	_m.Called(_a0, pingInterval)
}
112
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// config package implement configuration retrieval for session manager apis
package config

import "time"

const (
	RolePublishSubscribe = "publish_subscribe"
	MessageSchemaVersion = "1.0"

	// Stream retransmission tuning (see datachannel resend scheduler).
	DefaultTransmissionTimeout    = 200 * time.Millisecond
	DefaultRoundTripTime          = 100 * time.Millisecond
	DefaultRoundTripTimeVariation = 0
	ResendSleepInterval           = 100 * time.Millisecond
	ResendMaxAttempt              = 3000 // 5 minutes / ResendSleepInterval
	StreamDataPayloadSize         = 1024

	// Stream message buffer capacities (outgoing list / incoming map).
	OutgoingMessageBufferCapacity = 10000
	IncomingMessageBufferCapacity = 10000

	// Retransmission-timeout estimator coefficients and bounds.
	RTTConstant            = 1.0 / 8.0 // Round trip time constant
	RTTVConstant           = 1.0 / 4.0 // Round trip time variation constant
	ClockGranularity       = 10 * time.Millisecond
	MaxTransmissionTimeout = 1 * time.Second

	// Data channel (re)connection retry/backoff settings.
	RetryBase                          = 2
	DataChannelNumMaxRetries           = 5
	DataChannelRetryInitialDelayMillis = 100
	DataChannelRetryMaxIntervalMillis  = 5000
	RetryAttempt                       = 5
	PingTimeInterval                   = 5 * time.Minute

	// Plugin names
	ShellPluginName                  = "Standard_Stream"
	PortPluginName                   = "Port"
	InteractiveCommandsPluginName    = "InteractiveCommands"
	NonInteractiveCommandsPluginName = "NonInteractiveCommands"

	//Agent Versions
	TerminateSessionFlagSupportedAfterThisAgentVersion            = "2.3.722.0"
	TCPMultiplexingSupportedAfterThisAgentVersion                 = "3.0.196.0"
	TCPMultiplexingWithSmuxKeepAliveDisabledAfterThisAgentVersion = "3.1.1511.0"
)
52
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// datachannel package implement data channel for interactive sessions.
package datachannel

import (
	"bytes"
	"container/list"
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"os"
	"reflect"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms/kmsiface"
	"github.com/aws/session-manager-plugin/src/communicator"
	"github.com/aws/session-manager-plugin/src/config"
	"github.com/aws/session-manager-plugin/src/encryption"
	"github.com/aws/session-manager-plugin/src/log"
	"github.com/aws/session-manager-plugin/src/message"
	"github.com/aws/session-manager-plugin/src/service"
	"github.com/aws/session-manager-plugin/src/version"
	"github.com/gorilla/websocket"
	"github.com/twinj/uuid"
)

// IDataChannel is the contract for the reliable stream-data channel built on
// top of the websocket channel: lifecycle, handshake, ordered send/receive
// with acknowledgement and retransmission, and session-type negotiation.
type IDataChannel interface {
	// Lifecycle and connection management.
	Initialize(log log.T, clientId string, sessionId string, targetId string, isAwsCliUpgradeNeeded bool)
	SetWebsocket(log log.T, streamUrl string, tokenValue string)
	Reconnect(log log.T) error
	SendFlag(log log.T, flagType message.PayloadTypeFlag) error
	Open(log log.T) error
	Close(log log.T) error
	FinalizeDataChannelHandshake(log log.T, tokenValue string) error
	// Reliable stream send/receive with ack and retransmission.
	SendInputDataMessage(log log.T, payloadType message.PayloadType, inputData []byte) error
	ResendStreamDataMessageScheduler(log log.T) error
	ProcessAcknowledgedMessage(log log.T, acknowledgeMessageContent message.AcknowledgeContent) error
	OutputMessageHandler(log log.T, stopHandler Stop, sessionID string, rawMessage []byte) error
	SendAcknowledgeMessage(log log.T, clientMessage message.ClientMessage) error
	// Buffer management for in-flight and out-of-order messages.
	AddDataToOutgoingMessageBuffer(streamMessage StreamingMessage)
	RemoveDataFromOutgoingMessageBuffer(streamMessageElement *list.Element)
	AddDataToIncomingMessageBuffer(streamMessage StreamingMessage)
	RemoveDataFromIncomingMessageBuffer(sequenceNumber int64)
	CalculateRetransmissionTimeout(log log.T, streamingMessage StreamingMessage)
	SendMessage(log log.T, input []byte, inputType int) error
	// Output-stream handler registration and session metadata accessors.
	RegisterOutputStreamHandler(handler OutputStreamDataMessageHandler, isSessionSpecificHandler bool)
	DeregisterOutputStreamHandler(handler OutputStreamDataMessageHandler)
	IsSessionTypeSet() chan bool
	IsStreamMessageResendTimeout() chan bool
	GetSessionType() string
	SetSessionType(sessionType string)
	GetSessionProperties() interface{}
	GetWsChannel() communicator.IWebSocketChannel
	SetWsChannel(wsChannel communicator.IWebSocketChannel)
	GetStreamDataSequenceNumber() int64
	GetAgentVersion() string
	SetAgentVersion(agentVersion string)
}

// DataChannel used for communication between the mgs and the cli.
type DataChannel struct { wsChannel communicator.IWebSocketChannel Role string ClientId string SessionId string TargetId string IsAwsCliUpgradeNeeded bool //records sequence number of last acknowledged message received over data channel ExpectedSequenceNumber int64 //records sequence number of last stream data message sent over data channel StreamDataSequenceNumber int64 //buffer to store outgoing stream messages until acknowledged //using linked list for this buffer as access to oldest message is required and it support faster deletion from any position of list OutgoingMessageBuffer ListMessageBuffer //buffer to store incoming stream messages if received out of sequence //using map for this buffer as incoming messages can be out of order and retrieval would be faster by sequenceId IncomingMessageBuffer MapMessageBuffer //round trip time of latest acknowledged message RoundTripTime float64 //round trip time variation of latest acknowledged message RoundTripTimeVariation float64 //timeout used for resending unacknowledged message RetransmissionTimeout time.Duration // Encrypter to encrypt/decrypt if agent requests encryption encryption encryption.IEncrypter encryptionEnabled bool // SessionType sessionType string isSessionTypeSet chan bool sessionProperties interface{} // Used to detect if resending a streaming message reaches timeout isStreamMessageResendTimeout chan bool // Handles data on output stream. Output stream is data outputted by the SSM agent and received here. 
outputStreamHandlers []OutputStreamDataMessageHandler isSessionSpecificHandlerSet bool // AgentVersion received during handshake agentVersion string } type ListMessageBuffer struct { Messages *list.List Capacity int Mutex *sync.Mutex } type MapMessageBuffer struct { Messages map[int64]StreamingMessage Capacity int Mutex *sync.Mutex } type StreamingMessage struct { Content []byte SequenceNumber int64 LastSentTime time.Time ResendAttempt *int } type OutputStreamDataMessageHandler func(log log.T, streamDataMessage message.ClientMessage) (bool, error) type Stop func() var SendAcknowledgeMessageCall = func(log log.T, dataChannel *DataChannel, streamDataMessage message.ClientMessage) error { return dataChannel.SendAcknowledgeMessage(log, streamDataMessage) } var ProcessAcknowledgedMessageCall = func(log log.T, dataChannel *DataChannel, acknowledgeMessage message.AcknowledgeContent) error { return dataChannel.ProcessAcknowledgedMessage(log, acknowledgeMessage) } var SendMessageCall = func(log log.T, dataChannel *DataChannel, input []byte, inputType int) error { return dataChannel.SendMessage(log, input, inputType) } var GetRoundTripTime = func(streamingMessage StreamingMessage) time.Duration { return time.Since(streamingMessage.LastSentTime) } var newEncrypter = func(log log.T, kmsKeyId string, encryptionConext map[string]*string, kmsService kmsiface.KMSAPI) (encryption.IEncrypter, error) { return encryption.NewEncrypter(log, kmsKeyId, encryptionConext, kmsService) } // Initialize populates the data channel object with the correct values. 
// Initialize resets the data channel to its starting state: identifiers,
// sequence numbers, empty in/out buffers, default RTT/retransmission timing,
// and a fresh (not yet connected) websocket channel.
func (dataChannel *DataChannel) Initialize(log log.T, clientId string, sessionId string, targetId string, isAwsCliUpgradeNeeded bool) {
	//open data channel as publish_subscribe
	log.Debugf("Calling Initialize Datachannel for role: %s", config.RolePublishSubscribe)

	dataChannel.Role = config.RolePublishSubscribe
	dataChannel.ClientId = clientId
	dataChannel.SessionId = sessionId
	dataChannel.TargetId = targetId
	dataChannel.ExpectedSequenceNumber = 0
	dataChannel.StreamDataSequenceNumber = 0
	dataChannel.OutgoingMessageBuffer = ListMessageBuffer{
		list.New(),
		config.OutgoingMessageBufferCapacity,
		&sync.Mutex{},
	}
	dataChannel.IncomingMessageBuffer = MapMessageBuffer{
		make(map[int64]StreamingMessage),
		config.IncomingMessageBufferCapacity,
		&sync.Mutex{},
	}
	dataChannel.RoundTripTime = float64(config.DefaultRoundTripTime)
	dataChannel.RoundTripTimeVariation = config.DefaultRoundTripTimeVariation
	dataChannel.RetransmissionTimeout = config.DefaultTransmissionTimeout
	dataChannel.wsChannel = &communicator.WebSocketChannel{}
	dataChannel.encryptionEnabled = false
	// Buffered (capacity 1) so signalers never block if nobody is receiving yet.
	dataChannel.isSessionTypeSet = make(chan bool, 1)
	dataChannel.isStreamMessageResendTimeout = make(chan bool, 1)
	dataChannel.sessionType = ""
	dataChannel.IsAwsCliUpgradeNeeded = isAwsCliUpgradeNeeded
}

// SetWebsocket function populates websocket channel object
func (dataChannel *DataChannel) SetWebsocket(log log.T, channelUrl string, channelToken string) {
	dataChannel.wsChannel.Initialize(log, channelUrl, channelToken)
}

// FinalizeDataChannelHandshake sends the token for service to acknowledge the connection.
func (dataChannel *DataChannel) FinalizeDataChannelHandshake(log log.T, tokenValue string) (err error) {
	uuid.SwitchFormat(uuid.CleanHyphen)
	uid := uuid.NewV4().String()

	log.Infof("Sending token through data channel %s to acknowledge connection", dataChannel.wsChannel.GetStreamUrl())
	openDataChannelInput := service.OpenDataChannelInput{
		MessageSchemaVersion: aws.String(config.MessageSchemaVersion),
		RequestId:            aws.String(uid),
		TokenValue:           aws.String(tokenValue),
		ClientId:             aws.String(dataChannel.ClientId),
	}

	var openDataChannelInputBytes []byte
	if openDataChannelInputBytes, err = json.Marshal(openDataChannelInput); err != nil {
		log.Errorf("Error serializing openDataChannelInput: %s", err)
		return
	}
	return dataChannel.SendMessage(log, openDataChannelInputBytes, websocket.TextMessage)
}

// SendMessage sends a message to the service through datachannel
func (dataChannel *DataChannel) SendMessage(log log.T, input []byte, inputType int) error {
	return dataChannel.wsChannel.SendMessage(log, input, inputType)
}

// Open opens websocket connects and does final handshake to acknowledge connection
func (dataChannel *DataChannel) Open(log log.T) (err error) {
	if err = dataChannel.wsChannel.Open(log); err != nil {
		return fmt.Errorf("failed to open data channel with error: %v", err)
	}

	if err = dataChannel.FinalizeDataChannelHandshake(log, dataChannel.wsChannel.GetChannelToken()); err != nil {
		return fmt.Errorf("error sending token for handshake: %v", err)
	}
	return
}

// Close closes datachannel - its web socket connection
func (dataChannel *DataChannel) Close(log log.T) error {
	log.Infof("Closing datachannel with url %s", dataChannel.wsChannel.GetStreamUrl())
	return dataChannel.wsChannel.Close(log)
}

// Reconnect calls ResumeSession API to reconnect datachannel when connection is lost
// A Close failure is logged but does not abort the reconnect attempt.
func (dataChannel *DataChannel) Reconnect(log log.T) (err error) {
	if err = dataChannel.Close(log); err != nil {
		log.Debugf("Closing datachannel failed with error: %v", err)
	}

	if err = dataChannel.Open(log); err != nil {
		return fmt.Errorf("failed to reconnect data channel %s with error: %v", dataChannel.wsChannel.GetStreamUrl(), err)
	}

	log.Infof("Successfully reconnected to data channel: %s", dataChannel.wsChannel.GetStreamUrl())
	return
}

// SendFlag sends a data message with PayloadType as given flag.
func (dataChannel *DataChannel) SendFlag(
	log log.T,
	flagType message.PayloadTypeFlag) (err error) {
	flagBuf := new(bytes.Buffer)
	// binary.Write cannot fail on a bytes.Buffer with a fixed-size value.
	binary.Write(flagBuf, binary.BigEndian, flagType)
	return dataChannel.SendInputDataMessage(log, message.Flag, flagBuf.Bytes())
}

// SendInputDataMessage sends a data message in a form of ClientMessage.
// The message is also placed in the outgoing buffer so the resend scheduler
// can retransmit it until it is acknowledged.
func (dataChannel *DataChannel) SendInputDataMessage(
	log log.T,
	payloadType message.PayloadType,
	inputData []byte) (err error) {

	var (
		flag uint64 = 0
		msg  []byte
	)

	messageId := uuid.NewV4()

	// today 'enter' is taken as 'next line' in winpty shell. so hardcoding 'next line' byte to actual 'enter' byte
	if bytes.Equal(inputData, []byte{10}) {
		inputData = []byte{13}
	}

	// Encrypt if encryption is enabled and payload type is Output
	if dataChannel.encryptionEnabled && payloadType == message.Output {
		inputData, err = dataChannel.encryption.Encrypt(log, inputData)
		if err != nil {
			return err
		}
	}

	clientMessage := message.ClientMessage{
		MessageType:    message.InputStreamMessage,
		SchemaVersion:  1,
		CreatedDate:    uint64(time.Now().UnixNano() / 1000000),
		Flags:          flag,
		MessageId:      messageId,
		PayloadType:    uint32(payloadType),
		Payload:        inputData,
		SequenceNumber: dataChannel.StreamDataSequenceNumber,
	}

	if msg, err = clientMessage.SerializeClientMessage(log); err != nil {
		log.Errorf("Cannot serialize StreamData message with error: %v", err)
		return
	}

	log.Tracef("Sending message with seq number: %d", dataChannel.StreamDataSequenceNumber)
	if err = SendMessageCall(log, dataChannel, msg, websocket.BinaryMessage); err != nil {
		log.Errorf("Error sending stream data message %v", err)
		return
	}

	// Buffer the sent message for possible retransmission, then advance the
	// outgoing sequence number.
	streamingMessage := StreamingMessage{
		msg,
		dataChannel.StreamDataSequenceNumber,
		time.Now(),
		new(int),
	}

	dataChannel.AddDataToOutgoingMessageBuffer(streamingMessage)
	dataChannel.StreamDataSequenceNumber = dataChannel.StreamDataSequenceNumber + 1
	return
}

// ResendStreamDataMessageScheduler spawns a separate go thread which keeps checking OutgoingMessageBuffer at fixed interval
// and resends first message if time elapsed since lastSentTime of the message is more than acknowledge wait time
func (dataChannel *DataChannel) ResendStreamDataMessageScheduler(log log.T) (err error) {
	go func() {
		for {
			time.Sleep(config.ResendSleepInterval)
			dataChannel.OutgoingMessageBuffer.Mutex.Lock()
			streamMessageElement := dataChannel.OutgoingMessageBuffer.Messages.Front()
			dataChannel.OutgoingMessageBuffer.Mutex.Unlock()

			if streamMessageElement == nil {
				continue
			}

			streamMessage := streamMessageElement.Value.(StreamingMessage)
			if time.Since(streamMessage.LastSentTime) > dataChannel.RetransmissionTimeout {
				log.Debugf("Resend stream data message %d for the %d attempt.", streamMessage.SequenceNumber, *streamMessage.ResendAttempt)
				if *streamMessage.ResendAttempt >= config.ResendMaxAttempt {
					log.Warnf("Message %d was resent over %d times.", streamMessage.SequenceNumber, config.ResendMaxAttempt)
					// Signal the session that retransmission gave up.
					dataChannel.isStreamMessageResendTimeout <- true
				}
				*streamMessage.ResendAttempt++
				if err = SendMessageCall(log, dataChannel, streamMessage.Content, websocket.BinaryMessage); err != nil {
					log.Errorf("Unable to send stream data message: %s", err)
				}
				streamMessage.LastSentTime = time.Now()
			}
		}
	}()
	return
}

// ProcessAcknowledgedMessage processes acknowledge messages by deleting them from OutgoingMessageBuffer
func (dataChannel *DataChannel) ProcessAcknowledgedMessage(log log.T, acknowledgeMessageContent message.AcknowledgeContent) error {
	acknowledgeSequenceNumber := acknowledgeMessageContent.SequenceNumber
	for streamMessageElement := dataChannel.OutgoingMessageBuffer.Messages.Front(); streamMessageElement != nil; streamMessageElement = streamMessageElement.Next() {
		streamMessage := streamMessageElement.Value.(StreamingMessage)
		if streamMessage.SequenceNumber == acknowledgeSequenceNumber {

			//Calculate retransmission timeout based on latest round trip time of message
			dataChannel.CalculateRetransmissionTimeout(log, streamMessage)

			dataChannel.RemoveDataFromOutgoingMessageBuffer(streamMessageElement)
			break
		}
	}
	return nil
}

// SendAcknowledgeMessage sends acknowledge message for stream data over data channel
func (dataChannel *DataChannel) SendAcknowledgeMessage(log log.T, streamDataMessage message.ClientMessage) (err error) {
	dataStreamAcknowledgeContent := message.AcknowledgeContent{
		MessageType:         streamDataMessage.MessageType,
		MessageId:           streamDataMessage.MessageId.String(),
		SequenceNumber:      streamDataMessage.SequenceNumber,
		IsSequentialMessage: true,
	}

	var msg []byte
	if msg, err = message.SerializeClientMessageWithAcknowledgeContent(log, dataStreamAcknowledgeContent); err != nil {
		log.Errorf("Cannot serialize Acknowledge message err: %v", err)
		return
	}

	if err = SendMessageCall(log, dataChannel, msg, websocket.BinaryMessage); err != nil {
		log.Errorf("Error sending acknowledge message %v", err)
		return
	}
	return
}

// OutputMessageHandler gets output on the data channel
func (dataChannel *DataChannel) OutputMessageHandler(log log.T, stopHandler Stop, sessionID string, rawMessage []byte) error {
	outputMessage := &message.ClientMessage{}
	err := outputMessage.DeserializeClientMessage(log, rawMessage)
	if err != nil {
		log.Errorf("Cannot deserialize raw message: %s, err: %v.", string(rawMessage), err)
		return err
	}
	if err = outputMessage.Validate(); err != nil {
		log.Errorf("Invalid outputMessage: %v, err: %v.", *outputMessage, err)
		return err
	}

	log.Tracef("Processing stream data message of type: %s", outputMessage.MessageType)
	switch outputMessage.MessageType {
	case message.OutputStreamMessage:
		return dataChannel.HandleOutputMessage(log, *outputMessage, rawMessage)
	case message.AcknowledgeMessage:
		return
dataChannel.HandleAcknowledgeMessage(log, *outputMessage) case message.ChannelClosedMessage: dataChannel.HandleChannelClosedMessage(log, stopHandler, sessionID, *outputMessage) case message.StartPublicationMessage, message.PausePublicationMessage: return nil default: log.Warn("Invalid message type received: %s", outputMessage.MessageType) } return nil } // handleHandshakeRequest is the handler for payloads of type HandshakeRequest func (dataChannel *DataChannel) handleHandshakeRequest(log log.T, clientMessage message.ClientMessage) error { handshakeRequest, err := clientMessage.DeserializeHandshakeRequest(log) if err != nil { log.Errorf("Deserialize Handshake Request failed: %s", err) return err } dataChannel.agentVersion = handshakeRequest.AgentVersion var errorList []error var handshakeResponse message.HandshakeResponsePayload handshakeResponse.ClientVersion = version.Version handshakeResponse.ProcessedClientActions = []message.ProcessedClientAction{} for _, action := range handshakeRequest.RequestedClientActions { processedAction := message.ProcessedClientAction{} switch action.ActionType { case message.KMSEncryption: processedAction.ActionType = action.ActionType err := dataChannel.ProcessKMSEncryptionHandshakeAction(log, action.ActionParameters) if err != nil { processedAction.ActionStatus = message.Failed processedAction.Error = fmt.Sprintf("Failed to process action %s: %s", message.KMSEncryption, err) errorList = append(errorList, err) } else { processedAction.ActionStatus = message.Success processedAction.ActionResult = message.KMSEncryptionResponse{ KMSCipherTextKey: dataChannel.encryption.GetEncryptedDataKey(), } dataChannel.encryptionEnabled = true } case message.SessionType: processedAction.ActionType = action.ActionType err := dataChannel.ProcessSessionTypeHandshakeAction(action.ActionParameters) if err != nil { processedAction.ActionStatus = message.Failed processedAction.Error = fmt.Sprintf("Failed to process action %s: %s", message.SessionType, err) 
errorList = append(errorList, err) } else { processedAction.ActionStatus = message.Success } default: processedAction.ActionType = action.ActionType processedAction.ActionResult = message.Unsupported processedAction.Error = fmt.Sprintf("Unsupported action %s", action.ActionType) errorList = append(errorList, errors.New(processedAction.Error)) } handshakeResponse.ProcessedClientActions = append(handshakeResponse.ProcessedClientActions, processedAction) } for _, x := range errorList { handshakeResponse.Errors = append(handshakeResponse.Errors, x.Error()) } err = dataChannel.sendHandshakeResponse(log, handshakeResponse) return err } // handleHandshakeComplete is the handler for when the payload type is HandshakeComplete. This will trigger // the plugin to start. func (dataChannel *DataChannel) handleHandshakeComplete(log log.T, clientMessage message.ClientMessage) error { var err error var handshakeComplete message.HandshakeCompletePayload handshakeComplete, err = clientMessage.DeserializeHandshakeComplete(log) if err != nil { return err } // SessionType would be set when handshake request is received if dataChannel.sessionType != "" { dataChannel.isSessionTypeSet <- true } else { dataChannel.isSessionTypeSet <- false } log.Debugf("Handshake Complete. Handshake time to complete is: %s seconds", handshakeComplete.HandshakeTimeToComplete.Seconds()) if handshakeComplete.CustomerMessage != "" { fmt.Fprintln(os.Stdout, handshakeComplete.CustomerMessage) } return err } // handleEncryptionChallengeRequest receives EncryptionChallenge and responds. 
func (dataChannel *DataChannel) handleEncryptionChallengeRequest(log log.T, clientMessage message.ClientMessage) error { var err error var encChallengeReq message.EncryptionChallengeRequest err = json.Unmarshal(clientMessage.Payload, &encChallengeReq) if err != nil { return fmt.Errorf("Could not deserialize rawMessage, %s : %s", clientMessage.Payload, err) } challenge := encChallengeReq.Challenge challenge, err = dataChannel.encryption.Decrypt(log, challenge) if err != nil { return err } challenge, err = dataChannel.encryption.Encrypt(log, challenge) if err != nil { return err } encChallengeResp := message.EncryptionChallengeResponse{ Challenge: challenge, } err = dataChannel.sendEncryptionChallengeResponse(log, encChallengeResp) return err } // sendEncryptionChallengeResponse sends EncryptionChallengeResponse func (dataChannel *DataChannel) sendEncryptionChallengeResponse(log log.T, response message.EncryptionChallengeResponse) error { var resultBytes, err = json.Marshal(response) if err != nil { return fmt.Errorf("Could not serialize EncChallengeResponse message: %v, err: %s", response, err) } log.Tracef("Sending EncChallengeResponse message.") if err := dataChannel.SendInputDataMessage(log, message.EncChallengeResponse, resultBytes); err != nil { return err } return nil } // sendHandshakeResponse sends HandshakeResponse func (dataChannel *DataChannel) sendHandshakeResponse(log log.T, response message.HandshakeResponsePayload) error { var resultBytes, err = json.Marshal(response) if err != nil { log.Errorf("Could not serialize HandshakeResponse message: %v, err: %s", response, err) } log.Tracef("Sending HandshakeResponse message.") if err := dataChannel.SendInputDataMessage(log, message.HandshakeResponsePayloadType, resultBytes); err != nil { return err } return nil } // RegisterOutputStreamHandler register a handler for messages of type OutputStream. This is usually called by the plugin. 
func (dataChannel *DataChannel) RegisterOutputStreamHandler(handler OutputStreamDataMessageHandler, isSessionSpecificHandler bool) { dataChannel.isSessionSpecificHandlerSet = isSessionSpecificHandler dataChannel.outputStreamHandlers = append(dataChannel.outputStreamHandlers, handler) } // DeregisterOutputStreamHandler deregisters a handler previously registered using RegisterOutputStreamHandler func (dataChannel *DataChannel) DeregisterOutputStreamHandler(handler OutputStreamDataMessageHandler) { // Find and remove "handler" for i, v := range dataChannel.outputStreamHandlers { if reflect.ValueOf(v).Pointer() == reflect.ValueOf(handler).Pointer() { dataChannel.outputStreamHandlers = append(dataChannel.outputStreamHandlers[:i], dataChannel.outputStreamHandlers[i+1:]...) break } } } func (dataChannel *DataChannel) processOutputMessageWithHandlers(log log.T, message message.ClientMessage) (isHandlerReady bool, err error) { // Return false if sessionType is known but session specific handler is not set if dataChannel.sessionType != "" && !dataChannel.isSessionSpecificHandlerSet { return false, nil } for _, handler := range dataChannel.outputStreamHandlers { isHandlerReady, err = handler(log, message) // Break the processing of message and return if session specific handler is not ready if err != nil || !isHandlerReady { break } } return isHandlerReady, err } // handleOutputMessage handles incoming stream data message by processing the payload and updating expectedSequenceNumber func (dataChannel *DataChannel) HandleOutputMessage( log log.T, outputMessage message.ClientMessage, rawMessage []byte) (err error) { // On receiving expected stream data message, send acknowledgement, process it and increment expected sequence number by 1. 
// Further process messages from IncomingMessageBuffer if outputMessage.SequenceNumber == dataChannel.ExpectedSequenceNumber { switch message.PayloadType(outputMessage.PayloadType) { case message.HandshakeRequestPayloadType: { if err = SendAcknowledgeMessageCall(log, dataChannel, outputMessage); err != nil { return err } // PayloadType is HandshakeRequest so we call our own handler instead of the provided handler log.Debugf("Processing HandshakeRequest message %s", outputMessage) if err = dataChannel.handleHandshakeRequest(log, outputMessage); err != nil { log.Errorf("Unable to process incoming data payload, MessageType %s, "+ "PayloadType HandshakeRequestPayloadType, err: %s.", outputMessage.MessageType, err) return err } } case message.HandshakeCompletePayloadType: { if err = SendAcknowledgeMessageCall(log, dataChannel, outputMessage); err != nil { return err } if err = dataChannel.handleHandshakeComplete(log, outputMessage); err != nil { log.Errorf("Unable to process incoming data payload, MessageType %s, "+ "PayloadType HandshakeCompletePayloadType, err: %s.", outputMessage.MessageType, err) return err } } case message.EncChallengeRequest: { if err = SendAcknowledgeMessageCall(log, dataChannel, outputMessage); err != nil { return err } if err = dataChannel.handleEncryptionChallengeRequest(log, outputMessage); err != nil { log.Errorf("Unable to process incoming data payload, MessageType %s, "+ "PayloadType EncChallengeRequest, err: %s.", outputMessage.MessageType, err) return err } } default: log.Tracef("Process new incoming stream data message. 
Sequence Number: %d", outputMessage.SequenceNumber) // Decrypt if encryption is enabled and payload type is output if dataChannel.encryptionEnabled && (outputMessage.PayloadType == uint32(message.Output) || outputMessage.PayloadType == uint32(message.StdErr) || outputMessage.PayloadType == uint32(message.ExitCode)) { outputMessage.Payload, err = dataChannel.encryption.Decrypt(log, outputMessage.Payload) if err != nil { log.Errorf("Unable to decrypt incoming data payload, MessageType %s, "+ "PayloadType %d, err: %s.", outputMessage.MessageType, outputMessage.PayloadType, err) return err } } isHandlerReady, err := dataChannel.processOutputMessageWithHandlers(log, outputMessage) if err != nil { log.Error("Failed to process stream data message: %s", err.Error()) return err } if !isHandlerReady { log.Warnf("Stream data message with sequence number %d is not processed as session handler is not ready.", outputMessage.SequenceNumber) return nil } else { // Acknowledge outputMessage only if session specific handler is ready if err := SendAcknowledgeMessageCall(log, dataChannel, outputMessage); err != nil { return err } } } dataChannel.ExpectedSequenceNumber = dataChannel.ExpectedSequenceNumber + 1 return dataChannel.ProcessIncomingMessageBufferItems(log, outputMessage) } else { log.Debugf("Unexpected sequence message received. Received Sequence Number: %d. 
Expected Sequence Number: %d", outputMessage.SequenceNumber, dataChannel.ExpectedSequenceNumber) // If incoming message sequence number is greater then expected sequence number and IncomingMessageBuffer has capacity, // add message to IncomingMessageBuffer and send acknowledgement if outputMessage.SequenceNumber > dataChannel.ExpectedSequenceNumber { log.Debugf("Received Sequence Number %d is higher than Expected Sequence Number %d, adding to IncomingMessageBuffer", outputMessage.SequenceNumber, dataChannel.ExpectedSequenceNumber) if len(dataChannel.IncomingMessageBuffer.Messages) < dataChannel.IncomingMessageBuffer.Capacity { if err = SendAcknowledgeMessageCall(log, dataChannel, outputMessage); err != nil { return err } streamingMessage := StreamingMessage{ rawMessage, outputMessage.SequenceNumber, time.Now(), new(int), } //Add message to buffer for future processing dataChannel.AddDataToIncomingMessageBuffer(streamingMessage) } } } return nil } // processIncomingMessageBufferItems check if new expected sequence stream data is present in IncomingMessageBuffer. // If so process it and increment expected sequence number. // Repeat until expected sequence stream data is not found in IncomingMessageBuffer. func (dataChannel *DataChannel) ProcessIncomingMessageBufferItems(log log.T, outputMessage message.ClientMessage) (err error) { for { bufferedStreamMessage := dataChannel.IncomingMessageBuffer.Messages[dataChannel.ExpectedSequenceNumber] if bufferedStreamMessage.Content != nil { log.Debugf("Process stream data message from IncomingMessageBuffer. 
"+ "Sequence Number: %d", bufferedStreamMessage.SequenceNumber) if err := outputMessage.DeserializeClientMessage(log, bufferedStreamMessage.Content); err != nil { log.Errorf("Cannot deserialize raw message with err: %v.", err) return err } // Decrypt if encryption is enabled and payload type is output if dataChannel.encryptionEnabled && (outputMessage.PayloadType == uint32(message.Output) || outputMessage.PayloadType == uint32(message.StdErr) || outputMessage.PayloadType == uint32(message.ExitCode)) { outputMessage.Payload, err = dataChannel.encryption.Decrypt(log, outputMessage.Payload) if err != nil { log.Errorf("Unable to decrypt buffered message data payload, MessageType %s, "+ "PayloadType %d, err: %s.", outputMessage.MessageType, outputMessage.PayloadType, err) return err } } dataChannel.processOutputMessageWithHandlers(log, outputMessage) dataChannel.ExpectedSequenceNumber = dataChannel.ExpectedSequenceNumber + 1 dataChannel.RemoveDataFromIncomingMessageBuffer(bufferedStreamMessage.SequenceNumber) } else { break } } return } // handleAcknowledgeMessage deserialize acknowledge content and process it func (dataChannel *DataChannel) HandleAcknowledgeMessage( log log.T, outputMessage message.ClientMessage) (err error) { var acknowledgeMessage message.AcknowledgeContent if acknowledgeMessage, err = outputMessage.DeserializeDataStreamAcknowledgeContent(log); err != nil { log.Errorf("Cannot deserialize payload to AcknowledgeMessage with error: %v.", err) return err } err = ProcessAcknowledgedMessageCall(log, dataChannel, acknowledgeMessage) return err } // handleChannelClosedMessage exits the shell func (dataChannel DataChannel) HandleChannelClosedMessage(log log.T, stopHandler Stop, sessionId string, outputMessage message.ClientMessage) { var ( channelClosedMessage message.ChannelClosed err error ) if channelClosedMessage, err = outputMessage.DeserializeChannelClosedMessage(log); err != nil { log.Errorf("Cannot deserialize payload to ChannelClosedMessage: %v.", 
err) } log.Infof("Exiting session with sessionId: %s with output: %s", sessionId, channelClosedMessage.Output) if channelClosedMessage.Output == "" { fmt.Fprintf(os.Stdout, "\n\nExiting session with sessionId: %s.\n\n", sessionId) } else { fmt.Fprintf(os.Stdout, "\n\nSessionId: %s : %s\n\n", sessionId, channelClosedMessage.Output) } stopHandler() } // AddDataToOutgoingMessageBuffer removes first message from OutgoingMessageBuffer if capacity is full and adds given message at the end func (dataChannel *DataChannel) AddDataToOutgoingMessageBuffer(streamMessage StreamingMessage) { if dataChannel.OutgoingMessageBuffer.Messages.Len() == dataChannel.OutgoingMessageBuffer.Capacity { dataChannel.RemoveDataFromOutgoingMessageBuffer(dataChannel.OutgoingMessageBuffer.Messages.Front()) } dataChannel.OutgoingMessageBuffer.Mutex.Lock() dataChannel.OutgoingMessageBuffer.Messages.PushBack(streamMessage) dataChannel.OutgoingMessageBuffer.Mutex.Unlock() } // RemoveDataFromOutgoingMessageBuffer removes given element from OutgoingMessageBuffer func (dataChannel *DataChannel) RemoveDataFromOutgoingMessageBuffer(streamMessageElement *list.Element) { dataChannel.OutgoingMessageBuffer.Mutex.Lock() dataChannel.OutgoingMessageBuffer.Messages.Remove(streamMessageElement) dataChannel.OutgoingMessageBuffer.Mutex.Unlock() } // AddDataToIncomingMessageBuffer adds given message to IncomingMessageBuffer if it has capacity func (dataChannel *DataChannel) AddDataToIncomingMessageBuffer(streamMessage StreamingMessage) { if len(dataChannel.IncomingMessageBuffer.Messages) == dataChannel.IncomingMessageBuffer.Capacity { return } dataChannel.IncomingMessageBuffer.Mutex.Lock() dataChannel.IncomingMessageBuffer.Messages[streamMessage.SequenceNumber] = streamMessage dataChannel.IncomingMessageBuffer.Mutex.Unlock() } // RemoveDataFromIncomingMessageBuffer removes given sequence number message from IncomingMessageBuffer func (dataChannel *DataChannel) RemoveDataFromIncomingMessageBuffer(sequenceNumber int64) 
{ dataChannel.IncomingMessageBuffer.Mutex.Lock() delete(dataChannel.IncomingMessageBuffer.Messages, sequenceNumber) dataChannel.IncomingMessageBuffer.Mutex.Unlock() } // CalculateRetransmissionTimeout calculates message retransmission timeout value based on round trip time on given message func (dataChannel *DataChannel) CalculateRetransmissionTimeout(log log.T, streamingMessage StreamingMessage) { newRoundTripTime := float64(GetRoundTripTime(streamingMessage)) dataChannel.RoundTripTimeVariation = ((1 - config.RTTVConstant) * dataChannel.RoundTripTimeVariation) + (config.RTTVConstant * math.Abs(dataChannel.RoundTripTime-newRoundTripTime)) dataChannel.RoundTripTime = ((1 - config.RTTConstant) * dataChannel.RoundTripTime) + (config.RTTConstant * newRoundTripTime) dataChannel.RetransmissionTimeout = time.Duration(dataChannel.RoundTripTime + math.Max(float64(config.ClockGranularity), float64(4*dataChannel.RoundTripTimeVariation))) // Ensure RetransmissionTimeout do not exceed maximum timeout defined if dataChannel.RetransmissionTimeout > config.MaxTransmissionTimeout { dataChannel.RetransmissionTimeout = config.MaxTransmissionTimeout } } // ProcessKMSEncryptionHandshakeAction sets up the encrypter and calls KMS to generate a new data key. This is triggered // when encryption is specified in HandshakeRequest func (dataChannel *DataChannel) ProcessKMSEncryptionHandshakeAction(log log.T, actionParams json.RawMessage) (err error) { if dataChannel.IsAwsCliUpgradeNeeded { return errors.New("Installed version of CLI does not support Session Manager encryption feature. 
Please upgrade to the latest version of your CLI (e.g., AWS CLI).") } kmsEncRequest := message.KMSEncryptionRequest{} json.Unmarshal(actionParams, &kmsEncRequest) log.Info(actionParams) kmsKeyId := kmsEncRequest.KMSKeyID kmsService, err := encryption.NewKMSService(log) if err != nil { return fmt.Errorf("error while creating new KMS service, %v", err) } encryptionContext := map[string]*string{"aws:ssm:SessionId": &dataChannel.SessionId, "aws:ssm:TargetId": &dataChannel.TargetId} dataChannel.encryption, err = newEncrypter(log, kmsKeyId, encryptionContext, kmsService) return } // ProcessSessionTypeHandshakeAction processes session type action in HandshakeRequest. This sets the session type in the datachannel. func (dataChannel *DataChannel) ProcessSessionTypeHandshakeAction(actionParams json.RawMessage) (err error) { sessTypeReq := message.SessionTypeRequest{} json.Unmarshal(actionParams, &sessTypeReq) switch sessTypeReq.SessionType { // This switch-case is just so that we can fail early if an unknown session type is passed in. 
case config.ShellPluginName, config.InteractiveCommandsPluginName, config.NonInteractiveCommandsPluginName: dataChannel.sessionType = config.ShellPluginName dataChannel.sessionProperties = sessTypeReq.Properties return nil case config.PortPluginName: dataChannel.sessionType = sessTypeReq.SessionType dataChannel.sessionProperties = sessTypeReq.Properties return nil default: return errors.New(fmt.Sprintf("Unknown session type %s", sessTypeReq.SessionType)) } } // IsSessionTypeSet check has data channel sessionType been set func (dataChannel *DataChannel) IsSessionTypeSet() chan bool { return dataChannel.isSessionTypeSet } // IsStreamMessageResendTimeout checks if resending a streaming message reaches timeout func (dataChannel *DataChannel) IsStreamMessageResendTimeout() chan bool { return dataChannel.isStreamMessageResendTimeout } // SetSessionType set session type func (dataChannel *DataChannel) SetSessionType(sessionType string) { dataChannel.sessionType = sessionType dataChannel.isSessionTypeSet <- true } // GetSessionType returns SessionType of the dataChannel func (dataChannel *DataChannel) GetSessionType() string { return dataChannel.sessionType } // GetSessionProperties returns SessionProperties of the dataChannel func (dataChannel *DataChannel) GetSessionProperties() interface{} { return dataChannel.sessionProperties } // GetWsChannel returns WsChannel of the dataChannel func (dataChannel *DataChannel) GetWsChannel() communicator.IWebSocketChannel { return dataChannel.wsChannel } // SetWsChannel set WsChannel of the dataChannel func (dataChannel *DataChannel) SetWsChannel(wsChannel communicator.IWebSocketChannel) { dataChannel.wsChannel = wsChannel } // GetStreamDataSequenceNumber returns StreamDataSequenceNumber of the dataChannel func (dataChannel *DataChannel) GetStreamDataSequenceNumber() int64 { return dataChannel.StreamDataSequenceNumber } // GetAgentVersion returns agent version of the target instance func (dataChannel *DataChannel) GetAgentVersion() 
string { return dataChannel.agentVersion } // SetAgentVersion set agent version of the target instance func (dataChannel *DataChannel) SetAgentVersion(agentVersion string) { dataChannel.agentVersion = agentVersion }
938
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // datachannel package implement data channel for interactive sessions. package datachannel import ( "encoding/json" "fmt" "reflect" "strconv" "strings" "sync" "testing" "time" "github.com/aws/aws-sdk-go/service/kms/kmsiface" communicatorMocks "github.com/aws/session-manager-plugin/src/communicator/mocks" "github.com/aws/session-manager-plugin/src/config" "github.com/aws/session-manager-plugin/src/encryption" "github.com/aws/session-manager-plugin/src/encryption/mocks" "github.com/aws/session-manager-plugin/src/log" "github.com/aws/session-manager-plugin/src/message" "github.com/aws/session-manager-plugin/src/version" "github.com/gorilla/websocket" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/twinj/uuid" ) var ( outputMessageType = message.OutputStreamMessage serializedClientMessages, streamingMessages = getClientAndStreamingMessageList(7) logger = log.NewMockLog() mockWsChannel = &communicatorMocks.IWebSocketChannel{} streamUrl = "stream-url" channelToken = "channel-token" sessionId = "session-id" clientId = "client-id" kmsKeyId = "some-key-id" instanceId = "some-instance-id" cipherTextKey = []byte("cipher-text-key") mockLogger = log.NewMockLog() messageType = message.OutputStreamMessage schemaVersion = uint32(1) messageId = "dd01e56b-ff48-483e-a508-b5f073f31b16" createdDate = uint64(1503434274948) payload = []byte("testPayload") streamDataSequenceNumber = 
int64(0) expectedSequenceNumber = int64(0) ) func TestInitialize(t *testing.T) { datachannel := DataChannel{} isAwsCliUpgradeNeeded := false datachannel.Initialize(mockLogger, clientId, sessionId, instanceId, isAwsCliUpgradeNeeded) assert.Equal(t, config.RolePublishSubscribe, datachannel.Role) assert.Equal(t, clientId, datachannel.ClientId) assert.True(t, datachannel.ExpectedSequenceNumber == 0) assert.True(t, datachannel.StreamDataSequenceNumber == 0) assert.NotNil(t, datachannel.OutgoingMessageBuffer) assert.NotNil(t, datachannel.IncomingMessageBuffer) assert.Equal(t, float64(config.DefaultRoundTripTime), datachannel.RoundTripTime) assert.Equal(t, float64(config.DefaultRoundTripTimeVariation), datachannel.RoundTripTimeVariation) assert.Equal(t, config.DefaultTransmissionTimeout, datachannel.RetransmissionTimeout) assert.NotNil(t, datachannel.wsChannel) } func TestSetWebsocket(t *testing.T) { datachannel := getDataChannel() mockWsChannel.On("GetStreamUrl").Return(streamUrl) mockWsChannel.On("GetChannelToken").Return(channelToken) mockWsChannel.On("Initialize", mock.Anything, mock.Anything, mock.Anything).Return(nil) datachannel.SetWebsocket(mockLogger, streamUrl, channelToken) assert.Equal(t, streamUrl, datachannel.wsChannel.GetStreamUrl()) assert.Equal(t, channelToken, datachannel.wsChannel.GetChannelToken()) mockWsChannel.AssertExpectations(t) } func TestReconnect(t *testing.T) { datachannel := getDataChannel() mockWsChannel.On("Close", mock.Anything).Return(nil) mockWsChannel.On("Open", mock.Anything).Return(nil) mockWsChannel.On("SendMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) // test reconnect err := datachannel.Reconnect(mockLogger) assert.Nil(t, err) mockWsChannel.AssertExpectations(t) } func TestOpen(t *testing.T) { datachannel := getDataChannel() mockWsChannel.On("Open", mock.Anything).Return(nil) err := datachannel.Open(mockLogger) assert.Nil(t, err) mockWsChannel.AssertExpectations(t) } func TestClose(t *testing.T) { datachannel 
:= getDataChannel() mockWsChannel.On("Close", mock.Anything).Return(nil) // test close err := datachannel.Close(mockLogger) assert.Nil(t, err) mockWsChannel.AssertExpectations(t) } func TestFinalizeDataChannelHandshake(t *testing.T) { datachannel := getDataChannel() mockWsChannel.On("SendMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) mockWsChannel.On("GetStreamUrl").Return(streamUrl) err := datachannel.FinalizeDataChannelHandshake(mockLogger, channelToken) assert.Nil(t, err) assert.Equal(t, streamUrl, datachannel.wsChannel.GetStreamUrl()) mockWsChannel.AssertExpectations(t) } func TestSendMessage(t *testing.T) { datachannel := getDataChannel() mockWsChannel.On("SendMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) err := datachannel.SendMessage(mockLogger, []byte{10}, websocket.BinaryMessage) assert.Nil(t, err) mockWsChannel.AssertExpectations(t) } func TestSendInputDataMessage(t *testing.T) { dataChannel := getDataChannel() mockWsChannel.On("SendMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) dataChannel.SendInputDataMessage(mockLogger, message.Output, payload) assert.Equal(t, streamDataSequenceNumber+1, dataChannel.StreamDataSequenceNumber) assert.Equal(t, 1, dataChannel.OutgoingMessageBuffer.Messages.Len()) mockWsChannel.AssertExpectations(t) } func TestProcessAcknowledgedMessage(t *testing.T) { dataChannel := getDataChannel() dataChannel.AddDataToOutgoingMessageBuffer(streamingMessages[0]) dataStreamAcknowledgeContent := message.AcknowledgeContent{ MessageType: messageType, MessageId: messageId, SequenceNumber: 0, IsSequentialMessage: true, } dataChannel.ProcessAcknowledgedMessage(mockLogger, dataStreamAcknowledgeContent) assert.Equal(t, 0, dataChannel.OutgoingMessageBuffer.Messages.Len()) } func TestCalculateRetransmissionTimeout(t *testing.T) { dataChannel := getDataChannel() GetRoundTripTime = func(streamingMessage StreamingMessage) time.Duration { return time.Duration(140 * time.Millisecond) } 
dataChannel.CalculateRetransmissionTimeout(mockLogger, streamingMessages[0]) assert.Equal(t, int64(105), int64(time.Duration(dataChannel.RoundTripTime)/time.Millisecond)) assert.Equal(t, int64(10), int64(time.Duration(dataChannel.RoundTripTimeVariation)/time.Millisecond)) assert.Equal(t, int64(145), int64(dataChannel.RetransmissionTimeout/time.Millisecond)) } func TestAddDataToOutgoingMessageBuffer(t *testing.T) { dataChannel := getDataChannel() dataChannel.OutgoingMessageBuffer.Capacity = 2 dataChannel.AddDataToOutgoingMessageBuffer(streamingMessages[0]) assert.Equal(t, 1, dataChannel.OutgoingMessageBuffer.Messages.Len()) bufferedStreamMessage := dataChannel.OutgoingMessageBuffer.Messages.Front().Value.(StreamingMessage) assert.Equal(t, int64(0), bufferedStreamMessage.SequenceNumber) dataChannel.AddDataToOutgoingMessageBuffer(streamingMessages[1]) assert.Equal(t, 2, dataChannel.OutgoingMessageBuffer.Messages.Len()) bufferedStreamMessage = dataChannel.OutgoingMessageBuffer.Messages.Front().Value.(StreamingMessage) assert.Equal(t, int64(0), bufferedStreamMessage.SequenceNumber) bufferedStreamMessage = dataChannel.OutgoingMessageBuffer.Messages.Back().Value.(StreamingMessage) assert.Equal(t, int64(1), bufferedStreamMessage.SequenceNumber) dataChannel.AddDataToOutgoingMessageBuffer(streamingMessages[2]) assert.Equal(t, 2, dataChannel.OutgoingMessageBuffer.Messages.Len()) bufferedStreamMessage = dataChannel.OutgoingMessageBuffer.Messages.Front().Value.(StreamingMessage) assert.Equal(t, int64(1), bufferedStreamMessage.SequenceNumber) bufferedStreamMessage = dataChannel.OutgoingMessageBuffer.Messages.Back().Value.(StreamingMessage) assert.Equal(t, int64(2), bufferedStreamMessage.SequenceNumber) } func TestAddDataToIncomingMessageBuffer(t *testing.T) { dataChannel := getDataChannel() dataChannel.IncomingMessageBuffer.Capacity = 2 dataChannel.AddDataToIncomingMessageBuffer(streamingMessages[0]) assert.Equal(t, 1, len(dataChannel.IncomingMessageBuffer.Messages)) 
bufferedStreamMessage := dataChannel.IncomingMessageBuffer.Messages[0] assert.Equal(t, int64(0), bufferedStreamMessage.SequenceNumber) dataChannel.AddDataToIncomingMessageBuffer(streamingMessages[1]) assert.Equal(t, 2, len(dataChannel.IncomingMessageBuffer.Messages)) bufferedStreamMessage = dataChannel.IncomingMessageBuffer.Messages[0] assert.Equal(t, int64(0), bufferedStreamMessage.SequenceNumber) bufferedStreamMessage = dataChannel.IncomingMessageBuffer.Messages[1] assert.Equal(t, int64(1), bufferedStreamMessage.SequenceNumber) dataChannel.AddDataToIncomingMessageBuffer(streamingMessages[2]) assert.Equal(t, 2, len(dataChannel.IncomingMessageBuffer.Messages)) bufferedStreamMessage = dataChannel.IncomingMessageBuffer.Messages[0] assert.Equal(t, int64(0), bufferedStreamMessage.SequenceNumber) bufferedStreamMessage = dataChannel.IncomingMessageBuffer.Messages[1] assert.Equal(t, int64(1), bufferedStreamMessage.SequenceNumber) bufferedStreamMessage = dataChannel.IncomingMessageBuffer.Messages[2] assert.Nil(t, bufferedStreamMessage.Content) } func TestRemoveDataFromOutgoingMessageBuffer(t *testing.T) { dataChannel := getDataChannel() for i := 0; i < 3; i++ { dataChannel.AddDataToOutgoingMessageBuffer(streamingMessages[i]) } dataChannel.RemoveDataFromOutgoingMessageBuffer(dataChannel.OutgoingMessageBuffer.Messages.Front()) assert.Equal(t, 2, dataChannel.OutgoingMessageBuffer.Messages.Len()) } func TestRemoveDataFromIncomingMessageBuffer(t *testing.T) { dataChannel := getDataChannel() for i := 0; i < 3; i++ { dataChannel.AddDataToIncomingMessageBuffer(streamingMessages[i]) } dataChannel.RemoveDataFromIncomingMessageBuffer(0) assert.Equal(t, 2, len(dataChannel.IncomingMessageBuffer.Messages)) } func TestResendStreamDataMessageScheduler(t *testing.T) { dataChannel := getDataChannel() mockChannel := &communicatorMocks.IWebSocketChannel{} dataChannel.wsChannel = mockChannel for i := 0; i < 3; i++ { dataChannel.AddDataToOutgoingMessageBuffer(streamingMessages[i]) } var wg 
sync.WaitGroup wg.Add(1) // Spawning a separate go routine to close websocket connection. // This is required as ResendStreamDataMessageScheduler has a for loop which will continuosly resend data until channel is closed. go func() { time.Sleep(1 * time.Second) wg.Done() }() SendMessageCallCount := 0 SendMessageCall = func(log log.T, dataChannel *DataChannel, input []byte, inputType int) error { SendMessageCallCount++ return nil } dataChannel.ResendStreamDataMessageScheduler(mockLogger) wg.Wait() assert.True(t, SendMessageCallCount > 1) } func TestDataChannelIncomingMessageHandlerForExpectedInputStreamDataMessage(t *testing.T) { dataChannel := getDataChannel() mockChannel := &communicatorMocks.IWebSocketChannel{} dataChannel.wsChannel = mockChannel SendAcknowledgeMessageCallCount := 0 SendAcknowledgeMessageCall = func(log log.T, dataChannel *DataChannel, streamDataMessage message.ClientMessage) error { SendAcknowledgeMessageCallCount++ return nil } var handler OutputStreamDataMessageHandler = func(log log.T, outputMessage message.ClientMessage) (bool, error) { return true, nil } var stopHandler Stop dataChannel.RegisterOutputStreamHandler(handler, true) // First scenario is to test when incoming message sequence number matches with expected sequence number // and no message found in IncomingMessageBuffer err := dataChannel.OutputMessageHandler(logger, stopHandler, sessionId, serializedClientMessages[0]) assert.Nil(t, err) assert.Equal(t, int64(1), dataChannel.ExpectedSequenceNumber) assert.Equal(t, 0, len(dataChannel.IncomingMessageBuffer.Messages)) assert.Equal(t, 1, SendAcknowledgeMessageCallCount) // Second scenario is to test when incoming message sequence number matches with expected sequence number // and there are more messages found in IncomingMessageBuffer to be processed dataChannel.AddDataToIncomingMessageBuffer(streamingMessages[2]) dataChannel.AddDataToIncomingMessageBuffer(streamingMessages[6]) 
dataChannel.AddDataToIncomingMessageBuffer(streamingMessages[4]) dataChannel.AddDataToIncomingMessageBuffer(streamingMessages[3]) err = dataChannel.OutputMessageHandler(logger, stopHandler, sessionId, serializedClientMessages[1]) assert.Nil(t, err) assert.Equal(t, int64(5), dataChannel.ExpectedSequenceNumber) assert.Equal(t, 1, len(dataChannel.IncomingMessageBuffer.Messages)) // All messages from buffer should get processed except sequence number 6 as expected number to be processed at this time is 5 bufferedStreamMessage := dataChannel.IncomingMessageBuffer.Messages[6] assert.Equal(t, int64(6), bufferedStreamMessage.SequenceNumber) } func TestDataChannelIncomingMessageHandlerForUnexpectedInputStreamDataMessage(t *testing.T) { dataChannel := getDataChannel() mockChannel := &communicatorMocks.IWebSocketChannel{} dataChannel.wsChannel = mockChannel dataChannel.IncomingMessageBuffer.Capacity = 2 SendAcknowledgeMessageCallCount := 0 SendAcknowledgeMessageCall = func(log log.T, dataChannel *DataChannel, streamDataMessage message.ClientMessage) error { SendAcknowledgeMessageCallCount++ return nil } var stopHandler Stop err := dataChannel.OutputMessageHandler(logger, stopHandler, sessionId, serializedClientMessages[1]) assert.Nil(t, err) err = dataChannel.OutputMessageHandler(logger, stopHandler, sessionId, serializedClientMessages[2]) assert.Nil(t, err) err = dataChannel.OutputMessageHandler(logger, stopHandler, sessionId, serializedClientMessages[3]) assert.Nil(t, err) // Since capacity of IncomingMessageBuffer is 2, stream data with sequence number 3 should be ignored without sending acknowledgement assert.Equal(t, expectedSequenceNumber, dataChannel.ExpectedSequenceNumber) assert.Equal(t, 2, len(dataChannel.IncomingMessageBuffer.Messages)) assert.Equal(t, 2, SendAcknowledgeMessageCallCount) bufferedStreamMessage := dataChannel.IncomingMessageBuffer.Messages[1] assert.Equal(t, int64(1), bufferedStreamMessage.SequenceNumber) bufferedStreamMessage = 
dataChannel.IncomingMessageBuffer.Messages[2] assert.Equal(t, int64(2), bufferedStreamMessage.SequenceNumber) bufferedStreamMessage = dataChannel.IncomingMessageBuffer.Messages[3] assert.Nil(t, bufferedStreamMessage.Content) } func TestDataChannelIncomingMessageHandlerForAcknowledgeMessage(t *testing.T) { dataChannel := getDataChannel() mockChannel := &communicatorMocks.IWebSocketChannel{} dataChannel.wsChannel = mockChannel var stopHandler Stop for i := 0; i < 3; i++ { dataChannel.AddDataToOutgoingMessageBuffer(streamingMessages[i]) } ProcessAcknowledgedMessageCallCount := 0 ProcessAcknowledgedMessageCall = func(log log.T, dataChannel *DataChannel, acknowledgeMessage message.AcknowledgeContent) error { ProcessAcknowledgedMessageCallCount++ return nil } acknowledgeContent := message.AcknowledgeContent{ MessageType: outputMessageType, MessageId: messageId, SequenceNumber: 1, IsSequentialMessage: true, } payload, _ = json.Marshal(acknowledgeContent) clientMessage := getClientMessage(0, message.AcknowledgeMessage, uint32(message.Output), payload) serializedClientMessage, _ := clientMessage.SerializeClientMessage(logger) err := dataChannel.OutputMessageHandler(logger, stopHandler, sessionId, serializedClientMessage) assert.Nil(t, err) assert.Equal(t, 1, ProcessAcknowledgedMessageCallCount) assert.Equal(t, 3, dataChannel.OutgoingMessageBuffer.Messages.Len()) } func TestDataChannelIncomingMessageHandlerForPausePublicationessage(t *testing.T) { dataChannel := getDataChannel() mockChannel := &communicatorMocks.IWebSocketChannel{} dataChannel.wsChannel = mockChannel size := 5 streamingMessages = make([]StreamingMessage, size) serializedClientMessage := make([][]byte, size) for i := 0; i < size; i++ { clientMessage := getClientMessage(int64(i), message.PausePublicationMessage, uint32(message.Output), []byte("")) serializedClientMessage[i], _ = clientMessage.SerializeClientMessage(mockLogger) streamingMessages[i] = StreamingMessage{ serializedClientMessage[i], int64(i), 
time.Now(), new(int), } } var handler OutputStreamDataMessageHandler = func(log log.T, outputMessage message.ClientMessage) (bool, error) { return true, nil } var stopHandler Stop dataChannel.RegisterOutputStreamHandler(handler, true) err := dataChannel.OutputMessageHandler(logger, stopHandler, sessionId, serializedClientMessages[0]) assert.Nil(t, err) } func TestHandshakeRequestHandler(t *testing.T) { dataChannel := getDataChannel() mockChannel := &communicatorMocks.IWebSocketChannel{} dataChannel.wsChannel = mockChannel mockEncrypter := &mocks.IEncrypter{} handshakeRequestBytes, _ := json.Marshal(buildHandshakeRequest()) clientMessage := getClientMessage(0, message.OutputStreamMessage, uint32(message.HandshakeRequestPayloadType), handshakeRequestBytes) handshakeRequestMessageBytes, _ := clientMessage.SerializeClientMessage(mockLogger) newEncrypter = func(log log.T, kmsKeyIdInput string, context map[string]*string, KMSService kmsiface.KMSAPI) (encryption.IEncrypter, error) { expectedContext := map[string]*string{"aws:ssm:SessionId": &sessionId, "aws:ssm:TargetId": &instanceId} assert.Equal(t, kmsKeyId, kmsKeyIdInput) assert.Equal(t, expectedContext, context) mockEncrypter.On("GetEncryptedDataKey").Return(cipherTextKey) return mockEncrypter, nil } // Mock sending of encryption challenge handshakeResponseMatcher := func(sentData []byte) bool { clientMessage := &message.ClientMessage{} clientMessage.DeserializeClientMessage(mockLogger, sentData) var handshakeResponse = message.HandshakeResponsePayload{} json.Unmarshal(clientMessage.Payload, &handshakeResponse) // Return true if any other message type (typically to account for acknowledge) if clientMessage.MessageType != message.OutputStreamMessage { return true } expectedActions := []message.ProcessedClientAction{} processedAction := message.ProcessedClientAction{} processedAction.ActionType = message.KMSEncryption processedAction.ActionStatus = message.Success processedAction.ActionResult = 
message.KMSEncryptionResponse{ KMSCipherTextKey: cipherTextKey, } expectedActions = append(expectedActions, processedAction) processedAction = message.ProcessedClientAction{} processedAction.ActionType = message.SessionType processedAction.ActionStatus = message.Success expectedActions = append(expectedActions, processedAction) return handshakeResponse.ClientVersion == version.Version && reflect.DeepEqual(handshakeResponse.ProcessedClientActions, expectedActions) } mockChannel.On("SendMessage", mock.Anything, mock.MatchedBy(handshakeResponseMatcher), mock.Anything).Return(nil) dataChannel.OutputMessageHandler(mockLogger, func() {}, sessionId, handshakeRequestMessageBytes) assert.Equal(t, mockEncrypter, dataChannel.encryption) } func TestHandleOutputMessageForDefaultTypeWithError(t *testing.T) { dataChannel := getDataChannel() mockChannel := &communicatorMocks.IWebSocketChannel{} dataChannel.wsChannel = mockChannel clientMessage := getClientMessage(0, message.OutputStreamMessage, uint32(message.Output), payload) rawMessage := []byte("rawMessage") var handler OutputStreamDataMessageHandler = func(log log.T, outputMessage message.ClientMessage) (bool, error) { return true, log.Errorf("OutputStreamDataMessageHandler Error") } dataChannel.RegisterOutputStreamHandler(handler, true) err := dataChannel.HandleOutputMessage(mockLogger, clientMessage, rawMessage) assert.NotNil(t, err) } func TestHandleOutputMessageForExitCodePayloadTypeWithError(t *testing.T) { dataChannel := getDataChannel() mockChannel := &communicatorMocks.IWebSocketChannel{} dataChannel.wsChannel = mockChannel clientMessage := getClientMessage(0, message.OutputStreamMessage, uint32(message.ExitCode), payload) dataChannel.encryptionEnabled = true mockEncrypter := &mocks.IEncrypter{} dataChannel.encryption = mockEncrypter mockErr := fmt.Errorf("Decrypt Error") mockEncrypter.On("Decrypt", mock.Anything, mock.Anything).Return([]byte{10, 11, 12}, mockErr) rawMessage := []byte("rawMessage") err := 
dataChannel.HandleOutputMessage(mockLogger, clientMessage, rawMessage) assert.Equal(t, mockErr, err) } func TestHandleHandshakeRequestWithMessageDeserializeError(t *testing.T) { dataChannel := getDataChannel() handshakeRequestBytes, _ := json.Marshal(buildHandshakeRequest()) //Using HandshakeCompletePayloadType to trigger the type check error clientMessage := getClientMessage(0, message.OutputStreamMessage, uint32(message.HandshakeCompletePayloadType), handshakeRequestBytes) err := dataChannel.handleHandshakeRequest(mockLogger, clientMessage) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), "ClientMessage PayloadType is not of type HandshakeRequestPayloadType")) } func TestProcessOutputMessageWithHandlers(t *testing.T) { dataChannel := getDataChannel() mockChannel := &communicatorMocks.IWebSocketChannel{} dataChannel.wsChannel = mockChannel var handler OutputStreamDataMessageHandler = func(log log.T, outputMessage message.ClientMessage) (bool, error) { return true, log.Errorf("OutputStreamDataMessageHandler Error") } dataChannel.RegisterOutputStreamHandler(handler, true) handshakeRequestBytes, _ := json.Marshal(buildHandshakeRequest()) clientMessage := getClientMessage(0, message.OutputStreamMessage, uint32(message.HandshakeCompletePayloadType), handshakeRequestBytes) isHandlerReady, err := dataChannel.processOutputMessageWithHandlers(mockLogger, clientMessage) assert.NotNil(t, err) assert.Equal(t, isHandlerReady, true) } func TestProcessSessionTypeHandshakeActionForInteractiveCommands(t *testing.T) { actionParams := []byte("{\"SessionType\":\"InteractiveCommands\"}") dataChannel := getDataChannel() err := dataChannel.ProcessSessionTypeHandshakeAction(actionParams) // Test that InteractiveCommands is a valid session type assert.Nil(t, err) // Test that InteractiveCommands is translated to Standard_Stream in data channel assert.Equal(t, config.ShellPluginName, dataChannel.sessionType) } func 
TestProcessSessionTypeHandshakeActionForNonInteractiveCommands(t *testing.T) { actionParams := []byte("{\"SessionType\":\"NonInteractiveCommands\"}") dataChannel := getDataChannel() err := dataChannel.ProcessSessionTypeHandshakeAction(actionParams) // Test that NonInteractiveCommands is a valid session type assert.Nil(t, err) // Test that NonInteractiveCommands is translated to Standard_Stream in data channel assert.Equal(t, config.ShellPluginName, dataChannel.sessionType) } func buildHandshakeRequest() message.HandshakeRequestPayload { handshakeRquest := message.HandshakeRequestPayload{} handshakeRquest.AgentVersion = "10.0.0.1" handshakeRquest.RequestedClientActions = []message.RequestedClientAction{} requestedAction := message.RequestedClientAction{} requestedAction.ActionType = message.KMSEncryption requestedAction.ActionParameters, _ = json.Marshal(message.KMSEncryptionRequest{KMSKeyID: kmsKeyId}) handshakeRquest.RequestedClientActions = append(handshakeRquest.RequestedClientActions, requestedAction) requestedAction = message.RequestedClientAction{} requestedAction.ActionType = message.SessionType requestedAction.ActionParameters, _ = json.Marshal(message.SessionTypeRequest{SessionType: config.ShellPluginName}) handshakeRquest.RequestedClientActions = append(handshakeRquest.RequestedClientActions, requestedAction) return handshakeRquest } func getDataChannel() *DataChannel { dataChannel := &DataChannel{} dataChannel.Initialize(mockLogger, clientId, sessionId, instanceId, false) dataChannel.wsChannel = mockWsChannel return dataChannel } // GetClientMessage constructs and returns ClientMessage with given sequenceNumber, messageType & payload func getClientMessage(sequenceNumber int64, messageType string, payloadType uint32, payload []byte) message.ClientMessage { messageUUID, _ := uuid.Parse(messageId) clientMessage := message.ClientMessage{ MessageType: messageType, SchemaVersion: schemaVersion, CreatedDate: createdDate, SequenceNumber: sequenceNumber, Flags: 
2, MessageId: messageUUID, PayloadType: payloadType, Payload: payload, } return clientMessage } func getClientAndStreamingMessageList(size int) (serializedClientMessage [][]byte, streamingMessages []StreamingMessage) { var payload string streamingMessages = make([]StreamingMessage, size) serializedClientMessage = make([][]byte, size) for i := 0; i < size; i++ { payload = "testPayload" + strconv.Itoa(i) clientMessage := getClientMessage(int64(i), messageType, uint32(message.Output), []byte(payload)) serializedClientMessage[i], _ = clientMessage.SerializeClientMessage(mockLogger) streamingMessages[i] = StreamingMessage{ serializedClientMessage[i], int64(i), time.Now(), new(int), } } return }
626
session-manager-plugin
aws
Go
// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.

// Code generated by mockery 2.7.4. DO NOT EDIT.

package mocks

import (
	list "container/list"

	communicator "github.com/aws/session-manager-plugin/src/communicator"
	datachannel "github.com/aws/session-manager-plugin/src/datachannel"
	log "github.com/aws/session-manager-plugin/src/log"
	message "github.com/aws/session-manager-plugin/src/message"
	mock "github.com/stretchr/testify/mock"
)

// IDataChannel is an autogenerated mock type for the IDataChannel type
type IDataChannel struct {
	mock.Mock
}

// AddDataToIncomingMessageBuffer provides a mock function with given fields: streamMessage
func (_m *IDataChannel) AddDataToIncomingMessageBuffer(streamMessage datachannel.StreamingMessage) {
	_m.Called(streamMessage)
}

// AddDataToOutgoingMessageBuffer provides a mock function with given fields: streamMessage
func (_m *IDataChannel) AddDataToOutgoingMessageBuffer(streamMessage datachannel.StreamingMessage) {
	_m.Called(streamMessage)
}

// CalculateRetransmissionTimeout provides a mock function with given fields: _a0, streamingMessage
func (_m *IDataChannel) CalculateRetransmissionTimeout(_a0 log.T, streamingMessage datachannel.StreamingMessage) {
	_m.Called(_a0, streamingMessage)
}

// Close provides a mock function with given fields: _a0
func (_m *IDataChannel) Close(_a0 log.T) error {
	ret := _m.Called(_a0)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// DeregisterOutputStreamHandler provides a mock function with given fields: handler
func (_m *IDataChannel) DeregisterOutputStreamHandler(handler datachannel.OutputStreamDataMessageHandler) {
	_m.Called(handler)
}

// FinalizeDataChannelHandshake provides a mock function with given fields: _a0, tokenValue
func (_m *IDataChannel) FinalizeDataChannelHandshake(_a0 log.T, tokenValue string) error {
	ret := _m.Called(_a0, tokenValue)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T, string) error); ok {
		r0 = rf(_a0, tokenValue)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// GetAgentVersion provides a mock function with given fields:
func (_m *IDataChannel) GetAgentVersion() string {
	ret := _m.Called()

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

// GetSessionProperties provides a mock function with given fields:
func (_m *IDataChannel) GetSessionProperties() interface{} {
	ret := _m.Called()

	var r0 interface{}
	if rf, ok := ret.Get(0).(func() interface{}); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(interface{})
		}
	}

	return r0
}

// GetSessionType provides a mock function with given fields:
func (_m *IDataChannel) GetSessionType() string {
	ret := _m.Called()

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

// GetStreamDataSequenceNumber provides a mock function with given fields:
func (_m *IDataChannel) GetStreamDataSequenceNumber() int64 {
	ret := _m.Called()

	var r0 int64
	if rf, ok := ret.Get(0).(func() int64); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(int64)
	}

	return r0
}

// GetWsChannel provides a mock function with given fields:
func (_m *IDataChannel) GetWsChannel() communicator.IWebSocketChannel {
	ret := _m.Called()

	var r0 communicator.IWebSocketChannel
	if rf, ok := ret.Get(0).(func() communicator.IWebSocketChannel); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(communicator.IWebSocketChannel)
		}
	}

	return r0
}

// Initialize provides a mock function with given fields: _a0, clientId, sessionId, targetId, isAwsCliUpgradeNeeded
func (_m *IDataChannel) Initialize(_a0 log.T, clientId string, sessionId string, targetId string, isAwsCliUpgradeNeeded bool) {
	_m.Called(_a0, clientId, sessionId, targetId, isAwsCliUpgradeNeeded)
}

// IsSessionTypeSet provides a mock function with given fields:
func (_m *IDataChannel) IsSessionTypeSet() chan bool {
	ret := _m.Called()

	var r0 chan bool
	if rf, ok := ret.Get(0).(func() chan bool); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(chan bool)
		}
	}

	return r0
}

// IsStreamMessageResendTimeout provides a mock function with given fields:
func (_m *IDataChannel) IsStreamMessageResendTimeout() chan bool {
	ret := _m.Called()

	var r0 chan bool
	if rf, ok := ret.Get(0).(func() chan bool); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(chan bool)
		}
	}

	return r0
}

// Open provides a mock function with given fields: _a0
func (_m *IDataChannel) Open(_a0 log.T) error {
	ret := _m.Called(_a0)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// OutputMessageHandler provides a mock function with given fields: _a0, stopHandler, sessionID, rawMessage
func (_m *IDataChannel) OutputMessageHandler(_a0 log.T, stopHandler datachannel.Stop, sessionID string, rawMessage []byte) error {
	ret := _m.Called(_a0, stopHandler, sessionID, rawMessage)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T, datachannel.Stop, string, []byte) error); ok {
		r0 = rf(_a0, stopHandler, sessionID, rawMessage)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// ProcessAcknowledgedMessage provides a mock function with given fields: _a0, acknowledgeMessageContent
func (_m *IDataChannel) ProcessAcknowledgedMessage(_a0 log.T, acknowledgeMessageContent message.AcknowledgeContent) error {
	ret := _m.Called(_a0, acknowledgeMessageContent)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T, message.AcknowledgeContent) error); ok {
		r0 = rf(_a0, acknowledgeMessageContent)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Reconnect provides a mock function with given fields: _a0
func (_m *IDataChannel) Reconnect(_a0 log.T) error {
	ret := _m.Called(_a0)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// RegisterOutputStreamHandler provides a mock function with given fields: handler, isSessionSpecificHandler
func (_m *IDataChannel) RegisterOutputStreamHandler(handler datachannel.OutputStreamDataMessageHandler, isSessionSpecificHandler bool) {
	_m.Called(handler, isSessionSpecificHandler)
}

// RemoveDataFromIncomingMessageBuffer provides a mock function with given fields: sequenceNumber
func (_m *IDataChannel) RemoveDataFromIncomingMessageBuffer(sequenceNumber int64) {
	_m.Called(sequenceNumber)
}

// RemoveDataFromOutgoingMessageBuffer provides a mock function with given fields: streamMessageElement
func (_m *IDataChannel) RemoveDataFromOutgoingMessageBuffer(streamMessageElement *list.Element) {
	_m.Called(streamMessageElement)
}

// ResendStreamDataMessageScheduler provides a mock function with given fields: _a0
func (_m *IDataChannel) ResendStreamDataMessageScheduler(_a0 log.T) error {
	ret := _m.Called(_a0)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SendAcknowledgeMessage provides a mock function with given fields: _a0, clientMessage
func (_m *IDataChannel) SendAcknowledgeMessage(_a0 log.T, clientMessage message.ClientMessage) error {
	ret := _m.Called(_a0, clientMessage)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T, message.ClientMessage) error); ok {
		r0 = rf(_a0, clientMessage)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SendFlag provides a mock function with given fields: _a0, flagType
func (_m *IDataChannel) SendFlag(_a0 log.T, flagType message.PayloadTypeFlag) error {
	ret := _m.Called(_a0, flagType)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T, message.PayloadTypeFlag) error); ok {
		r0 = rf(_a0, flagType)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SendInputDataMessage provides a mock function with given fields: _a0, payloadType, inputData
func (_m *IDataChannel) SendInputDataMessage(_a0 log.T, payloadType message.PayloadType, inputData []byte) error {
	ret := _m.Called(_a0, payloadType, inputData)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T, message.PayloadType, []byte) error); ok {
		r0 = rf(_a0, payloadType, inputData)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SendMessage provides a mock function with given fields: _a0, input, inputType
func (_m *IDataChannel) SendMessage(_a0 log.T, input []byte, inputType int) error {
	ret := _m.Called(_a0, input, inputType)

	var r0 error
	if rf, ok := ret.Get(0).(func(log.T, []byte, int) error); ok {
		r0 = rf(_a0, input, inputType)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SetAgentVersion provides a mock function with given fields: agentVersion
func (_m *IDataChannel) SetAgentVersion(agentVersion string) {
	_m.Called(agentVersion)
}

// SetSessionType provides a mock function with given fields: sessionType
func (_m *IDataChannel) SetSessionType(sessionType string) {
	_m.Called(sessionType)
}

// SetWebsocket provides a mock function with given fields: _a0, streamUrl, tokenValue
func (_m *IDataChannel) SetWebsocket(_a0 log.T, streamUrl string, tokenValue string) {
	_m.Called(_a0, streamUrl, tokenValue)
}

// SetWsChannel provides a mock function with given fields: wsChannel
func (_m *IDataChannel) SetWsChannel(wsChannel communicator.IWebSocketChannel) {
	_m.Called(wsChannel)
}
340
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. package encryption import ( "crypto/aes" "crypto/cipher" "crypto/rand" "fmt" "io" "github.com/aws/aws-sdk-go/service/kms/kmsiface" "github.com/aws/session-manager-plugin/src/log" ) const ( nonceSize = 12 ) type KMSKeyProvider interface { GenerateDataKey() } type IEncrypter interface { Encrypt(log log.T, plainText []byte) (cipherText []byte, err error) Decrypt(log log.T, cipherText []byte) (plainText []byte, err error) GetEncryptedDataKey() (ciptherTextBlob []byte) } type Encrypter struct { KMSService kmsiface.KMSAPI kmsKeyId string cipherTextKey []byte encryptionKey []byte decryptionKey []byte } var NewEncrypter = func(log log.T, kmsKeyId string, context map[string]*string, KMSService kmsiface.KMSAPI) (*Encrypter, error) { encrypter := Encrypter{kmsKeyId: kmsKeyId, KMSService: KMSService} err := encrypter.generateEncryptionKey(log, kmsKeyId, context) return &encrypter, err } // generateEncryptionKey calls KMS to generate a new encryption key func (encrypter *Encrypter) generateEncryptionKey(log log.T, kmsKeyId string, context map[string]*string) error { cipherTextKey, plainTextKey, err := KMSGenerateDataKey(kmsKeyId, encrypter.KMSService, context) if err != nil { log.Errorf("Error generating data key from KMS: %s,", err) return err } keySize := len(plainTextKey) / 2 encrypter.decryptionKey = plainTextKey[:keySize] encrypter.encryptionKey = plainTextKey[keySize:] encrypter.cipherTextKey = cipherTextKey 
return nil } // GetEncryptedDataKey returns the cipherText that was pulled from KMS func (encrypter *Encrypter) GetEncryptedDataKey() (ciptherTextBlob []byte) { return encrypter.cipherTextKey } // GetKMSKeyId gets the KMS key id that is used to generate the encryption key func (encrypter *Encrypter) GetKMSKeyId() (kmsKey string) { return encrypter.kmsKeyId } // getAEAD gets AEAD which is a GCM cipher mode providing authenticated encryption with associated data func getAEAD(plainTextKey []byte) (aesgcm cipher.AEAD, err error) { var block cipher.Block if block, err = aes.NewCipher(plainTextKey); err != nil { return nil, fmt.Errorf("error creating NewCipher, %v", err) } if aesgcm, err = cipher.NewGCM(block); err != nil { return nil, fmt.Errorf("error creating NewGCM, %v", err) } return aesgcm, nil } // Encrypt encrypts a byte slice and returns the encrypted slice func (encrypter *Encrypter) Encrypt(log log.T, plainText []byte) (cipherText []byte, err error) { var aesgcm cipher.AEAD if aesgcm, err = getAEAD(encrypter.encryptionKey); err != nil { err = fmt.Errorf("%v", err) return } cipherText = make([]byte, nonceSize+len(plainText)) nonce := make([]byte, nonceSize) if _, err = io.ReadFull(rand.Reader, nonce); err != nil { err = fmt.Errorf("error when generating nonce for encryption, %v", err) return } // Encrypt plain text using given key and newly generated nonce cipherTextWithoutNonce := aesgcm.Seal(nil, nonce, plainText, nil) // Append nonce to the beginning of the cipher text to be used while decrypting cipherText = append(cipherText[:nonceSize], nonce...) cipherText = append(cipherText[nonceSize:], cipherTextWithoutNonce...) 
return cipherText, nil } // Decrypt decrypts a byte slice and returns the decrypted slice func (encrypter *Encrypter) Decrypt(log log.T, cipherText []byte) (plainText []byte, err error) { var aesgcm cipher.AEAD if aesgcm, err = getAEAD(encrypter.decryptionKey); err != nil { err = fmt.Errorf("%v", err) return } // Pull the nonce out of the cipherText nonce := cipherText[:nonceSize] cipherTextWithoutNonce := cipherText[nonceSize:] // Decrypt just the actual cipherText using nonce extracted above if plainText, err = aesgcm.Open(nil, nonce, cipherTextWithoutNonce, nil); err != nil { err = fmt.Errorf("error decrypting encrypted test, %v", err) return } return plainText, nil }
138
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. package encryption import ( "fmt" sdkSession "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kms" "github.com/aws/aws-sdk-go/service/kms/kmsiface" "github.com/aws/session-manager-plugin/src/log" "github.com/aws/session-manager-plugin/src/sdkutil" ) // KMSKeySizeInBytes is the key size that is fetched from KMS. 64 bytes key is split into two halves. // First half 32 bytes key is used by agent for encryption and second half 32 bytes by clients like cli/console const KMSKeySizeInBytes int64 = 64 func NewKMSService(log log.T) (kmsService *kms.KMS, err error) { var session *sdkSession.Session if session, err = sdkutil.GetDefaultSession(); err != nil { return nil, err } kmsService = kms.New(session) return kmsService, nil } func KMSDecrypt(log log.T, svc kmsiface.KMSAPI, ciptherTextBlob []byte, encryptionContext map[string]*string) (plainText []byte, err error) { output, err := svc.Decrypt(&kms.DecryptInput{ CiphertextBlob: ciptherTextBlob, EncryptionContext: encryptionContext}) if err != nil { log.Error("Error when decrypting data key", err) return nil, err } return output.Plaintext, nil } // GenerateDataKey gets cipher text and plain text keys from KMS service func KMSGenerateDataKey(kmsKeyId string, svc kmsiface.KMSAPI, context map[string]*string) (cipherTextKey []byte, plainTextKey []byte, err error) { kmsKeySize := KMSKeySizeInBytes generateDataKeyInput := 
kms.GenerateDataKeyInput{ KeyId: &kmsKeyId, NumberOfBytes: &kmsKeySize, EncryptionContext: context, } var generateDataKeyOutput *kms.GenerateDataKeyOutput if generateDataKeyOutput, err = svc.GenerateDataKey(&generateDataKeyInput); err != nil { return nil, nil, fmt.Errorf("Error calling KMS GenerateDataKey API: %s", err) } return generateDataKeyOutput.CiphertextBlob, generateDataKeyOutput.Plaintext, nil }
67
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.

// Code generated by mockery v1.0.0. DO NOT EDIT.

package mocks

import (
	log "github.com/aws/session-manager-plugin/src/log"
	mock "github.com/stretchr/testify/mock"
)

// IEncrypter is an autogenerated mock type for the IEncrypter type
type IEncrypter struct {
	mock.Mock
}

// Decrypt provides a mock function with given fields: _a0, cipherText
func (_m *IEncrypter) Decrypt(_a0 log.T, cipherText []byte) ([]byte, error) {
	ret := _m.Called(_a0, cipherText)

	var r0 []byte
	if rf, ok := ret.Get(0).(func(log.T, []byte) []byte); ok {
		r0 = rf(_a0, cipherText)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]byte)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(log.T, []byte) error); ok {
		r1 = rf(_a0, cipherText)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Encrypt provides a mock function with given fields: _a0, plainText
func (_m *IEncrypter) Encrypt(_a0 log.T, plainText []byte) ([]byte, error) {
	ret := _m.Called(_a0, plainText)

	var r0 []byte
	if rf, ok := ret.Get(0).(func(log.T, []byte) []byte); ok {
		r0 = rf(_a0, plainText)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]byte)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(log.T, []byte) error); ok {
		r1 = rf(_a0, plainText)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetEncryptedDataKey provides a mock function with given fields:
func (_m *IEncrypter) GetEncryptedDataKey() []byte {
	ret := _m.Called()

	var r0 []byte
	if rf, ok := ret.Get(0).(func() []byte); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]byte)
		}
	}

	return r0
}
78
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// Package jsonutil contains various utilities for dealing with json data.
package jsonutil

import "io/ioutil"

// dependency
// ioUtil is the package's file-reading seam: production code goes through it
// so unit tests can swap in a stub (see ioUtilStub in the tests) and exercise
// UnmarshalFile without touching the filesystem.
var ioUtil ioUtility = ioU{}

// ioUtility abstracts the subset of io/ioutil this package uses, purely so
// it can be replaced in tests.
type ioUtility interface {
	ReadFile(filename string) ([]byte, error)
}

type ioU struct{} // ioU implements io/ioutil.

// ReadFile delegates directly to ioutil.ReadFile.
func (ioU) ReadFile(filename string) ([]byte, error) {
	return ioutil.ReadFile(filename)
}
30
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // Package jsonutil contains various utilities for dealing with json data. package jsonutil import ( "bytes" "encoding/json" ) // jsonFormat json formatIndent const jsonFormat = " " // Indent indents a json string. func Indent(jsonStr string) string { var dst bytes.Buffer json.Indent(&dst, []byte(jsonStr), "", jsonFormat) return string(dst.Bytes()) } // Remarshal marshals an object to Json then parses it back to another object. // This is useful for example when we want to go from map[string]interface{} // to a more specific struct type or if we want a deep copy of the object. func Remarshal(obj interface{}, remarshalledObj interface{}) (err error) { b, err := json.Marshal(obj) if err != nil { return } err = json.Unmarshal(b, remarshalledObj) if err != nil { return } return nil } // Marshal marshals an object to a json string. // Returns empty string if marshal fails. func Marshal(obj interface{}) (result string, err error) { var resultB []byte resultB, err = json.Marshal(obj) if err != nil { return } result = string(resultB) return } // UnmarshalFile reads the content of a file then Unmarshals the content to an object. func UnmarshalFile(filePath string, dest interface{}) (err error) { content, err := ioUtil.ReadFile(filePath) if err != nil { return } err = json.Unmarshal(content, dest) return } // Unmarshal unmarshals the content in string format to an object. 
func Unmarshal(jsonContent string, dest interface{}) (err error) { content := []byte(jsonContent) err = json.Unmarshal(content, dest) return } // MarshalIndent is like Marshal but applies Indent to format the output. // Returns empty string if marshal fails func MarshalIndent(obj interface{}) (result string, err error) { var resultsByte []byte // Make sure the output file keeps formal json format resultsByte, err = json.MarshalIndent(obj, "", jsonFormat) if err != nil { return } result = string(resultsByte) return }
88
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // Package jsonutil contains various utilities for dealing with json data. package jsonutil import ( "fmt" "io/ioutil" "log" "path/filepath" "testing" "github.com/stretchr/testify/assert" ) func ExampleMarshal() { type ColorGroup struct { ID int Name string Colors []string } group := ColorGroup{ ID: 1, Name: "Reds", Colors: []string{"Crimson", "Red", "Ruby", "Maroon"}, } b, err := Marshal(group) if err != nil { fmt.Println("error:", err) } fmt.Println(b) // Output: // {"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]} } func ExampleRemarshal() { type ColorGroup struct { ID int Name string Colors []string } group := ColorGroup{ ID: 1, Name: "Reds", Colors: []string{"Crimson", "Red", "Ruby", "Maroon"}, } var newGroup ColorGroup err := Remarshal(group, &newGroup) if err != nil { fmt.Println("error:", err) } out, err := Marshal(newGroup) if err != nil { fmt.Println("error:", err) } fmt.Println(out) // Output: // {"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]} } func ExampleIndent() { type Road struct { Name string Number int } roads := []Road{ {"Diamond Fork", 29}, {"Sheep Creek", 51}, } b, err := Marshal(roads) if err != nil { log.Fatal(err) } out := Indent(b) fmt.Println(out) // Output: // [ // { // "Name": "Diamond Fork", // "Number": 29 // }, // { // "Name": "Sheep Creek", // "Number": 51 // } // ] } func TestIndent(t *testing.T) { testCases := []struct { name string 
input string }{ {"Basic", "[{\"Name\":\"Diamond Fork\", \"Number\":29}, {\"Name\":\"Sheep Creek\", \"Number\":51}]"}, {"BasicMoreWhitespace", "[\n{\"Name\":\"Diamond Fork\", \"Number\":29}, { \"Name\" : \"Sheep Creek\", \"Number\":51}]"}, } for _, tc := range testCases { out := Indent(tc.input) correct, err := ioutil.ReadFile(filepath.Join("testdata", t.Name()+tc.name+".golden")) if err != nil { t.Errorf("error reading file: %v", err) } assert.Equal(t, string(correct), out) } } func TestMarshal(t *testing.T) { group := struct { ID int Name string Colors []string }{ 1, "Reds", []string{"Crimson", "Red", "Ruby", "Maroon"}, } out, err := Marshal(group) if err != nil { t.Errorf("error in %s: %v", t.Name(), err) } correct, err := ioutil.ReadFile(filepath.Join("testdata", t.Name()+".golden")) assert.Equal(t, string(correct), out) } func TestUnmarshalFile(t *testing.T) { filename := "rumpelstilzchen" var contents interface{} // missing file ioUtil = ioUtilStub{err: fmt.Errorf("some error")} err1 := UnmarshalFile(filename, &contents) assert.Error(t, err1, "expected readfile error") // non json content ioUtil = ioUtilStub{b: []byte("Sample text")} err2 := UnmarshalFile(filename, &contents) assert.Error(t, err2, "expected json parsing error") // valid json content ioUtil = ioUtilStub{b: []byte("{\"ID\":1,\"Name\":\"Reds\",\"Colors\":[\"Crimson\",\"Red\",\"Ruby\",\"Maroon\"]}")} err3 := UnmarshalFile(filename, &contents) assert.NoError(t, err3, "message should parse successfully") } func TestRemarshal(t *testing.T) { prop := make(map[string]string) prop["RunCommand"] = "echo" prop2 := make(map[string]string) prop2["command"] = "echo" type Property struct { RunCommand string } var newProp Property var newProp2 Property err := Remarshal(prop, &newProp) assert.NoError(t, err, "message should remarshal successfully") err = Remarshal(prop2, &newProp2) assert.NoError(t, err, "key mismatch should not report error") assert.Equal(t, Property{}, newProp2, "mismatched remarshal should 
return an empty object") } func TestRemarshalInvalidInput(t *testing.T) { // Using channel as unsupported json type // Expect an error and no change to input object badInput := make(chan bool) type Output struct { name string } var output Output // Save an copy of output to compare to after Remarshal has been called to confirm no changes were made copy := output err := Remarshal(badInput, &output) assert.NotNil(t, err) if !assert.ObjectsAreEqual(copy, output) { t.Fatalf("Object was modified by call to Remarshal") } } func TestUnmarshal(t *testing.T) { content := `{"parameter": "1"}` type TestStruct struct { Parameter string `json:"parameter"` } output := TestStruct{} err := Unmarshal(content, &output) assert.NoError(t, err, "Message should parse correctly") assert.Equal(t, output.Parameter, "1") } func TestUnmarshalExtraInput(t *testing.T) { content := `{"parameter": "1", "name": "Richard"}` type TestStruct struct { Parameter string `json:"parameter"` } output := TestStruct{} err := Unmarshal(content, &output) assert.NoError(t, err, "Message should parse correctly") assert.Equal(t, output.Parameter, "1") } func TestUnmarshalInvalidInput(t *testing.T) { content := "Hello" var dest interface{} err := Unmarshal(content, &dest) assert.Error(t, err, "This is not json format. 
Error expected") } func TestMarshalIndent(t *testing.T) { group := struct { ID int Name string Colors []string }{ 1, "Reds", []string{"Crimson", "Red", "Ruby", "Maroon"}, } correct, err := ioutil.ReadFile(filepath.Join("testdata", t.Name()+".golden")) if err != nil { t.Errorf("error: %v", err) t.FailNow() } out, err := MarshalIndent(group) if err != nil { t.Errorf("error: %v", err) t.FailNow() } assert.Equal(t, string(correct), out) } func TestMarshalIndentErrorsOnInvalidInput(t *testing.T) { // Using channel as invalid input // Breaks the same for any json-invalid types _, err := MarshalIndent(make(chan int)) assert.NotNil(t, err) } // ioutil stub type ioUtilStub struct { b []byte err error } func (a ioUtilStub) ReadFile(_ string) ([]byte, error) { return a.b, a.err }
264
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // Package log is used to initialize logger package log import ( "path/filepath" "github.com/fsnotify/fsnotify" ) // IFileWatcher interface for FileWatcher with functions to initialize, start and stop the watcher type IFileWatcher interface { Init(log T, configFilePath string, replaceLogger func()) Start() Stop() } // FileWatcher implements the IFileWatcher by using fileChangeWatcher and fileExistsWatcher type FileWatcher struct { configFilePath string replaceLogger func() log T watcher *fsnotify.Watcher } // Init initializes the data and channels for the filewatcher func (fileWatcher *FileWatcher) Init(log T, configFilePath string, replaceLogger func()) { fileWatcher.replaceLogger = replaceLogger fileWatcher.configFilePath = configFilePath fileWatcher.log = log } // Start creates and starts the go routines for filewatcher func (fileWatcher *FileWatcher) Start() { fileWatcher.log.Debugf("Start File Watcher On: %v", fileWatcher.configFilePath) // Since the filewatcher fails if the file does not exist, need to watch the parent directory for any changes dirPath := filepath.Dir(fileWatcher.configFilePath) fileWatcher.log.Debugf("Start Watcher on directory: %v", dirPath) // Creating Watcher watcher, err := fsnotify.NewWatcher() if err != nil { // Error initializing the watcher fileWatcher.log.Errorf("Error initializing the watcher: %v", err) return } fileWatcher.watcher = watcher // Starting the goroutine 
for event handler go fileWatcher.fileEventHandler() // Add the directory to watcher err = fileWatcher.watcher.Add(dirPath) if err != nil { // Error adding the file to watcher fileWatcher.log.Errorf("Error adding the directory to watcher: %v", err) return } } // fileEventHandler implements handling of the events triggered by the OS func (fileWatcher *FileWatcher) fileEventHandler() { // Waiting on signals from OS for event := range fileWatcher.watcher.Events { // Event signalled by OS on file fileWatcher.log.Debugf("Event on file %v : %v", event.Name, event) if event.Name == fileWatcher.configFilePath { // Event on the file being watched if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create || event.Op&fsnotify.Rename == fsnotify.Rename { // One of Write or Create or Rename Event fileWatcher.log.Debugf("File Watcher Triggers Function Execution: %v", fileWatcher.configFilePath) // Execute the function fileWatcher.replaceLogger() } } } } // Stop stops the filewatcher func (fileWatcher *FileWatcher) Stop() { fileWatcher.log.Infof("Stop the filewatcher on :%v", fileWatcher.configFilePath) // Check if watcher instance is set if fileWatcher.watcher != nil { err := fileWatcher.watcher.Close() if err != nil { // Error closing the filewatcher. Logging the error fileWatcher.log.Debugf("Error Closing the filewatcher :%v", err) } } }
107
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // Package log is used to initialize the logger. package log import ( "path/filepath" ) func DefaultConfig() []byte { return LoadLog(DefaultLogDir, ApplicationLogFile, ErrorLogFile) } func LoadLog(defaultLogDir string, logFile string, errorFile string) []byte { var logFilePath, errorFilePath string logFilePath = filepath.Join(defaultLogDir, logFile) errorFilePath = filepath.Join(defaultLogDir, errorFile) logConfig := ` <seelog type="adaptive" mininterval="2000000" maxinterval="100000000" critmsgcount="500" minlevel="off"> <exceptions> <exception filepattern="test*" minlevel="error"/> </exceptions> <outputs formatid="fmtinfo"> ` logConfig += `<rollingfile type="size" filename="` + logFilePath + `" maxsize="30000000" maxrolls="5"/>` logConfig += ` <filter levels="error,critical" formatid="fmterror"> ` logConfig += `<rollingfile type="size" filename="` + errorFilePath + `" maxsize="10000000" maxrolls="5"/>` logConfig += ` </filter> </outputs> <formats> <format id="fmterror" format="%Date %Time %LEVEL [%FuncShort @ %File.%Line] %Msg%n"/> <format id="fmtdebug" format="%Date %Time %LEVEL [%FuncShort @ %File.%Line] %Msg%n"/> <format id="fmtinfo" format="%Date %Time %LEVEL %Msg%n"/> </formats> </seelog> ` return []byte(logConfig) }
55
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// Package log is used to initialize the logger.
package log

// BasicT represents structs capable of logging messages.
// This interface matches seelog.LoggerInterface.
// Note: the Warn/Error/Critical variants also return an error so callers can
// log a message and propagate it in a single step.
type BasicT interface {
	// Tracef formats message according to format specifier
	// and writes to log with level Trace.
	Tracef(format string, params ...interface{})

	// Debugf formats message according to format specifier
	// and writes to log with level Debug.
	Debugf(format string, params ...interface{})

	// Infof formats message according to format specifier
	// and writes to log with level Info.
	Infof(format string, params ...interface{})

	// Warnf formats message according to format specifier
	// and writes to log with level Warn.
	Warnf(format string, params ...interface{}) error

	// Errorf formats message according to format specifier
	// and writes to log with level Error.
	Errorf(format string, params ...interface{}) error

	// Criticalf formats message according to format specifier
	// and writes to log with level Critical.
	Criticalf(format string, params ...interface{}) error

	// Trace formats message using the default formats for its operands
	// and writes to log with level Trace.
	Trace(v ...interface{})

	// Debug formats message using the default formats for its operands
	// and writes to log with level Debug.
	Debug(v ...interface{})

	// Info formats message using the default formats for its operands
	// and writes to log with level Info.
	Info(v ...interface{})

	// Warn formats message using the default formats for its operands
	// and writes to log with level Warn.
	Warn(v ...interface{}) error

	// Error formats message using the default formats for its operands
	// and writes to log with level Error.
	Error(v ...interface{}) error

	// Critical formats message using the default formats for its operands
	// and writes to log with level Critical.
	Critical(v ...interface{}) error

	// Flush flushes all the messages in the logger.
	Flush()

	// Close flushes all the messages in the logger and closes it. The logger cannot be used after this operation.
	Close()
}

// T represents structs capable of logging messages, and context management.
type T interface {
	BasicT
	// WithContext returns a logger whose messages are prefixed with the given
	// context strings.
	WithContext(context ...string) (contextLogger T)
}
80
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// Package log is used to initialize the logger.
package log

import (
	"sync"

	"github.com/cihub/seelog"
)

const (
	LogFileExtension     = ".log"
	SeelogConfigFileName = "seelog.xml"
	ErrorLogFileSuffix   = "errors"
)

var (
	// err is scratch storage shared with the platform-specific
	// getLogConfigBytes implementations.
	// NOTE(review): package-level error state is not goroutine-safe — confirm
	// logger initialization is single-threaded before relying on it.
	err                         error
	DefaultSeelogConfigFilePath string // DefaultSeelogConfigFilePath specifies the default seelog location
	DefaultLogDir               string // DefaultLogDir specifies default log location
	ApplicationLogFile          string // ApplicationLogFile specifies name of application log file
	ErrorLogFile                string // ErrorLogFile specifies name of error log file
	// loadedLogger caches the singleton wrapper logger; guarded by lock.
	loadedLogger *T
	lock         sync.RWMutex
)

// pkgMutex is the lock used to serialize calls to the logger.
var pkgMutex = new(sync.Mutex)

// loggerInstance is the delegate logger in the wrapper
var loggerInstance = &DelegateLogger{}

// ContextFormatFilter is a filter that can add a context to the parameters of a log message.
type ContextFormatFilter struct {
	Context []string
}

// LogConfig carries the client name used to locate per-client config and log files.
type LogConfig struct {
	ClientName string
}

// Filter adds the context at the beginning of the parameter slice.
// Each context string is suffixed with a space so it reads as a prefix.
func (f ContextFormatFilter) Filter(params ...interface{}) (newParams []interface{}) {
	newParams = make([]interface{}, len(f.Context)+len(params))
	for i, param := range f.Context {
		newParams[i] = param + " "
	}
	ctxLen := len(f.Context)
	for i, param := range params {
		newParams[ctxLen+i] = param
	}
	return newParams
}

// Filterf adds the context in from of the format string.
func (f ContextFormatFilter) Filterf(format string, params ...interface{}) (newFormat string, newParams []interface{}) {
	newFormat = ""
	for _, param := range f.Context {
		newFormat += param + " "
	}
	newFormat += format
	newParams = params
	return
}

// Logger is the starting point to initialize with client name.
// It lazily creates and caches a singleton logger for the process.
// NOTE(review): the isLoaded/cache pair is check-then-act — two concurrent
// first callers could each build a logger; confirm first call happens before
// any concurrency.
func Logger(useWatcher bool, clientName string) T {
	logConfig := LogConfig{
		ClientName: clientName,
	}
	if !isLoaded() {
		logger := logConfig.InitLogger(useWatcher)
		cache(logger)
	}
	return getCached()
}

// InitLogger initializes a new logger based on current configurations and
// (optionally) starts a file watcher on the configuration file so edits are
// picked up at runtime.
func (config *LogConfig) InitLogger(useWatcher bool) (logger T) {
	// Read the current configurations or get the default configurations
	logConfigBytes := config.GetLogConfigBytes()
	// Initialize the base seelog logger
	baseLogger, _ := initBaseLoggerFromBytes(logConfigBytes)
	// Create the wrapper logger
	logger = withContext(baseLogger)
	if useWatcher {
		// Start the config file watcher
		config.startWatcher(logger)
	}
	return
}

// isLoaded reports whether a logger has already been cached (read lock).
func isLoaded() bool {
	lock.RLock()
	defer lock.RUnlock()
	return loadedLogger != nil
}

// cache stores the loaded logger under the write lock.
func cache(logger T) {
	lock.Lock()
	defer lock.Unlock()
	loadedLogger = &logger
}

// getCached returns the cached logger (read lock).
func getCached() T {
	lock.RLock()
	defer lock.RUnlock()
	return *loadedLogger
}

// startWatcher starts the file watcher on the seelog configurations file path.
// A panic during watcher creation is recovered so the current logger keeps
// working; updates to the config file are then ignored until restart.
// NOTE(review): "Initilization" typo in the message below is a runtime string,
// left untouched here.
func (config *LogConfig) startWatcher(logger T) {
	defer func() {
		// In case the creation of watcher panics, let the current logger continue
		if msg := recover(); msg != nil {
			logger.Errorf("Seelog File Watcher Initilization Failed. Any updates on config file will be ignored unless agent is restarted: %v", msg)
		}
	}()
	fileWatcher := &FileWatcher{}
	fileWatcher.Init(logger, DefaultSeelogConfigFilePath, config.replaceLogger)
	// Start the file watcher
	fileWatcher.Start()
}

// replaceLogger replaces the current logger with a new logger initialized from
// the current configurations file. Called by the file watcher on config change.
// NOTE(review): initBaseLoggerFromBytes returns the original parse error even
// after it falls back to a default-config logger, so a broken config file
// aborts replacement here despite a usable fallback — confirm this is intended.
func (config *LogConfig) replaceLogger() {
	// Get the current logger
	logger := getCached()
	//Create new logger
	logConfigBytes := config.GetLogConfigBytes()
	baseLogger, err := initBaseLoggerFromBytes(logConfigBytes)
	// If err in creating logger, do not replace logger
	if err != nil {
		logger.Error("New logger creation failed")
		return
	}
	setStackDepth(baseLogger)
	baseLogger.Debug("New Logger Successfully Created")
	// Safe conversion to *Wrapper
	wrapper, ok := logger.(*Wrapper)
	if !ok {
		logger.Errorf("Logger replace failed. The logger is not a wrapper")
		return
	}
	// Replace the underlying base logger in wrapper
	wrapper.ReplaceDelegate(baseLogger)
}

// GetLogConfigBytes returns the seelog configuration bytes for this client
// (platform-specific lookup, falling back to the built-in default).
func (config *LogConfig) GetLogConfigBytes() []byte {
	return getLogConfigBytes(config.ClientName)
}

// initBaseLoggerFromBytes initializes the base logger using the specified
// configuration as bytes; on parse failure it falls back to DefaultConfig()
// but still returns the original error.
func initBaseLoggerFromBytes(seelogConfig []byte) (seelogger seelog.LoggerInterface, err error) {
	seelogger, err = seelog.LoggerFromConfigAsBytes(seelogConfig)
	if err != nil {
		// Create logger with default config
		seelogger, _ = seelog.LoggerFromConfigAsBytes(DefaultConfig())
	}
	return
}

// withContext creates a wrapper logger on the base logger passed with context is passed.
// The base logger is installed into the shared package-level delegate.
func withContext(logger seelog.LoggerInterface, context ...string) (contextLogger T) {
	loggerInstance.BaseLoggerInstance = logger
	formatFilter := &ContextFormatFilter{Context: context}
	contextLogger = &Wrapper{Format: formatFilter, M: pkgMutex, Delegate: loggerInstance}
	setStackDepth(logger)
	return contextLogger
}

// setStackDepth sets the stack depth of the logger passed
func setStackDepth(logger seelog.LoggerInterface) {
	// additional stack depth so that we print the calling function correctly
	// stack depth 0 would print the function in the wrapper (e.g. wrapper.Debug)
	// stack depth 1 prints the function calling the logger (wrapper), which is what we want.
	logger.SetAdditionalStackDepth(1)
}
201
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. //go:build darwin || freebsd || linux || netbsd || openbsd // +build darwin freebsd linux netbsd openbsd // Package log is used to initialize logger package log import ( "fmt" "io/ioutil" "path/filepath" ) const ( LogsDirectory = "logs" DefaultInstallLocationPrefix = "/usr/local" ) func getApplicationName(clientName string) string { var applicationName string if clientName == "ssmcli" { applicationName = "SSMCLI" } else if clientName == "session-manager-plugin" { applicationName = "sessionmanagerplugin" } return applicationName } // getLogConfigBytes reads and returns the seelog configs from the config file path if present // otherwise returns the seelog default configurations // Linux uses seelog.xml file as configuration by default. func getLogConfigBytes(clientName string) (logConfigBytes []byte) { applicationName := getApplicationName(clientName) DefaultSeelogConfigFilePath = filepath.Join(DefaultInstallLocationPrefix, applicationName, SeelogConfigFileName) DefaultLogDir = filepath.Join(DefaultInstallLocationPrefix, applicationName, LogsDirectory) ApplicationLogFile = fmt.Sprintf("%s%s", clientName, LogFileExtension) ErrorLogFile = fmt.Sprintf("%s%s", ErrorLogFileSuffix, LogFileExtension) if logConfigBytes, err = ioutil.ReadFile(DefaultSeelogConfigFilePath); err != nil { logConfigBytes = DefaultConfig() } return }
57
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. //go:build windows // +build windows // Package log is used to initialize logger package log import ( "fmt" "io/ioutil" "os" "path/filepath" ) const ( // ApplicationFolder is the path under local app data. ApplicationFolderPrefix = "Amazon\\" LogsDirectory = "Logs" ) var EnvProgramFiles = os.Getenv("ProgramFiles") // Windows environment variable %ProgramFiles% func getApplicationName(clientName string) string { var applicationName string if clientName == "ssmcli" { applicationName = "SSMCLI" } else if clientName == "session-manager-plugin" { applicationName = "SessionManagerPlugin" } return applicationName } // getLogConfigBytes reads and returns the seelog configs from the config file path if present // otherwise returns the seelog default configurations // Windows uses default log configuration if there is no seelog.xml override provided. 
func getLogConfigBytes(clientName string) (logConfigBytes []byte) { DefaultProgramFolder := filepath.Join( EnvProgramFiles, ApplicationFolderPrefix, getApplicationName(clientName)) DefaultSeelogConfigFilePath = filepath.Join(DefaultProgramFolder, SeelogConfigFileName) DefaultLogDir = filepath.Join( DefaultProgramFolder, LogsDirectory) ApplicationLogFile = fmt.Sprintf("%s%s", clientName, LogFileExtension) ErrorLogFile = fmt.Sprintf("%s%s", ErrorLogFileSuffix, LogFileExtension) if logConfigBytes, err = ioutil.ReadFile(DefaultSeelogConfigFilePath); err != nil { logConfigBytes = DefaultConfig() } return }
66
session-manager-plugin
aws
Go
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // Package log is used to initialize the logger. package log import ( "errors" "fmt" "github.com/stretchr/testify/mock" ) // Mock stands for a mocked log. type Mock struct { mock.Mock context string } // NewMockLogger returns an instance of Mock with default expectations set. func NewMockLog() *Mock { log := new(Mock) log.On("Close").Return() log.On("Flush").Return() log.On("Debug", mock.Anything).Return() log.On("Error", mock.Anything).Return(mock.AnythingOfType("error")) log.On("Warn", mock.Anything).Return(mock.AnythingOfType("error")) log.On("Trace", mock.Anything).Return() log.On("Info", mock.Anything).Return() log.On("Debugf", mock.Anything, mock.Anything).Return() log.On("Errorf", mock.AnythingOfType("string"), mock.Anything).Return(mock.AnythingOfType("error")) log.On("Warnf", mock.AnythingOfType("string"), mock.Anything).Return(mock.AnythingOfType("error")) log.On("Tracef", mock.Anything, mock.Anything).Return() log.On("Infof", mock.Anything, mock.Anything).Return() return log } func NewMockLogWithContext(ctx string) *Mock { log := new(Mock) log.context = "[" + ctx + "]" log.On("Close").Return() log.On("Flush").Return() log.On("Debug", mock.Anything).Return() log.On("Error", mock.Anything).Return(mock.AnythingOfType("error")) log.On("Warn", mock.Anything).Return(mock.AnythingOfType("error")) log.On("Trace", mock.Anything).Return() log.On("Info", mock.Anything).Return() log.On("Debugf", 
mock.Anything, mock.Anything).Return() log.On("Errorf", mock.AnythingOfType("string"), mock.Anything).Return(mock.AnythingOfType("error")) log.On("Tracef", mock.Anything, mock.Anything).Return() log.On("Infof", mock.Anything, mock.Anything).Return() return log } func (_m *Mock) WithContext(context ...string) (contextLogger T) { fmt.Print(_m.context) fmt.Printf("WithContext: %v", context) ret := _m.Called(context) return ret.Get(0).(T) } // Tracef mocks the Tracef function. func (_m *Mock) Tracef(format string, params ...interface{}) { fmt.Print(_m.context) fmt.Printf("Tracef: "+format+"\n", params...) _m.Called(format, params) } // Debugf mocks the Debugf function. func (_m *Mock) Debugf(format string, params ...interface{}) { fmt.Print(_m.context) fmt.Printf("Debugf: "+format, params...) fmt.Println() _m.Called(format, params) } // Infof mocks the Infof function. func (_m *Mock) Infof(format string, params ...interface{}) { fmt.Print(_m.context) fmt.Printf("Infof: "+format, params...) fmt.Println() _m.Called(format, params) } // Warnf mocks the Warnf function. func (_m *Mock) Warnf(format string, params ...interface{}) error { fmt.Print(_m.context) msg := fmt.Sprintf("Warnf: "+format, params...) fmt.Printf(msg) fmt.Println() _m.Called(format, params) return errors.New(msg) } // Errorf mocks the Errorf function. func (_m *Mock) Errorf(format string, params ...interface{}) error { fmt.Print(_m.context) msg := fmt.Sprintf("Errorf: "+format, params...) fmt.Printf(msg) fmt.Println() _m.Called(format, params) return errors.New(msg) } // Criticalf mocks the Criticalf function. func (_m *Mock) Criticalf(format string, params ...interface{}) error { fmt.Print(_m.context) msg := fmt.Sprintf("Criticalf: "+format, params...) fmt.Printf(msg) fmt.Println() _m.Called(format, params) return errors.New(msg) } // Trace mocks the Trace function. func (_m *Mock) Trace(v ...interface{}) { fmt.Print(_m.context) fmt.Print("Trace: ") fmt.Println(v...) 
_m.Called(v) } // Debug mocks the Debug function. func (_m *Mock) Debug(v ...interface{}) { fmt.Print(_m.context) fmt.Print("Debug: ") fmt.Println(v...) _m.Called(v) } // Info mocks the Info function. func (_m *Mock) Info(v ...interface{}) { fmt.Print(_m.context) fmt.Print("Info: ") fmt.Println(v...) _m.Called(v) } // Warn mocks the Warn function. func (_m *Mock) Warn(v ...interface{}) error { fmt.Print(_m.context) msg := fmt.Sprint("Warn: ") + fmt.Sprint(v...) fmt.Printf(msg) fmt.Println() _m.Called(v) return errors.New(msg) } // Error mocks the Error function. func (_m *Mock) Error(v ...interface{}) error { fmt.Print(_m.context) msg := fmt.Sprint("Error: ") + fmt.Sprint(v...) fmt.Printf(msg) fmt.Println() _m.Called(v) return errors.New(msg) } // Critical mocks the Critical function. func (_m *Mock) Critical(v ...interface{}) error { fmt.Print(_m.context) fmt.Print("Critical: ") fmt.Println(v...) ret := _m.Called(v) return ret.Error(0) } // Flush mocks the Flush function. func (_m *Mock) Flush() { _m.Called() } // Close mocks the Close function. func (_m *Mock) Close() { _m.Called() }
187
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// Package log is used to initialize the logger.
package log

import (
	"sync"
)

// DelegateLogger holds the base logger for logging.
// It is shared by all Wrapper instances so ReplaceDelegate swaps the
// underlying logger for every context logger at once.
type DelegateLogger struct {
	BaseLoggerInstance BasicT
}

// Wrapper is a logger that can modify the format of a log message before delegating to another logger.
// Every delegate call is serialized on M; all methods therefore follow the
// same pattern: filter the message, lock, delegate.
type Wrapper struct {
	Format   FormatFilter
	M        *sync.Mutex
	Delegate *DelegateLogger
}

// FormatFilter can modify the format and or parameters to be passed to a logger.
type FormatFilter interface {
	// Filter modifies parameters that will be passed to log.Debug, log.Info, etc.
	Filter(params ...interface{}) (newParams []interface{})

	// Filter modifies format and/or parameter strings that will be passed to log.Debugf, log.Infof, etc.
	Filterf(format string, params ...interface{}) (newFormat string, newParams []interface{})
}

// WithContext creates a wrapper logger with context.
// The new wrapper shares the mutex and delegate of the receiver.
func (w *Wrapper) WithContext(context ...string) (contextLogger T) {
	formatFilter := &ContextFormatFilter{Context: context}
	contextLogger = &Wrapper{Format: formatFilter, M: w.M, Delegate: w.Delegate}
	return contextLogger
}

// Tracef formats message according to format specifier
// and writes to log with level = Trace.
func (w *Wrapper) Tracef(format string, params ...interface{}) {
	format, params = w.Format.Filterf(format, params...)
	w.M.Lock()
	defer w.M.Unlock()
	w.Delegate.BaseLoggerInstance.Tracef(format, params...)
}

// Debugf formats message according to format specifier
// and writes to log with level = Debug.
func (w *Wrapper) Debugf(format string, params ...interface{}) {
	format, params = w.Format.Filterf(format, params...)
	w.M.Lock()
	defer w.M.Unlock()
	w.Delegate.BaseLoggerInstance.Debugf(format, params...)
}

// Infof formats message according to format specifier
// and writes to log with level = Info.
func (w *Wrapper) Infof(format string, params ...interface{}) {
	format, params = w.Format.Filterf(format, params...)
	w.M.Lock()
	defer w.M.Unlock()
	w.Delegate.BaseLoggerInstance.Infof(format, params...)
}

// Warnf formats message according to format specifier
// and writes to log with level = Warn.
func (w *Wrapper) Warnf(format string, params ...interface{}) error {
	format, params = w.Format.Filterf(format, params...)
	w.M.Lock()
	defer w.M.Unlock()
	return w.Delegate.BaseLoggerInstance.Warnf(format, params...)
}

// Errorf formats message according to format specifier
// and writes to log with level = Error.
func (w *Wrapper) Errorf(format string, params ...interface{}) error {
	format, params = w.Format.Filterf(format, params...)
	w.M.Lock()
	defer w.M.Unlock()
	return w.Delegate.BaseLoggerInstance.Errorf(format, params...)
}

// Criticalf formats message according to format specifier
// and writes to log with level = Critical.
func (w *Wrapper) Criticalf(format string, params ...interface{}) error {
	format, params = w.Format.Filterf(format, params...)
	w.M.Lock()
	defer w.M.Unlock()
	return w.Delegate.BaseLoggerInstance.Criticalf(format, params...)
}

// Trace formats message using the default formats for its operands
// and writes to log with level = Trace.
func (w *Wrapper) Trace(v ...interface{}) {
	v = w.Format.Filter(v...)
	w.M.Lock()
	defer w.M.Unlock()
	w.Delegate.BaseLoggerInstance.Trace(v...)
}

// Debug formats message using the default formats for its operands
// and writes to log with level = Debug.
func (w *Wrapper) Debug(v ...interface{}) {
	v = w.Format.Filter(v...)
	w.M.Lock()
	defer w.M.Unlock()
	w.Delegate.BaseLoggerInstance.Debug(v...)
}

// Info formats message using the default formats for its operands
// and writes to log with level = Info.
func (w *Wrapper) Info(v ...interface{}) {
	v = w.Format.Filter(v...)
	w.M.Lock()
	defer w.M.Unlock()
	w.Delegate.BaseLoggerInstance.Info(v...)
}

// Warn formats message using the default formats for its operands
// and writes to log with level = Warn.
func (w *Wrapper) Warn(v ...interface{}) error {
	v = w.Format.Filter(v...)
	w.M.Lock()
	defer w.M.Unlock()
	return w.Delegate.BaseLoggerInstance.Warn(v...)
}

// Error formats message using the default formats for its operands
// and writes to log with level = Error.
func (w *Wrapper) Error(v ...interface{}) error {
	v = w.Format.Filter(v...)
	w.M.Lock()
	defer w.M.Unlock()
	return w.Delegate.BaseLoggerInstance.Error(v...)
}

// Critical formats message using the default formats for its operands
// and writes to log with level = Critical.
func (w *Wrapper) Critical(v ...interface{}) error {
	v = w.Format.Filter(v...)
	w.M.Lock()
	defer w.M.Unlock()
	return w.Delegate.BaseLoggerInstance.Critical(v...)
}

// Flush flushes all the messages in the logger.
func (w *Wrapper) Flush() {
	w.M.Lock()
	defer w.M.Unlock()
	w.Delegate.BaseLoggerInstance.Flush()
}

// Close flushes all the messages in the logger and closes it. It cannot be used after this operation.
func (w *Wrapper) Close() {
	w.M.Lock()
	defer w.M.Unlock()
	w.Delegate.BaseLoggerInstance.Close()
}

// ReplaceDelegate replaces the delegate logger with a new logger.
// The old logger is flushed first so no buffered messages are lost; the swap
// takes effect for every Wrapper sharing this delegate.
func (w *Wrapper) ReplaceDelegate(newLogger BasicT) {
	w.M.Lock()
	defer w.M.Unlock()
	w.Delegate.BaseLoggerInstance.Flush()
	w.Delegate.BaseLoggerInstance = newLogger
	w.Delegate.BaseLoggerInstance.Info("Logger Replaced. New Logger Used to log the message")
}
191
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// message package defines data channel messages structure.
package message

import (
	"github.com/aws/session-manager-plugin/src/log"
	"github.com/twinj/uuid"
)

const (
	// InputStreamMessage represents message type for input data
	InputStreamMessage = "input_stream_data"

	// OutputStreamMessage represents message type for output data
	OutputStreamMessage = "output_stream_data"

	// AcknowledgeMessage represents message type for acknowledge
	AcknowledgeMessage = "acknowledge"

	// ChannelClosedMessage represents message type for ChannelClosed
	ChannelClosedMessage = "channel_closed"

	// StartPublicationMessage represents the message type that notifies the CLI to start sending stream messages
	StartPublicationMessage = "start_publication"

	// PausePublicationMessage represents the message type that notifies the CLI to pause sending stream messages
	// as the remote data channel is inactive
	PausePublicationMessage = "pause_publication"
)

// AcknowledgeContent is used to inform the sender of an acknowledge message that the message has been received.
// * MessageType is a 32 byte UTF-8 string containing the message type.
// * MessageId is a 40 byte UTF-8 string containing the UUID identifying this message being acknowledged.
// * SequenceNumber is an 8 byte integer containing the message sequence number for serialized message.
// * IsSequentialMessage is a boolean field representing whether the acknowledged message is part of a sequence
type AcknowledgeContent struct {
	MessageType         string `json:"AcknowledgedMessageType"`
	MessageId           string `json:"AcknowledgedMessageId"`
	SequenceNumber      int64  `json:"AcknowledgedMessageSequenceNumber"`
	IsSequentialMessage bool   `json:"IsSequentialMessage"`
}

// ChannelClosed is used to inform the client to close the channel
// * MessageId is a 40 byte UTF-8 string containing the UUID identifying this message.
// * CreatedDate is a string field containing the message create epoch millis in UTC.
// * DestinationId is a string field containing the session target.
// * SessionId is a string field representing which session to close.
// * MessageType is a 32 byte UTF-8 string containing the message type.
// * SchemaVersion is a 4 byte integer containing the message schema version number.
// * Output is a string field containing the error message for channel close.
type ChannelClosed struct {
	MessageId     string `json:"MessageId"`
	CreatedDate   string `json:"CreatedDate"`
	DestinationId string `json:"DestinationId"`
	SessionId     string `json:"SessionId"`
	MessageType   string `json:"MessageType"`
	SchemaVersion int    `json:"SchemaVersion"`
	Output        string `json:"Output"`
}

// PayloadType identifies what the Payload field of a ClientMessage carries.
type PayloadType uint32

const (
	Output                       PayloadType = 1
	Error                        PayloadType = 2
	Size                         PayloadType = 3
	Parameter                    PayloadType = 4
	HandshakeRequestPayloadType  PayloadType = 5
	HandshakeResponsePayloadType PayloadType = 6
	HandshakeCompletePayloadType PayloadType = 7
	EncChallengeRequest          PayloadType = 8
	EncChallengeResponse         PayloadType = 9
	Flag                         PayloadType = 10
	StdErr                       PayloadType = 11
	ExitCode                     PayloadType = 12
)

// PayloadTypeFlag carries control flags inside a Flag payload.
type PayloadTypeFlag uint32

const (
	DisconnectToPort   PayloadTypeFlag = 1
	TerminateSession   PayloadTypeFlag = 2
	ConnectToPortError PayloadTypeFlag = 3
)

// SizeData carries terminal dimensions for a Size payload.
type SizeData struct {
	Cols uint32 `json:"cols"`
	Rows uint32 `json:"rows"`
}

// IClientMessage is the serialization/deserialization contract implemented by ClientMessage.
type IClientMessage interface {
	Validate() error
	DeserializeClientMessage(log log.T, input []byte) (err error)
	SerializeClientMessage(log log.T) (result []byte, err error)
	DeserializeDataStreamAcknowledgeContent(log log.T) (dataStreamAcknowledge AcknowledgeContent, err error)
	DeserializeChannelClosedMessage(log log.T) (channelClosed ChannelClosed, err error)
	DeserializeHandshakeRequest(log log.T) (handshakeRequest HandshakeRequestPayload, err error)
	DeserializeHandshakeComplete(log log.T) (handshakeComplete HandshakeCompletePayload, err error)
}

// ClientMessage represents a message for client to send/receive. ClientMessage Message in MGS is equivalent to MDS' InstanceMessage.
// All client messages are sent in this form to the MGS service.
type ClientMessage struct {
	HeaderLength   uint32
	MessageType    string
	SchemaVersion  uint32
	CreatedDate    uint64
	SequenceNumber int64
	Flags          uint64
	MessageId      uuid.UUID
	PayloadDigest  []byte
	PayloadType    uint32
	PayloadLength  uint32
	Payload        []byte
}

// * HL - HeaderLength is a 4 byte integer that represents the header length.
// * MessageType is a 32 byte UTF-8 string containing the message type.
// * SchemaVersion is a 4 byte integer containing the message schema version number.
// * CreatedDate is an 8 byte integer containing the message create epoch millis in UTC.
// * SequenceNumber is an 8 byte integer containing the message sequence number for serialized message streams.
// * Flags is an 8 byte unsigned integer containing a packed array of control flags:
// *   Bit 0 is SYN - SYN is set (1) when the recipient should consider Seq to be the first message number in the stream
// *   Bit 1 is FIN - FIN is set (1) when this message is the final message in the sequence.
// * MessageId is a 40 byte UTF-8 string containing a random UUID identifying this message.
// * Payload digest is a 32 byte containing the SHA-256 hash of the payload.
// * Payload length is an 4 byte unsigned integer containing the byte length of data in the Payload field.
// * Payload is a variable length byte data.
//
// * | HL|         MessageType           |Ver|  CD   |  Seq  | Flags |
// * |         MessageId                     |           Digest              | PayType | PayLen|
// * |         Payload      			|
//
// Field byte lengths for the serialized wire format above.
// NOTE(review): ClientMessage_MessageIdLength is 16 (raw UUID bytes) even
// though the prose above says "40 byte UTF-8 string" — presumably the wire
// form is the binary UUID; confirm against SerializeClientMessage.
const (
	ClientMessage_HLLength             = 4
	ClientMessage_MessageTypeLength    = 32
	ClientMessage_SchemaVersionLength  = 4
	ClientMessage_CreatedDateLength    = 8
	ClientMessage_SequenceNumberLength = 8
	ClientMessage_FlagsLength          = 8
	ClientMessage_MessageIdLength      = 16
	ClientMessage_PayloadDigestLength  = 32
	ClientMessage_PayloadTypeLength    = 4
	ClientMessage_PayloadLengthLength  = 4
)

// Byte offsets of each field, computed cumulatively from the lengths above.
const (
	ClientMessage_HLOffset             = 0
	ClientMessage_MessageTypeOffset    = ClientMessage_HLOffset + ClientMessage_HLLength
	ClientMessage_SchemaVersionOffset  = ClientMessage_MessageTypeOffset + ClientMessage_MessageTypeLength
	ClientMessage_CreatedDateOffset    = ClientMessage_SchemaVersionOffset + ClientMessage_SchemaVersionLength
	ClientMessage_SequenceNumberOffset = ClientMessage_CreatedDateOffset + ClientMessage_CreatedDateLength
	ClientMessage_FlagsOffset          = ClientMessage_SequenceNumberOffset + ClientMessage_SequenceNumberLength
	ClientMessage_MessageIdOffset      = ClientMessage_FlagsOffset + ClientMessage_FlagsLength
	ClientMessage_PayloadDigestOffset  = ClientMessage_MessageIdOffset + ClientMessage_MessageIdLength
	ClientMessage_PayloadTypeOffset    = ClientMessage_PayloadDigestOffset + ClientMessage_PayloadDigestLength
	ClientMessage_PayloadLengthOffset  = ClientMessage_PayloadTypeOffset + ClientMessage_PayloadTypeLength
	ClientMessage_PayloadOffset        = ClientMessage_PayloadLengthOffset + ClientMessage_PayloadLengthLength
)
172
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// message package defines data channel messages structure.
package message

import (
	"encoding/json"
	"time"
)

// ActionType used in Handshake to determine action requested by the agent
type ActionType string

const (
	KMSEncryption ActionType = "KMSEncryption"
	SessionType   ActionType = "SessionType"
)

// ActionStatus reports the outcome of processing a requested client action.
type ActionStatus int

const (
	Success     ActionStatus = 1
	Failed      ActionStatus = 2
	Unsupported ActionStatus = 3
)

// KMSEncryptionRequest is sent by the agent to initialize KMS encryption.
type KMSEncryptionRequest struct {
	KMSKeyID string `json:"KMSKeyId"`
}

// KMSEncryptionResponse is received by the agent to set up KMS encryption.
type KMSEncryptionResponse struct {
	KMSCipherTextKey  []byte `json:"KMSCipherTextKey"`
	KMSCipherTextHash []byte `json:"KMSCipherTextHash"`
}

// SessionTypeRequest contains the type of the session that needs to be launched
// and properties for the plugin.
type SessionTypeRequest struct {
	SessionType string      `json:"SessionType"`
	Properties  interface{} `json:"Properties"`
}

// HandshakeRequestPayload is the handshake payload sent by the agent to the
// session manager plugin.
type HandshakeRequestPayload struct {
	AgentVersion           string                  `json:"AgentVersion"`
	RequestedClientActions []RequestedClientAction `json:"RequestedClientActions"`
}

// RequestedClientAction is an action requested by the agent to the plugin.
// ActionParameters stays raw JSON so each action type can decode it itself.
type RequestedClientAction struct {
	ActionType       ActionType      `json:"ActionType"`
	ActionParameters json.RawMessage `json:"ActionParameters"`
}

// ProcessedClientAction is the result of processing the action by the plugin.
type ProcessedClientAction struct {
	ActionType   ActionType   `json:"ActionType"`
	ActionStatus ActionStatus `json:"ActionStatus"`
	ActionResult interface{}  `json:"ActionResult"`
	Error        string       `json:"Error"`
}

// HandshakeResponsePayload is sent by the plugin in response to the handshake request.
type HandshakeResponsePayload struct {
	ClientVersion          string                  `json:"ClientVersion"`
	ProcessedClientActions []ProcessedClientAction `json:"ProcessedClientActions"`
	Errors                 []string                `json:"Errors"`
}

// EncryptionChallengeRequest is sent by the agent as a challenge to the client.
// The challenge field is some data that was encrypted by the agent. The client
// must be able to decrypt this and in turn encrypt it with its own key.
type EncryptionChallengeRequest struct {
	Challenge []byte `json:"Challenge"`
}

// EncryptionChallengeResponse is received by the agent from the client. The
// challenge field contains some data received, decrypted and then encrypted by
// the client. Agent must be able to decrypt this and verify it matches the
// original plaintext challenge.
type EncryptionChallengeResponse struct {
	Challenge []byte `json:"Challenge"`
}

// HandshakeCompletePayload indicates to the client that the handshake is complete.
// This signals the client to start the plugin and display a customer message where appropriate.
type HandshakeCompletePayload struct {
	HandshakeTimeToComplete time.Duration `json:"HandshakeTimeToComplete"`
	CustomerMessage         string        `json:"CustomerMessage"`
}
102
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // message package defines data channel messages structure. package message import ( "bytes" "crypto/sha256" "encoding/binary" "encoding/json" "errors" "fmt" "strings" "time" "github.com/aws/session-manager-plugin/src/log" "github.com/twinj/uuid" ) // DeserializeClientMessage deserializes the byte array into an ClientMessage message. // * Payload is a variable length byte data. // * | HL| MessageType |Ver| CD | Seq | Flags | // * | MessageId | Digest | PayType | PayLen| // * | Payload | func (clientMessage *ClientMessage) DeserializeClientMessage(log log.T, input []byte) (err error) { clientMessage.MessageType, err = getString(log, input, ClientMessage_MessageTypeOffset, ClientMessage_MessageTypeLength) if err != nil { log.Errorf("Could not deserialize field MessageType with error: %v", err) return err } clientMessage.SchemaVersion, err = getUInteger(log, input, ClientMessage_SchemaVersionOffset) if err != nil { log.Errorf("Could not deserialize field SchemaVersion with error: %v", err) return err } clientMessage.CreatedDate, err = getULong(log, input, ClientMessage_CreatedDateOffset) if err != nil { log.Errorf("Could not deserialize field CreatedDate with error: %v", err) return err } clientMessage.SequenceNumber, err = getLong(log, input, ClientMessage_SequenceNumberOffset) if err != nil { log.Errorf("Could not deserialize field SequenceNumber with error: %v", err) return err } clientMessage.Flags, 
err = getULong(log, input, ClientMessage_FlagsOffset) if err != nil { log.Errorf("Could not deserialize field Flags with error: %v", err) return err } clientMessage.MessageId, err = getUuid(log, input, ClientMessage_MessageIdOffset) if err != nil { log.Errorf("Could not deserialize field MessageId with error: %v", err) return err } clientMessage.PayloadDigest, err = getBytes(log, input, ClientMessage_PayloadDigestOffset, ClientMessage_PayloadDigestLength) if err != nil { log.Errorf("Could not deserialize field PayloadDigest with error: %v", err) return err } clientMessage.PayloadType, err = getUInteger(log, input, ClientMessage_PayloadTypeOffset) if err != nil { log.Errorf("Could not deserialize field PayloadType with error: %v", err) return err } clientMessage.PayloadLength, err = getUInteger(log, input, ClientMessage_PayloadLengthOffset) headerLength, herr := getUInteger(log, input, ClientMessage_HLOffset) if herr != nil { log.Errorf("Could not deserialize field HeaderLength with error: %v", err) return err } clientMessage.HeaderLength = headerLength clientMessage.Payload = input[headerLength+ClientMessage_PayloadLengthLength:] return err } // getString get a string value from the byte array starting from the specified offset to the defined length. 
func getString(log log.T, byteArray []byte, offset int, stringLength int) (result string, err error) { byteArrayLength := len(byteArray) if offset > byteArrayLength-1 || offset+stringLength-1 > byteArrayLength-1 || offset < 0 { log.Error("getString failed: Offset is invalid.") return "", errors.New("Offset is outside the byte array.") } //remove nulls from the bytes array b := bytes.Trim(byteArray[offset:offset+stringLength], "\x00") return strings.TrimSpace(string(b)), nil } // getUInteger gets an unsigned integer func getUInteger(log log.T, byteArray []byte, offset int) (result uint32, err error) { var temp int32 temp, err = getInteger(log, byteArray, offset) return uint32(temp), err } // getInteger gets an integer value from a byte array starting from the specified offset. func getInteger(log log.T, byteArray []byte, offset int) (result int32, err error) { byteArrayLength := len(byteArray) if offset > byteArrayLength-1 || offset+4 > byteArrayLength || offset < 0 { log.Error("getInteger failed: Offset is invalid.") return 0, errors.New("Offset is bigger than the byte array.") } return bytesToInteger(log, byteArray[offset:offset+4]) } // bytesToInteger gets an integer from a byte array. func bytesToInteger(log log.T, input []byte) (result int32, err error) { var res int32 inputLength := len(input) if inputLength != 4 { log.Error("bytesToInteger failed: input array size is not equal to 4.") return 0, errors.New("Input array size is not equal to 4.") } buf := bytes.NewBuffer(input) binary.Read(buf, binary.BigEndian, &res) return res, nil } // getULong gets an unsigned long integer func getULong(log log.T, byteArray []byte, offset int) (result uint64, err error) { var temp int64 temp, err = getLong(log, byteArray, offset) return uint64(temp), err } // getLong gets a long integer value from a byte array starting from the specified offset. 64 bit. 
func getLong(log log.T, byteArray []byte, offset int) (result int64, err error) { byteArrayLength := len(byteArray) if offset > byteArrayLength-1 || offset+8 > byteArrayLength || offset < 0 { log.Error("getLong failed: Offset is invalid.") return 0, errors.New("Offset is outside the byte array.") } return bytesToLong(log, byteArray[offset:offset+8]) } // bytesToLong gets a Long integer from a byte array. func bytesToLong(log log.T, input []byte) (result int64, err error) { var res int64 inputLength := len(input) if inputLength != 8 { log.Error("bytesToLong failed: input array size is not equal to 8.") return 0, errors.New("Input array size is not equal to 8.") } buf := bytes.NewBuffer(input) binary.Read(buf, binary.BigEndian, &res) return res, nil } // getUuid gets the 128bit uuid from an array of bytes starting from the offset. func getUuid(log log.T, byteArray []byte, offset int) (result uuid.UUID, err error) { byteArrayLength := len(byteArray) if offset > byteArrayLength-1 || offset+16-1 > byteArrayLength-1 || offset < 0 { log.Error("getUuid failed: Offset is invalid.") return nil, errors.New("Offset is outside the byte array.") } leastSignificantLong, err := getLong(log, byteArray, offset) if err != nil { log.Error("getUuid failed: failed to get uuid LSBs Long value.") return nil, errors.New("Failed to get uuid LSBs long value.") } leastSignificantBytes, err := longToBytes(log, leastSignificantLong) if err != nil { log.Error("getUuid failed: failed to get uuid LSBs bytes value.") return nil, errors.New("Failed to get uuid LSBs bytes value.") } mostSignificantLong, err := getLong(log, byteArray, offset+8) if err != nil { log.Error("getUuid failed: failed to get uuid MSBs Long value.") return nil, errors.New("Failed to get uuid MSBs long value.") } mostSignificantBytes, err := longToBytes(log, mostSignificantLong) if err != nil { log.Error("getUuid failed: failed to get uuid MSBs bytes value.") return nil, errors.New("Failed to get uuid MSBs bytes value.") } 
uuidBytes := append(mostSignificantBytes, leastSignificantBytes...) return uuid.New(uuidBytes), nil } // longToBytes gets bytes array from a long integer. func longToBytes(log log.T, input int64) (result []byte, err error) { buf := new(bytes.Buffer) binary.Write(buf, binary.BigEndian, input) if buf.Len() != 8 { log.Error("longToBytes failed: buffer output length is not equal to 8.") return make([]byte, 8), errors.New("Input array size is not equal to 8.") } return buf.Bytes(), nil } // getBytes gets an array of bytes starting from the offset. func getBytes(log log.T, byteArray []byte, offset int, byteLength int) (result []byte, err error) { byteArrayLength := len(byteArray) if offset > byteArrayLength-1 || offset+byteLength-1 > byteArrayLength-1 || offset < 0 { log.Error("getBytes failed: Offset is invalid.") return make([]byte, byteLength), errors.New("Offset is outside the byte array.") } return byteArray[offset : offset+byteLength], nil } // Validate returns error if the message is invalid func (clientMessage *ClientMessage) Validate() error { if StartPublicationMessage == clientMessage.MessageType || PausePublicationMessage == clientMessage.MessageType { return nil } if clientMessage.HeaderLength == 0 { return errors.New("HeaderLength cannot be zero") } if clientMessage.MessageType == "" { return errors.New("MessageType is missing") } if clientMessage.CreatedDate == 0 { return errors.New("CreatedDate is missing") } if clientMessage.PayloadLength != 0 { hasher := sha256.New() hasher.Write(clientMessage.Payload) if !bytes.Equal(hasher.Sum(nil), clientMessage.PayloadDigest) { return errors.New("payload Hash is not valid") } } return nil } // SerializeClientMessage serializes ClientMessage message into a byte array. // * Payload is a variable length byte data. 
// * | HL| MessageType |Ver| CD | Seq | Flags |
// * | MessageId | Digest |PayType| PayLen|
// * | Payload |
//
// Writes every header field at its fixed offset, computes the payload SHA-256
// digest, and appends the payload. Also sets the receiver's PayloadLength as a
// side effect. On any put failure it returns a 1-byte slice and the error
// (callers must check err, not the slice).
func (clientMessage *ClientMessage) SerializeClientMessage(log log.T) (result []byte, err error) {
	payloadLength := uint32(len(clientMessage.Payload))
	// The header ends where the PayloadLength field begins.
	headerLength := uint32(ClientMessage_PayloadLengthOffset)
	// Set payload length
	clientMessage.PayloadLength = payloadLength

	totalMessageLength := headerLength + ClientMessage_PayloadLengthLength + payloadLength
	result = make([]byte, totalMessageLength)

	err = putUInteger(log, result, ClientMessage_HLOffset, headerLength)
	if err != nil {
		log.Errorf("Could not serialize HeaderLength with error: %v", err)
		return make([]byte, 1), err
	}

	startPosition := ClientMessage_MessageTypeOffset
	endPosition := ClientMessage_MessageTypeOffset + ClientMessage_MessageTypeLength - 1
	err = putString(log, result, startPosition, endPosition, clientMessage.MessageType)
	if err != nil {
		log.Errorf("Could not serialize MessageType with error: %v", err)
		return make([]byte, 1), err
	}

	err = putUInteger(log, result, ClientMessage_SchemaVersionOffset, clientMessage.SchemaVersion)
	if err != nil {
		log.Errorf("Could not serialize SchemaVersion with error: %v", err)
		return make([]byte, 1), err
	}

	err = putULong(log, result, ClientMessage_CreatedDateOffset, clientMessage.CreatedDate)
	if err != nil {
		log.Errorf("Could not serialize CreatedDate with error: %v", err)
		return make([]byte, 1), err
	}

	err = putLong(log, result, ClientMessage_SequenceNumberOffset, clientMessage.SequenceNumber)
	if err != nil {
		log.Errorf("Could not serialize SequenceNumber with error: %v", err)
		return make([]byte, 1), err
	}

	err = putULong(log, result, ClientMessage_FlagsOffset, clientMessage.Flags)
	if err != nil {
		log.Errorf("Could not serialize Flags with error: %v", err)
		return make([]byte, 1), err
	}

	err = putUuid(log, result, ClientMessage_MessageIdOffset, clientMessage.MessageId)
	if err != nil {
		log.Errorf("Could not serialize MessageId with error: %v", err)
		return make([]byte, 1), err
	}

	// The digest is always recomputed from the payload; any PayloadDigest
	// already on the receiver is not used here.
	hasher := sha256.New()
	hasher.Write(clientMessage.Payload)

	startPosition = ClientMessage_PayloadDigestOffset
	endPosition = ClientMessage_PayloadDigestOffset + ClientMessage_PayloadDigestLength - 1
	err = putBytes(log, result, startPosition, endPosition, hasher.Sum(nil))
	if err != nil {
		log.Errorf("Could not serialize PayloadDigest with error: %v", err)
		return make([]byte, 1), err
	}

	err = putUInteger(log, result, ClientMessage_PayloadTypeOffset, clientMessage.PayloadType)
	if err != nil {
		log.Errorf("Could not serialize PayloadType with error: %v", err)
		return make([]byte, 1), err
	}

	err = putUInteger(log, result, ClientMessage_PayloadLengthOffset, clientMessage.PayloadLength)
	if err != nil {
		log.Errorf("Could not serialize PayloadLength with error: %v", err)
		return make([]byte, 1), err
	}

	startPosition = ClientMessage_PayloadOffset
	endPosition = ClientMessage_PayloadOffset + int(payloadLength) - 1
	err = putBytes(log, result, startPosition, endPosition, clientMessage.Payload)
	if err != nil {
		log.Errorf("Could not serialize Payload with error: %v", err)
		return make([]byte, 1), err
	}
	return result, nil
}

// putUInteger puts an unsigned 32-bit integer into byteArray at offset (big-endian).
func putUInteger(log log.T, byteArray []byte, offset int, value uint32) (err error) {
	return putInteger(log, byteArray, offset, int32(value))
}

// putInteger puts an integer value to a byte array starting from the specified offset.
func putInteger(log log.T, byteArray []byte, offset int, value int32) (err error) {
	byteArrayLength := len(byteArray)
	if offset > byteArrayLength-1 || offset+4 > byteArrayLength || offset < 0 {
		log.Error("putInteger failed: Offset is invalid.")
		return errors.New("Offset is outside the byte array.")
	}

	bytes, err := integerToBytes(log, value)
	if err != nil {
		// NOTE(review): the log text names "getBytesFromInteger" but the helper
		// called here is integerToBytes - stale message, behavior unchanged.
		log.Error("putInteger failed: getBytesFromInteger Failed.")
		return err
	}

	copy(byteArray[offset:offset+4], bytes)
	return nil
}

// integerToBytes gets bytes array from an integer.
func integerToBytes(log log.T, input int32) (result []byte, err error) { buf := new(bytes.Buffer) binary.Write(buf, binary.BigEndian, input) if buf.Len() != 4 { log.Error("integerToBytes failed: buffer output length is not equal to 4.") return make([]byte, 4), errors.New("Input array size is not equal to 4.") } return buf.Bytes(), nil } // putString puts a string value to a byte array starting from the specified offset. func putString(log log.T, byteArray []byte, offsetStart int, offsetEnd int, inputString string) (err error) { byteArrayLength := len(byteArray) if offsetStart > byteArrayLength-1 || offsetEnd > byteArrayLength-1 || offsetStart > offsetEnd || offsetStart < 0 { log.Error("putString failed: Offset is invalid.") return errors.New("Offset is outside the byte array.") } if offsetEnd-offsetStart+1 < len(inputString) { log.Error("putString failed: Not enough space to save the string.") return errors.New("Not enough space to save the string.") } // wipe out the array location first and then insert the new value. for i := offsetStart; i <= offsetEnd; i++ { byteArray[i] = ' ' } copy(byteArray[offsetStart:offsetEnd+1], inputString) return nil } // putBytes puts bytes into the array at the correct offset. func putBytes(log log.T, byteArray []byte, offsetStart int, offsetEnd int, inputBytes []byte) (err error) { byteArrayLength := len(byteArray) if offsetStart > byteArrayLength-1 || offsetEnd > byteArrayLength-1 || offsetStart > offsetEnd || offsetStart < 0 { log.Error("putBytes failed: Offset is invalid.") return errors.New("Offset is outside the byte array.") } if offsetEnd-offsetStart+1 != len(inputBytes) { log.Error("putBytes failed: Not enough space to save the bytes.") return errors.New("Not enough space to save the bytes.") } copy(byteArray[offsetStart:offsetEnd+1], inputBytes) return nil } // putUuid puts the 128 bit uuid to an array of bytes starting from the offset. 
func putUuid(log log.T, byteArray []byte, offset int, input uuid.UUID) (err error) { if input == nil { log.Error("putUuid failed: input is null.") return errors.New("putUuid failed: input is null.") } byteArrayLength := len(byteArray) if offset > byteArrayLength-1 || offset+16-1 > byteArrayLength-1 || offset < 0 { log.Error("putUuid failed: Offset is invalid.") return errors.New("Offset is outside the byte array.") } leastSignificantLong, err := bytesToLong(log, input.Bytes()[8:16]) if err != nil { log.Error("putUuid failed: Failed to get leastSignificant Long value.") return errors.New("Failed to get leastSignificant Long value.") } mostSignificantLong, err := bytesToLong(log, input.Bytes()[0:8]) if err != nil { log.Error("putUuid failed: Failed to get mostSignificantLong Long value.") return errors.New("Failed to get mostSignificantLong Long value.") } err = putLong(log, byteArray, offset, leastSignificantLong) if err != nil { log.Error("putUuid failed: Failed to put leastSignificantLong Long value.") return errors.New("Failed to put leastSignificantLong Long value.") } err = putLong(log, byteArray, offset+8, mostSignificantLong) if err != nil { log.Error("putUuid failed: Failed to put mostSignificantLong Long value.") return errors.New("Failed to put mostSignificantLong Long value.") } return nil } // putLong puts a long integer value to a byte array starting from the specified offset. func putLong(log log.T, byteArray []byte, offset int, value int64) (err error) { byteArrayLength := len(byteArray) if offset > byteArrayLength-1 || offset+8 > byteArrayLength || offset < 0 { log.Error("putInteger failed: Offset is invalid.") return errors.New("Offset is outside the byte array.") } mbytes, err := longToBytes(log, value) if err != nil { log.Error("putInteger failed: getBytesFromInteger Failed.") return err } copy(byteArray[offset:offset+8], mbytes) return nil } // putULong puts an unsigned long integer. 
func putULong(log log.T, byteArray []byte, offset int, value uint64) (err error) {
	return putLong(log, byteArray, offset, int64(value))
}

// SerializeClientMessagePayload marshals payloads for all session specific messages into bytes.
func SerializeClientMessagePayload(log log.T, obj interface{}) (reply []byte, err error) {
	reply, err = json.Marshal(obj)
	if err != nil {
		log.Errorf("Could not serialize message with err: %s", err)
	}
	return
}

// SerializeClientMessageWithAcknowledgeContent marshals client message with payloads of acknowledge contents into bytes.
// It wraps the JSON-encoded AcknowledgeContent in a full ClientMessage envelope
// with a fresh random MessageId.
func SerializeClientMessageWithAcknowledgeContent(log log.T, acknowledgeContent AcknowledgeContent) (reply []byte, err error) {
	acknowledgeContentBytes, err := SerializeClientMessagePayload(log, acknowledgeContent)
	if err != nil {
		// should not happen
		log.Errorf("Cannot marshal acknowledge content to json string: %v", acknowledgeContentBytes)
		return
	}

	uuid.SwitchFormat(uuid.CleanHyphen)
	messageId := uuid.NewV4()
	clientMessage := ClientMessage{
		MessageType:    AcknowledgeMessage,
		SchemaVersion:  1,
		CreatedDate:    uint64(time.Now().UnixNano() / 1000000), // epoch millis, per the CreatedDate header spec
		SequenceNumber: 0,
		Flags:          3, // bits 0 (SYN) and 1 (FIN) both set; see the header flag definitions
		MessageId:      messageId,
		Payload:        acknowledgeContentBytes,
	}

	reply, err = clientMessage.SerializeClientMessage(log)
	if err != nil {
		log.Errorf("Error serializing client message with acknowledge content err: %v", err)
	}
	return
}

// DeserializeDataStreamAcknowledgeContent parses acknowledge content from payload of ClientMessage.
// The message must be of type AcknowledgeMessage.
func (clientMessage *ClientMessage) DeserializeDataStreamAcknowledgeContent(log log.T) (dataStreamAcknowledge AcknowledgeContent, err error) {
	if clientMessage.MessageType != AcknowledgeMessage {
		err = fmt.Errorf("ClientMessage is not of type AcknowledgeMessage. Found message type: %s", clientMessage.MessageType)
		return
	}

	err = json.Unmarshal(clientMessage.Payload, &dataStreamAcknowledge)
	if err != nil {
		log.Errorf("Could not deserialize rawMessage: %s", err)
	}
	return
}

// DeserializeChannelClosedMessage parses channelClosed message from payload of ClientMessage.
// The message must be of type ChannelClosedMessage.
func (clientMessage *ClientMessage) DeserializeChannelClosedMessage(log log.T) (channelClosed ChannelClosed, err error) {
	if clientMessage.MessageType != ChannelClosedMessage {
		err = fmt.Errorf("ClientMessage is not of type ChannelClosed. Found message type: %s", clientMessage.MessageType)
		return
	}

	err = json.Unmarshal(clientMessage.Payload, &channelClosed)
	if err != nil {
		log.Errorf("Could not deserialize rawMessage: %s", err)
	}
	return
}

// DeserializeHandshakeRequest parses a handshake request from the payload.
// The message must carry PayloadType HandshakeRequestPayloadType.
func (clientMessage *ClientMessage) DeserializeHandshakeRequest(log log.T) (handshakeRequest HandshakeRequestPayload, err error) {
	if clientMessage.PayloadType != uint32(HandshakeRequestPayloadType) {
		// log.Errorf both logs and returns the formatted error in this
		// project's log package.
		err = log.Errorf("ClientMessage PayloadType is not of type HandshakeRequestPayloadType. Found payload type: %d", clientMessage.PayloadType)
		return
	}

	err = json.Unmarshal(clientMessage.Payload, &handshakeRequest)
	if err != nil {
		log.Errorf("Could not deserialize rawMessage: %s", err)
	}
	return
}

// DeserializeHandshakeComplete parses a handshake-complete notification from the payload.
// The message must carry PayloadType HandshakeCompletePayloadType.
func (clientMessage *ClientMessage) DeserializeHandshakeComplete(log log.T) (handshakeComplete HandshakeCompletePayload, err error) {
	if clientMessage.PayloadType != uint32(HandshakeCompletePayloadType) {
		// log.Errorf both logs and returns the formatted error in this
		// project's log package.
		err = log.Errorf("ClientMessage PayloadType is not of type HandshakeCompletePayloadType. Found payload type: %d", clientMessage.PayloadType)
		return
	}

	err = json.Unmarshal(clientMessage.Payload, &handshakeComplete)
	if err != nil {
		log.Errorf("Could not deserialize rawMessage, %s : %s", clientMessage.Payload, err)
	}
	return
}
572
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// message package defines data channel messages structure.
package message

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/aws/session-manager-plugin/src/log"
	"github.com/stretchr/testify/assert"
	"github.com/twinj/uuid"
)

// EXPECTATION marks whether a table-driven test case should succeed or fail.
type EXPECTATION int

const (
	SUCCESS EXPECTATION = iota
	ERROR
)

// getNByteBuffer returns a zeroed buffer of n bytes.
func getNByteBuffer(n int) []byte {
	return make([]byte, n)
}

// Default generator for smaller data types e.g. strings, integers
func get8ByteBuffer() []byte {
	return getNByteBuffer(8)
}

// Default generator for UUID
func get16ByteBuffer() []byte {
	return getNByteBuffer(16)
}

// Shared fixtures for the serialization round-trip tests below.
var (
	mockLogger                 = log.NewMockLog()
	defaultByteBufferGenerator = get8ByteBuffer
	messageId                  = "dd01e56b-ff48-483e-a508-b5f073f31b16"
	messageType                = InputStreamMessage
	schemaVersion              = uint32(1)
	createdDate                = uint64(1503434274948)
	destinationId              = "destination-id"
	actionType                 = "start"
	payload                    = []byte("payload")
	defaultUuid                = "dd01e56b-ff48-483e-a508-b5f073f31b16"
	// Raw JSON payload matching AcknowledgeContent.
	ackMessagePayload = []byte(fmt.Sprintf(
		`{ "AcknowledgedMessageType": "%s", "AcknowledgedMessageId":"%s" }`,
		AcknowledgeMessage,
		messageId))
	// Raw JSON payload matching ChannelClosed.
	channelClosedPayload = []byte(fmt.Sprintf(
		`{ "MessageType": "%s", "MessageId": "%s", "CreatedDate": "%s", "SessionId": "%s", "SchemaVersion": %s, "Output": "%s" }`,
		ChannelClosedMessage,
		messageId,
		strconv.FormatUint(createdDate, 10),
		sessionId,
		fmt.Sprint(schemaVersion),
		string(payload),
	))
	// Raw JSON payload matching HandshakeRequestPayload.
	handshakeReqPayload = []byte(fmt.Sprintf(
		`{ "AgentVersion": "%s", "RequestedClientActions": [ { "ActionType": "%s", "ActionParameters": %s } ] }`,
		agentVersion,
		actionType,
		sampleParameters,
	))
	// Raw JSON payload matching HandshakeCompletePayload.
	handshakeCompletePayload = []byte(fmt.Sprintf(
		`{ "HandshakeTimeToComplete": %d, "CustomerMessage": "%s" }`,
		timeToComplete,
		customerMessage,
	))
	timeToComplete   = 1000000
	customerMessage  = "Handshake Complete"
	sampleParameters = "{\"name\": \"richard\"}"
	sequenceNumber   = int64(2)
	agentVersion     = "3.0"
	sessionId        = "sessionId_01234567890abcedf"
)

// TestParams is the shared case shape for the put/get helper tables.
// input/expected are interface{} and are type-asserted per test.
type TestParams struct {
	name        string
	expectation EXPECTATION
	byteArray   []byte
	offsetStart int
	offsetEnd   int
	input       interface{}
	expected    interface{}
}

func TestPutString(t *testing.T) {
	t.Logf("Starting test suite: %s", t.Name())
	testCases := []TestParams{
		{"Basic", SUCCESS, defaultByteBufferGenerator(), 0, 7, "hello", "hello"},
		{"Basic offset", SUCCESS, defaultByteBufferGenerator(), 1, 7, "hello", "hello"},
		{"Bad offset", ERROR, defaultByteBufferGenerator(), -1, 7, "hello", "Offset is outside"},
		{"Data too long for buffer", ERROR, defaultByteBufferGenerator(), 0, 7, "longinputstring", "Not enough space"},
	}
	for _, tc := range testCases {
		testString := fmt.Sprintf("Running test case: %s", tc.name)
		t.Run(testString, func(t *testing.T) {
			// Asserting type as string for input
			strInput, ok := tc.input.(string)
			assert.True(t, ok, "Type assertion failed in %s:%s", t.Name(), tc.name)
			err := putString(
				mockLogger,
				tc.byteArray,
				tc.offsetStart,
				tc.offsetEnd,
				strInput)
			if tc.expectation == SUCCESS {
				assert.Nil(t, err, "%s:%s threw an error when no error was expected.", t.Name(), tc.name)
				assert.Contains(t, string(tc.byteArray), tc.expected)
			} else if tc.expectation == ERROR {
				assert.Error(t, err, "%s:%s did not throw an error when an error was expected.", t.Name(), tc.name)
				assert.Contains(t, err.Error(), tc.expected,
					"%s:%s does not contain the intended message. Expected: \"%s\", Actual: \"%s\"", tc.expected, err)
			} else {
				t.Fatal("Test expectation was not correctly set.")
			}
		})
	}
}

func TestPutBytes(t *testing.T) {
	t.Logf("Starting test suite: %s", t.Name())
	testCases := []TestParams{
		{"Basic", SUCCESS, defaultByteBufferGenerator(), 0, 3,
			[]byte{0x22, 0x55, 0xff, 0x22},
			[]byte{0x22, 0x55, 0xff, 0x22, 0x00, 0x00, 0x00, 0x00}},
		{"Basic offset", SUCCESS, defaultByteBufferGenerator(), 1, 4,
			[]byte{0x22, 0x55, 0xff, 0x22},
			[]byte{0x00, 0x22, 0x55, 0xff, 0x22, 0x00, 0x00, 0x00}},
		{"Bad offset", ERROR, defaultByteBufferGenerator(), -1, 7,
			[]byte{0x22, 0x55, 0x00, 0x22},
			"Offset is outside"},
		{"Data too long for buffer", ERROR, defaultByteBufferGenerator(), 0, 2,
			[]byte{0x22, 0x55, 0x00, 0x22},
			"Not enough space"},
	}
	for _, tc := range testCases {
		testString := fmt.Sprintf("Running test case: %s", tc.name)
		t.Run(testString, func(t *testing.T) {
			// Assert type as byte array
			byteInput, ok := tc.input.([]byte)
			assert.True(t, ok, "Type assertion failed in %s:%s", t.Name(), tc.name)
			err := putBytes(
				mockLogger,
				tc.byteArray,
				tc.offsetStart,
				tc.offsetEnd,
				byteInput)
			if tc.expectation == SUCCESS {
				assert.Nil(t, err, "%s:%s threw an error when no error was expected.", t.Name(), tc.name)
				assert.True(t, reflect.DeepEqual(tc.byteArray, tc.expected))
			} else if tc.expectation == ERROR {
				assert.Error(t, err, "%s:%s did not throw an error when an error was expected.", t.Name(), tc.name)
				assert.Contains(t, err.Error(), tc.expected,
					"%s:%s does not contain the intended message. Expected: \"%s\", Actual: \"%s\"", tc.expected, err)
			} else {
				t.Fatal("Test expectation was not correctly set.")
			}
		})
	}
}

func TestLongToBytes(t *testing.T) {
	t.Logf("Starting test suite: %s", t.Name())
	testcases := []struct {
		name        string
		expectation EXPECTATION
		input       int64
		expected    interface{}
	}{
		// Big-endian encoding of 5747283.
		{"Basic", SUCCESS, 5747283, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0xb2, 0x53}},
	}
	for _, tc := range testcases {
		testString := fmt.Sprintf("Running test case: %s", tc.name)
		t.Run(testString, func(t *testing.T) {
			bytes, err := longToBytes(mockLogger, tc.input)
			if tc.expectation == SUCCESS {
				assert.Nil(t, err, "An error was thrown when none was expected.")
				assert.True(t, reflect.DeepEqual(bytes, tc.expected))
			} else if tc.expectation == ERROR {
				assert.Error(t, err, "No error was thrown when one was expected.")
				assert.Contains(t, err, tc.expected)
			}
		})
	}
}

func TestPutLong(t *testing.T) {
	t.Logf("Starting test suite: %s", t.Name())
	// OffsetEnd is not used in PutLong: Long is always 8-bytes
	testCases := []TestParams{
		{"Basic", SUCCESS, getNByteBuffer(9), 0, 0, 5747283,
			[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0xb2, 0x53, 0x00}},
		{"Basic offset", SUCCESS, getNByteBuffer(10), 1, 0, 92837273,
			[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x88, 0x95, 0x99, 0x00}},
		{"Exact offset", SUCCESS, defaultByteBufferGenerator(), 0, 0, 50,
			[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x32}},
		{"Exact offset +1", ERROR, defaultByteBufferGenerator(), 1, 0, 50,
			"Offset is outside"},
		{"Negative offset", ERROR, getNByteBuffer(9), -1, 0, 5748,
			"Offset is outside"},
		{"Offset out of bounds", ERROR, getNByteBuffer(4), 10, 0, 938283,
			"Offset is outside"},
	}
	for _, tc := range testCases {
		testString := fmt.Sprintf("Running test case: %s", tc.name)
		t.Run(testString, func(t *testing.T) {
			// Assert type as long int
			longInput, ok := tc.input.(int)
			assert.True(t, reflect.DeepEqual(tc.input, longInput), "Cast went wrong. Expected: %v, Got: %v", tc.input, longInput)
			assert.True(t, ok, "Type assertion failed in %s:%s", t.Name(), tc.name)
			err := putLong(
				mockLogger,
				tc.byteArray,
				tc.offsetStart,
				int64(longInput))
			if tc.expectation == SUCCESS {
				assert.Nil(t, err, "%s:%s threw an error when no error was expected.", t.Name(), tc.name)
				assert.Equal(t, tc.expected, tc.byteArray)
			} else if tc.expectation == ERROR {
				assert.Error(t, err, "%s:%s did not throw an error when an error was expected.", t.Name(), tc.name)
				assert.Contains(t, err.Error(), tc.expected,
					"%s:%s does not contain the intended message. Expected: \"%s\", Actual: \"%s\"", tc.expected, err)
			} else {
				t.Fatal("Test expectation was not correctly set.")
			}
		})
	}
}

func TestPutInteger(t *testing.T) {
	t.Logf("Starting test suite: %s", t.Name())
	// OffsetEnd is not used in PutInt: Int is always 4-bytes
	testCases := []TestParams{
		{"Basic", SUCCESS, getNByteBuffer(5), 0, 0, 324,
			[]byte{0x00, 0x00, 0x01, 0x44, 0x00}},
		{"Basic offset", SUCCESS, defaultByteBufferGenerator(), 1, 0, 520392,
			[]byte{0x00, 0x00, 0x07, 0xf0, 0xc8, 0x00, 0x00, 0x00}},
		{"Exact offset", SUCCESS, getNByteBuffer(4), 0, 0, 50,
			[]byte{0x00, 0x00, 0x00, 0x32}},
		{"Exact offset +1", ERROR, defaultByteBufferGenerator(), 5, 0, 50,
			"Offset is outside"},
		{"Negative offset", ERROR, getNByteBuffer(9), -1, 0, 5748,
			"Offset is outside"},
		{"Offset out of bounds", ERROR, getNByteBuffer(4), 10, 0, 938283,
			"Offset is outside"},
	}
	for _, tc := range testCases {
		testString := fmt.Sprintf("Running test case: %s", tc.name)
		t.Run(testString, func(t *testing.T) {
			// Assert type as long int
			intInput, ok := tc.input.(int)
			assert.True(t, reflect.DeepEqual(tc.input, intInput), "Cast went wrong. Expected: %v, Got: %v", tc.input, intInput)
			assert.True(t, ok, "Type assertion failed in %s:%s", t.Name(), tc.name)
			err := putInteger(
				mockLogger,
				tc.byteArray,
				tc.offsetStart,
				int32(intInput))
			if tc.expectation == SUCCESS {
				assert.Nil(t, err, "%s:%s threw an error when no error was expected.", t.Name(), tc.name)
				assert.Equal(t, tc.expected, tc.byteArray)
			} else if tc.expectation == ERROR {
				assert.Error(t, err, "%s:%s did not throw an error when an error was expected.", t.Name(), tc.name)
				assert.Contains(t, err.Error(), tc.expected,
					"%s:%s does not contain the intended message. Expected: \"%s\", Actual: \"%s\"", tc.expected, err)
			} else {
				t.Fatal("Test expectation was not correctly set.")
			}
		})
	}
}

func TestGetString(t *testing.T) {
	t.Logf("Starting test suite: %s", t.Name())
	// For GetString, the test parameter "offsetEnd" is used to indicate the length of the string to be read.
	testCases := []TestParams{
		{"Basic", SUCCESS, []byte{0x72, 0x77, 0x00}, 0, 2, nil, "rw"},
		{"Basic offset", SUCCESS, []byte{0x00, 0x00, 0x72, 0x77, 0x00}, 2, 2, nil, "rw"},
		{"Negative offset", ERROR, getNByteBuffer(9), -1, 0, nil, "Offset is outside"},
		{"Offset out of bounds", ERROR, getNByteBuffer(4), 10, 2, nil, "Offset is outside"},
	}
	for _, tc := range testCases {
		testString := fmt.Sprintf("Running test case: %s", tc.name)
		t.Run(testString, func(t *testing.T) {
			strOut, err := getString(
				mockLogger,
				tc.byteArray,
				tc.offsetStart,
				tc.offsetEnd)
			if tc.expectation == SUCCESS {
				assert.Nil(t, err, "%s:%s threw an error when no error was expected.", t.Name(), tc.name)
				assert.Equal(t, tc.expected, strOut)
			} else if tc.expectation == ERROR {
				assert.Error(t, err, "%s:%s did not throw an error when an error was expected.", t.Name(), tc.name)
				assert.Contains(t, err.Error(), tc.expected,
					"%s:%s does not contain the intended message. Expected: \"%s\", Actual: \"%s\"", tc.expected, err)
			} else {
				t.Fatal("Test expectation was not correctly set.")
			}
		})
	}
}

func TestGetBytes(t *testing.T) {
	t.Logf("Starting test suite: %s", t.Name())
	// For GetBytes, the test parameter "offsetEnd" is used to indicate the length of the bytes to be read.
	testCases := []TestParams{
		{"Basic", SUCCESS, []byte{0x72, 0x77, 0x00}, 0, 2, nil, []byte{0x72, 0x77}},
		{"Basic offset", SUCCESS, []byte{0x00, 0x00, 0x72, 0x77, 0x00}, 2, 2, nil, []byte{0x72, 0x77}},
		{"Negative offset", ERROR, defaultByteBufferGenerator(), -1, 0, nil, "Offset is outside"},
		{"Offset out of bounds", ERROR, getNByteBuffer(4), 10, 2, nil, "Offset is outside"},
	}
	for _, tc := range testCases {
		testString := fmt.Sprintf("Running test case: %s", tc.name)
		t.Run(testString, func(t *testing.T) {
			byteOut, err := getBytes(
				mockLogger,
				tc.byteArray,
				tc.offsetStart,
				tc.offsetEnd)
			if tc.expectation == SUCCESS {
				assert.Nil(t, err, "%s:%s threw an error when no error was expected.", t.Name(), tc.name)
				assert.Equal(t, tc.expected, byteOut)
			} else if tc.expectation == ERROR {
				assert.Error(t, err, "%s:%s did not throw an error when an error was expected.", t.Name(), tc.name)
				assert.Contains(t, err.Error(), tc.expected,
					"%s:%s does not contain the intended message. Expected: \"%s\", Actual: \"%s\"", tc.expected, err)
			} else {
				t.Fatal("Test expectation was not correctly set.")
			}
		})
	}
}

func TestGetLong(t *testing.T) {
	t.Logf("Starting test suite: %s", t.Name())
	// For GetLong, offsetEnd is not used as a test parameter.
	testCases := []TestParams{
		{"Basic", SUCCESS,
			[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x05, 0x66, 0x00},
			0, 0, nil, 5899622},
		{"Basic offset", SUCCESS,
			[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x05, 0x6a, 0x00},
			2, 0, nil, 5899626},
		{"Exact offset", SUCCESS,
			[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x32},
			0, 0, nil, 50},
		{"Exact offset +1", ERROR,
			[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
			1, 0, nil, "Offset is outside"},
		{"Negative offset", ERROR, getNByteBuffer(9), -1, 0, nil, "Offset is outside"},
		{"Offset out of bounds", ERROR, getNByteBuffer(4), 10, 2, nil, "Offset is outside"},
	}
	for _, tc := range testCases {
		testString := fmt.Sprintf("Running test case: %s", tc.name)
		t.Run(testString, func(t *testing.T) {
			longOut, err := getLong(
				mockLogger,
				tc.byteArray,
				tc.offsetStart)
			assert.IsType(t, int64(1), longOut, "Returned value is not the correct type.")
			if tc.expectation == SUCCESS {
				expectedInt := tc.expected.(int)
				expectedLong := int64(expectedInt)
				assert.Nil(t, err, "%s:%s threw an error when no error was expected.", t.Name(), tc.name)
				assert.Equal(t, expectedLong, longOut)
			} else if tc.expectation == ERROR {
				assert.Error(t, err, "%s:%s did not throw an error when an error was expected.", t.Name(), tc.name)
				assert.Contains(t, err.Error(), tc.expected,
					"%s:%s does not contain the intended message. Expected: \"%s\", Actual: \"%s\"", tc.expected, err)
			} else {
				t.Fatal("Test expectation was not correctly set.")
			}
		})
	}
}

// Walks Validate through each missing-field error in order, then supplies a
// correct payload digest and expects success.
func TestClientMessage_Validate(t *testing.T) {
	u, _ := uuid.Parse(messageId)
	clientMessage := ClientMessage{
		SchemaVersion:  schemaVersion,
		SequenceNumber: 1,
		Flags:          2,
		MessageId:      u,
		Payload:        payload,
		PayloadLength:  3,
	}

	err := clientMessage.Validate()
	assert.Error(t, err, "No error was thrown when one was expected.")
	assert.Contains(t, err.Error(), "HeaderLength cannot be zero")

	clientMessage.HeaderLength = 1
	err = clientMessage.Validate()
	assert.Error(t, err, "No error was thrown when one was expected.")
	assert.Contains(t, err.Error(), "MessageType is missing")

	clientMessage.MessageType = messageType
	err = clientMessage.Validate()
	assert.Error(t, err, "No error was thrown when one was expected.")
	assert.Contains(t, err.Error(), "CreatedDate is missing")

	clientMessage.CreatedDate = createdDate
	err = clientMessage.Validate()
	assert.Error(t, err, "No error was thrown when one was expected.")
	assert.Contains(t, err.Error(), "payload Hash is not valid")

	hasher := sha256.New()
	hasher.Write(payload)
	clientMessage.PayloadDigest = hasher.Sum(nil)
	err = clientMessage.Validate()
	assert.NoError(t, err, "An error was thrown when none was expected.")
}

func TestClientMessage_ValidateStartPublicationMessage(t *testing.T) {
	u, _ := uuid.Parse(messageId)
	clientMessage := ClientMessage{
		SchemaVersion:  schemaVersion,
		SequenceNumber: 1,
		Flags:          2,
		MessageId:      u,
		Payload:        payload,
		PayloadLength:  3,
		MessageType:    StartPublicationMessage,
	}

	err := clientMessage.Validate()
	assert.NoError(t, err, "Validating StartPublicationMessage should not throw an error")
}

func TestClientMessage_DeserializeDataStreamAcknowledgeContent(t *testing.T) {
	t.Logf("Starting test: %s", t.Name())
	// ClientMessage is initialized with improperly formatted json data
	testMessage := ClientMessage{
		Payload: payload,
	}

	// Wrong message type: should fail.
	ackMessage, err := testMessage.DeserializeDataStreamAcknowledgeContent(mockLogger)
	assert.Equal(t, AcknowledgeContent{}, ackMessage)
	assert.NotNil(t, err, "An error was not thrown when one was expected.")

	// Right type but invalid JSON payload: should fail.
	testMessage.MessageType = AcknowledgeMessage
	ackMessage2, err := testMessage.DeserializeDataStreamAcknowledgeContent(mockLogger)
	assert.Equal(t, AcknowledgeContent{}, ackMessage2)
	assert.NotNil(t, err, "An error was not thrown when one was expected.")

	// Right type and valid payload: should succeed.
	testMessage.Payload = ackMessagePayload
	ackMessage3, err := testMessage.DeserializeDataStreamAcknowledgeContent(mockLogger)
	assert.Equal(t, AcknowledgeMessage, ackMessage3.MessageType)
	assert.Equal(t, messageId, ackMessage3.MessageId)
	assert.Nil(t, err, "An error was thrown when one was not expected.")
}

func TestClientMessage_DeserializeChannelClosedMessage(t *testing.T) {
	t.Logf("Starting test: %s", t.Name())
	// ClientMessage is initialized with improperly formatted json data
	testMessage := ClientMessage{
		Payload: payload,
	}

	closeMessage, err := testMessage.DeserializeChannelClosedMessage(mockLogger)
	assert.Equal(t, ChannelClosed{}, closeMessage)
	assert.NotNil(t, err, "An error was not thrown when one was expected.")

	testMessage.MessageType = ChannelClosedMessage
	closeMessage2, err := testMessage.DeserializeChannelClosedMessage(mockLogger)
	assert.Equal(t, ChannelClosed{}, closeMessage2)
	assert.NotNil(t, err, "An error was not thrown when one was expected.")

	testMessage.Payload = channelClosedPayload
	closeMessage3, err := testMessage.DeserializeChannelClosedMessage(mockLogger)
	assert.Equal(t, ChannelClosedMessage, closeMessage3.MessageType)
	assert.Equal(t, messageId, closeMessage3.MessageId)
	assert.Equal(t, strconv.FormatUint(createdDate, 10), closeMessage3.CreatedDate)
	assert.Equal(t, int(schemaVersion), closeMessage3.SchemaVersion)
	assert.Equal(t, sessionId, closeMessage3.SessionId)
	assert.Equal(t, string(payload), closeMessage3.Output)
	assert.Nil(t, err, "An error was thrown when one was not expected.")
}

func TestClientMessage_DeserializeHandshakeRequest(t *testing.T) {
	t.Logf("Starting test: %s", t.Name())
	// ClientMessage is initialized with improperly formatted json data
	testMessage := ClientMessage{
		Payload: payload,
	}

	handshakeReq, err := testMessage.DeserializeHandshakeRequest(mockLogger)
	assert.Equal(t, HandshakeRequestPayload{}, handshakeReq)
	assert.NotNil(t, err, "An error was not thrown when one was expected.")

	testMessage.PayloadType = uint32(HandshakeRequestPayloadType)
	handshakeReq2, err := testMessage.DeserializeHandshakeRequest(mockLogger)
	assert.Equal(t, HandshakeRequestPayload{}, handshakeReq2)
	assert.NotNil(t, err, "An error was not thrown when one was expected.")

	testMessage.Payload = handshakeReqPayload
	handshakeReq3, err := testMessage.DeserializeHandshakeRequest(mockLogger)
	assert.Equal(t, agentVersion, handshakeReq3.AgentVersion)
	assert.Equal(t, ActionType(actionType), handshakeReq3.RequestedClientActions[0].ActionType)
	assert.Equal(t, json.RawMessage(sampleParameters), handshakeReq3.RequestedClientActions[0].ActionParameters)
	assert.Nil(t, err, "An error was thrown when one was not expected.")
}

func TestClientMessage_DeserializeHandshakeComplete(t *testing.T) {
	t.Logf("Starting test: %s", t.Name())
	// ClientMessage is initialized with improperly formatted json data
	testMessage := ClientMessage{
		Payload: payload,
	}

	handshakeComplete, err := testMessage.DeserializeHandshakeComplete(mockLogger)
	assert.Equal(t, HandshakeCompletePayload{}, handshakeComplete)
	assert.NotNil(t, err, "An error was not thrown when one was expected.")

	testMessage.PayloadType = uint32(HandshakeCompletePayloadType)
	handshakeComplete2, err := testMessage.DeserializeHandshakeComplete(mockLogger)
	assert.Equal(t, HandshakeCompletePayload{}, handshakeComplete2)
	assert.NotNil(t, err, "An error was not thrown when one was expected.")

	testMessage.Payload = handshakeCompletePayload
	handshakeComplete3, err := testMessage.DeserializeHandshakeComplete(mockLogger)
	assert.Equal(t, time.Duration(timeToComplete), handshakeComplete3.HandshakeTimeToComplete)
	assert.Equal(t, customerMessage, handshakeComplete3.CustomerMessage)
	assert.Nil(t, err, "An error was thrown when one was not expected.")
}

func TestPutUuid(t *testing.T) {
	t.Logf("Starting test suite: %s", t.Name())
	// OffsetEnd is not used for putUuid as uuid are always 128-bit
	testCases := []TestParams{
		{"Basic", SUCCESS, get16ByteBuffer(), 0, 0, defaultUuid, defaultUuid},
		{"Nil uuid", ERROR, get16ByteBuffer(), 0, 0,
			"00000000-0000-0000-0000-000000000000", "null"},
		{"Bad offset", ERROR, defaultByteBufferGenerator(), 8, 0, defaultUuid,
			"Offset is outside"},
	}
	for _, tc := range testCases {
		testString := fmt.Sprintf("Running test case: %s", tc.name)
		t.Run(testString, func(t *testing.T) {
			// Asserting type as string for input
			strInput, ok := tc.input.(string)
			assert.True(t, ok, "Type assertion failed in %s:%s", t.Name(), tc.name)
			// Get Uuid from string
			uuidInput, err := uuid.Parse(strInput)
			err = putUuid(
				mockLogger,
				tc.byteArray,
				tc.offsetStart,
				uuidInput)
			if tc.expectation == SUCCESS {
				assert.Nil(t, err, "%s:%s threw an error when no error was expected.", t.Name(), tc.name)
				// Compare against a buffer produced from the expected uuid.
				strExpected := tc.expected.(string)
				uuidOut, _ := uuid.Parse(strExpected)
				expectedBuffer := get16ByteBuffer()
				putUuid(mockLogger, expectedBuffer, 0, uuidOut)
				assert.Equal(t, tc.byteArray, expectedBuffer)
			} else if tc.expectation == ERROR {
				assert.Error(t, err, "%s:%s did not throw an error when an error was expected.", t.Name(), tc.name)
				assert.Contains(t, err.Error(), tc.expected,
					"%s:%s does not contain the intended message. Expected: \"%s\", Actual: \"%s\"", tc.expected, err)
			} else {
				t.Fatal("Test expectation was not correctly set.")
			}
		})
	}
}

// Round-trip: putString then getString over the same region.
func TestPutGetString(t *testing.T) {
	input := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x01}
	err1 := putString(log.NewMockLog(), input, 1, 8, "hello")
	assert.Nil(t, err1)
	result, err := getString(log.NewMockLog(), input, 1, 8)
	assert.Nil(t, err)
	assert.Equal(t, "hello", result)
}

// Round-trip: putInteger then getInteger, including out-of-bounds read.
func TestPutGetInteger(t *testing.T) {
	input := []byte{0x00, 0x00, 0x00, 0x00, 0xFF, 0x00}
	err := putInteger(log.NewMockLog(), input, 1, 256)
	assert.Nil(t, err)
	assert.Equal(t, byte(0x00), input[1])
	assert.Equal(t, byte(0x00), input[2])
	assert.Equal(t, byte(0x01), input[3])
	assert.Equal(t, byte(0x00), input[4])

	result, err2 := getInteger(log.NewMockLog(), input, 1)
	assert.Nil(t, err2)
	assert.Equal(t, int32(256), result)

	result2, err3 := getInteger(log.NewMockLog(), input, 2)
	assert.Equal(t, int32(65536), result2)
	assert.Nil(t, err3)

	result3, err4 := getInteger(mockLogger, input, 3)
	assert.Equal(t, int32(0), result3)
	assert.NotNil(t, err4)
}

// Round-trip: putLong then getLong.
func TestPutGetLong(t *testing.T) {
	input := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00}
	err := putLong(log.NewMockLog(), input, 1, 4294967296) // 2 to the 32
	assert.Nil(t, err)
	assert.Equal(t, byte(0x00), input[1])
	assert.Equal(t, byte(0x00), input[2])
	assert.Equal(t, byte(0x00), input[3])
	assert.Equal(t, byte(0x01), input[4])
	assert.Equal(t, byte(0x00), input[5])
	assert.Equal(t, byte(0x00), input[6])
	assert.Equal(t, byte(0x00), input[7])
	assert.Equal(t, byte(0x00), input[8])

	testLong, err2 := getLong(log.NewMockLog(), input, 1)
	assert.Nil(t, err2)
	assert.Equal(t, int64(4294967296), testLong)
}

func TestGetBytesFromInteger(t *testing.T) {
	input := int32(256)
	result, err := integerToBytes(log.NewMockLog(), input)
	assert.Nil(t, err)
	assert.Equal(t, byte(0x00), result[0])
	assert.Equal(t, byte(0x00), result[1])
	assert.Equal(t, byte(0x01), result[2])
	assert.Equal(t, byte(0x00), result[3])
}

// Serializes a full ClientMessage, checks every header field in the wire
// buffer, then deserializes it back and compares to the original.
func TestSerializeAndDeserializeClientMessage(t *testing.T) {
	u, _ := uuid.Parse(messageId)
	clientMessage := ClientMessage{
		MessageType:    messageType,
		SchemaVersion:  schemaVersion,
		CreatedDate:    createdDate,
		SequenceNumber: 1,
		Flags:          2,
		MessageId:      u,
		Payload:        payload,
	}

	// Test SerializeClientMessage
	serializedBytes, err := clientMessage.SerializeClientMessage(log.NewMockLog())
	assert.Nil(t, err, "Error serializing message")

	seralizedMessageType := strings.TrimRight(string(serializedBytes[ClientMessage_MessageTypeOffset:ClientMessage_MessageTypeOffset+ClientMessage_MessageTypeLength-1]), " ")
	assert.Equal(t, seralizedMessageType, messageType)

	serializedVersion, err := getUInteger(log.NewMockLog(), serializedBytes, ClientMessage_SchemaVersionOffset)
	assert.Nil(t, err)
	assert.Equal(t, serializedVersion, schemaVersion)

	serializedCD, err := getULong(log.NewMockLog(), serializedBytes, ClientMessage_CreatedDateOffset)
	assert.Nil(t, err)
	assert.Equal(t, serializedCD, createdDate)

	serializedSequence, err := getLong(log.NewMockLog(), serializedBytes, ClientMessage_SequenceNumberOffset)
	assert.Nil(t, err)
	assert.Equal(t, serializedSequence, int64(1))

	serializedFlags, err := getULong(log.NewMockLog(), serializedBytes, ClientMessage_FlagsOffset)
	assert.Nil(t, err)
	assert.Equal(t, serializedFlags, uint64(2))

	seralizedMessageId, err := getUuid(log.NewMockLog(), serializedBytes, ClientMessage_MessageIdOffset)
	assert.Nil(t, err)
	assert.Equal(t, seralizedMessageId.String(), messageId)

	serializedDigest, err := getBytes(log.NewMockLog(), serializedBytes, ClientMessage_PayloadDigestOffset, ClientMessage_PayloadDigestLength)
	assert.Nil(t, err)
	hasher := sha256.New()
	hasher.Write(clientMessage.Payload)
	expectedHash := hasher.Sum(nil)
	assert.True(t, reflect.DeepEqual(serializedDigest, expectedHash))

	//Test DeserializeClientMessage
	deserializedClientMessage := &ClientMessage{}
	err = deserializedClientMessage.DeserializeClientMessage(log.NewMockLog(), serializedBytes)
	assert.Nil(t, err)
	assert.Equal(t, messageType, deserializedClientMessage.MessageType)
	assert.Equal(t, schemaVersion, deserializedClientMessage.SchemaVersion)
	assert.Equal(t, messageId, deserializedClientMessage.MessageId.String())
	assert.Equal(t, createdDate, deserializedClientMessage.CreatedDate)
	assert.Equal(t, uint64(2), deserializedClientMessage.Flags)
	assert.Equal(t, int64(1), deserializedClientMessage.SequenceNumber)
	assert.True(t, reflect.DeepEqual(payload, deserializedClientMessage.Payload))
}

// Functions are not JSON-serializable, so marshalling must fail.
func TestSerializeMessagePayloadNegative(t *testing.T) {
	var functionEx = func() {}
	_, err := SerializeClientMessagePayload(mockLogger, functionEx)
	assert.NotNil(t, err)
}

func TestSerializeAndDeserializeClientMessageWithAcknowledgeContent(t *testing.T) {
	acknowledgeContent := AcknowledgeContent{
		MessageType:         messageType,
		MessageId:           messageId,
		SequenceNumber:      sequenceNumber,
		IsSequentialMessage: true,
	}

	serializedClientMsg, err := SerializeClientMessageWithAcknowledgeContent(log.NewMockLog(), acknowledgeContent)
	deserializedClientMsg := &ClientMessage{}
	err = deserializedClientMsg.DeserializeClientMessage(log.NewMockLog(), serializedClientMsg)
	assert.Nil(t, err)

	deserializedAcknowledgeContent, err := deserializedClientMsg.DeserializeDataStreamAcknowledgeContent(log.NewMockLog())
	assert.Nil(t, err)
	assert.Equal(t, messageType, deserializedAcknowledgeContent.MessageType)
	assert.Equal(t, messageId, deserializedAcknowledgeContent.MessageId)
	assert.Equal(t, sequenceNumber, deserializedAcknowledgeContent.SequenceNumber)
	assert.True(t, deserializedAcknowledgeContent.IsSequentialMessage)
}

func TestDeserializeAgentMessageWithChannelClosed(t *testing.T) {
	channelClosed := ChannelClosed{
		MessageType:   ChannelClosedMessage,
		MessageId:     messageId,
		DestinationId: destinationId,
		SessionId:     sessionId,
		SchemaVersion: 1,
		CreatedDate:   "2018-01-01",
	}

	u, _ := uuid.Parse(messageId)
	channelClosedJson, err := json.Marshal(channelClosed)
	agentMessage := ClientMessage{
		MessageType:    ChannelClosedMessage,
		SchemaVersion:  schemaVersion,
		CreatedDate:    createdDate,
		SequenceNumber: 1,
		Flags:          2,
		MessageId:      u,
		Payload:        channelClosedJson,
	}

	deserializedChannelClosed, err := agentMessage.DeserializeChannelClosedMessage(log.NewMockLog())
	assert.Nil(t, err)
	assert.Equal(t, ChannelClosedMessage, deserializedChannelClosed.MessageType)
	assert.Equal(t, messageId, deserializedChannelClosed.MessageId)
	assert.Equal(t, sessionId, deserializedChannelClosed.SessionId)
	assert.Equal(t, "destination-id", deserializedChannelClosed.DestinationId)
}
1,065
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // retry implements back off retry strategy for reconnect web socket connection. package retry import ( "time" "github.com/aws/session-manager-plugin/src/log" ) const sleepConstant = 2 // Retry implements back off retry strategy for reconnect web socket connection. func Retry(log log.T, attempts int, sleep time.Duration, fn func() error) (err error) { log.Info("Retrying connection to channel") for attempts > 0 { attempts-- if err = fn(); err != nil { time.Sleep(sleep) sleep = sleep * sleepConstant log.Debugf("%v attempts to connect web socket connection.", attempts) continue } return nil } return err }
41
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // retry implements back off retry strategy for reconnect web socket connection. package retry import ( "math" "time" ) type Retryer interface { Call() error NextSleepTime(int32) time.Duration } type RepeatableExponentialRetryer struct { CallableFunc func() error GeometricRatio float64 InitialDelayInMilli int MaxDelayInMilli int MaxAttempts int } // NextSleepTime calculates the next delay of retry. func (retryer *RepeatableExponentialRetryer) NextSleepTime(attempt int) time.Duration { return time.Duration(float64(retryer.InitialDelayInMilli)*math.Pow(retryer.GeometricRatio, float64(attempt))) * time.Millisecond } // Call calls the operation and does exponential retry if error happens. func (retryer *RepeatableExponentialRetryer) Call() (err error) { attempt := 0 failedAttemptsSoFar := 0 for { err := retryer.CallableFunc() if err == nil || failedAttemptsSoFar == retryer.MaxAttempts { return err } sleep := retryer.NextSleepTime(attempt) if int(sleep/time.Millisecond) > retryer.MaxDelayInMilli { attempt = 0 sleep = retryer.NextSleepTime(attempt) } time.Sleep(sleep) attempt++ failedAttemptsSoFar++ } }
59
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // retry implements back off retry strategy for reconnect web socket connection. package retry import ( "errors" "math/rand" "testing" "github.com/aws/session-manager-plugin/src/config" "github.com/stretchr/testify/assert" ) var ( callableFunc = func() error { return errors.New("Error occured in callable function") } ) func TestRepeatableExponentialRetryerRetriesForGivenNumberOfMaxRetries(t *testing.T) { retryer := RepeatableExponentialRetryer{ callableFunc, config.RetryBase, rand.Intn(config.DataChannelRetryInitialDelayMillis) + config.DataChannelRetryInitialDelayMillis, config.DataChannelRetryMaxIntervalMillis, config.DataChannelNumMaxRetries, } err := retryer.Call() assert.NotNil(t, err) }
43
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // Package sdkutil provides utilities used to call awssdk. package sdkutil import ( "fmt" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/session-manager-plugin/src/sdkutil/retryer" ) var defaultRegion string var defaultProfile string // GetNewSessionWithEndpoint creates aws sdk session with given profile, region and endpoint func GetNewSessionWithEndpoint(endpoint string) (sess *session.Session, err error) { if sess, err = session.NewSessionWithOptions(session.Options{ Config: aws.Config{ Retryer: newRetryer(), SleepDelay: sleepDelay, Region: aws.String(defaultRegion), Endpoint: aws.String(endpoint), }, SharedConfigState: session.SharedConfigEnable, Profile: defaultProfile, }); err != nil { return nil, fmt.Errorf("Error creating new aws sdk session %s", err) } return sess, nil } // GetDefaultSession creates aws sdk session with given profile and region func GetDefaultSession() (sess *session.Session, err error) { return GetNewSessionWithEndpoint("") } // Sets the region and profile for default aws sessions func SetRegionAndProfile(region string, profile string) { defaultRegion = region defaultProfile = profile } var newRetryer = func() aws.RequestRetryer { r := retryer.SsmCliRetryer{} r.NumMaxRetries = 3 return r } var sleepDelay = func(d time.Duration) { time.Sleep(d) }
66
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may not // use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing // permissions and limitations under the License. // Package retryer overrides the default aws sdk retryer delay logic to better suit the mds needs package retryer import ( "math" "math/rand" "strings" "time" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" ) type SsmCliRetryer struct { client.DefaultRetryer } // RetryRules returns the delay duration before retrying this request again func (s SsmCliRetryer) RetryRules(r *request.Request) time.Duration { // Handle GetMessages Client.Timeout error if r.Operation.Name == "GetMessages" && r.Error != nil && strings.Contains(r.Error.Error(), "Client.Timeout") { // expected error. we will retry with a short 100 ms delay return time.Duration(100 * time.Millisecond) } // retry after a > 1 sec timeout, increasing exponentially with each retry rand.Seed(time.Now().UnixNano()) delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(500) + 1000) return time.Duration(delay) * time.Millisecond }
44
session-manager-plugin
aws
Go
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// Package service is a wrapper for the new Service
package service

// OpenDataChannelInput is the JSON request payload used to open a data
// channel. All fields are required; the min tags document minimum lengths
// but are not enforced by this struct itself.
type OpenDataChannelInput struct {
	// Unexported marker field carrying the structure tag; never populated.
	_ struct{} `type:"structure"`

	// MessageSchemaVersion is a required field
	MessageSchemaVersion *string `json:"MessageSchemaVersion" min:"1" type:"string" required:"true"`

	// RequestId is a required field
	RequestId *string `json:"RequestId" min:"16" type:"string" required:"true"`

	// TokenValue is a required field
	TokenValue *string `json:"TokenValue" min:"1" type:"string" required:"true"`

	// ClientId is a required field
	ClientId *string `json:"ClientId" min:"1" type:"string" required:"true"`
}
33