Dataset schema:

| column | dtype | values / lengths |
|---|---|---|
| repo | string | 856 distinct values |
| pull_number | int64 | 3 to 127k |
| instance_id | string | 12 to 58 chars |
| issue_numbers | sequence of strings | 1 to 5 items |
| base_commit | string | 40 chars |
| patch | string | 67 to 1.54M chars |
| test_patch | string | 0 to 107M chars |
| problem_statement | string | 3 to 307k chars |
| hints_text | string | 0 to 908k chars |
| created_at | timestamp[s] | |
ansible-collections/community.aws
1,181
ansible-collections__community.aws-1181
[ "1137" ]
688c7e89d9a6e7853b1aadab91288ed1c14c7cac
diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py --- a/plugins/modules/ecs_service.py +++ b/plugins/modules/ecs_service.py @@ -159,6 +159,26 @@ required: false choices: ["EC2", "FARGATE"] type: str + capacity_provider_strategy: + version_added: 4.0.0 + description: + - The capacity provider strategy to use with your service. You can specify a maximum of 6 providers per strategy. + required: false + type: list + elements: dict + suboptions: + capacity_provider: + description: + - Name of capacity provider. + type: str + weight: + description: + - The relative percentage of the total number of launched tasks that should use the specified provider. + type: int + base: + description: + - How many tasks, at a minimum, should use the specified provider. + type: int platform_version: type: str description: @@ -250,6 +270,18 @@ placement_strategy: - type: binpack field: memory + +# With capacity_provider_strategy (added in version 4.0) +- community.aws.ecs_service: + state: present + name: test-service + cluster: test-cluster + task_definition: test-task-definition + desired_count: 1 + capacity_provider_strategy: + - capacity_provider: test-capacity-provider-1 + weight: 1 + base: 0 ''' RETURN = r''' @@ -258,6 +290,24 @@ returned: when creating a service type: complex contains: + capacityProviderStrategy: + version_added: 4.0.0 + description: The capacity provider strategy to use with your service. + returned: always + type: complex + contains: + base: + description: How many tasks, at a minimum, should use the specified provider. + returned: always + type: int + capacityProvider: + description: Name of capacity provider. + returned: always + type: str + weight: + description: The relative percentage of the total number of launched tasks that should use the specified provider. + returned: always + type: int clusterArn: description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service. 
returned: always @@ -575,7 +625,7 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan desired_count, client_token, role, deployment_configuration, placement_constraints, placement_strategy, health_check_grace_period_seconds, network_configuration, service_registries, launch_type, platform_version, - scheduling_strategy): + scheduling_strategy, capacity_provider_strategy): params = dict( cluster=cluster_name, @@ -607,7 +657,8 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan # desired count is not required if scheduling strategy is daemon if desired_count is not None: params['desiredCount'] = desired_count - + if capacity_provider_strategy: + params['capacityProviderStrategy'] = capacity_provider_strategy if scheduling_strategy: params['schedulingStrategy'] = scheduling_strategy response = self.ecs.create_service(**params) @@ -615,7 +666,7 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan def update_service(self, service_name, cluster_name, task_definition, desired_count, deployment_configuration, network_configuration, - health_check_grace_period_seconds, force_new_deployment): + health_check_grace_period_seconds, force_new_deployment, capacity_provider_strategy): params = dict( cluster=cluster_name, service=service_name, @@ -625,6 +676,8 @@ def update_service(self, service_name, cluster_name, task_definition, params['networkConfiguration'] = network_configuration if force_new_deployment: params['forceNewDeployment'] = force_new_deployment + if capacity_provider_strategy: + params['capacityProviderStrategy'] = capacity_provider_strategy if health_check_grace_period_seconds is not None: params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds # desired count is not required if scheduling strategy is daemon @@ -703,19 +756,34 @@ def main(): launch_type=dict(required=False, choices=['EC2', 'FARGATE']), platform_version=dict(required=False, type='str'), service_registries=dict(required=False, type='list', default=[], elements='dict'), - scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']) + scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']), + capacity_provider_strategy=dict( + required=False, + type='list', + default=[], + elements='dict', + options=dict( + capacity_provider=dict(type='str'), + weight=dict(type='int'), + base=dict(type='int') + ) + ) ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=[('state', 'present', ['task_definition']), ('launch_type', 'FARGATE', ['network_configuration'])], - required_together=[['load_balancers', 'role']]) + required_together=[['load_balancers', 'role']], + mutually_exclusive=[['launch_type', 'capacity_provider_strategy']]) if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA': if module.params['desired_count'] is None: module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count') + if len(module.params['capacity_provider_strategy']) > 6: + module.fail_json(msg='AWS allows a maximum of six capacity providers in the strategy.') + service_mgr = EcsServiceManager(module) if module.params['network_configuration']: network_configuration = service_mgr.format_network_configuration(module.params['network_configuration']) @@ -727,6 +795,7 @@ def main(): deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration) serviceRegistries = list(map(snake_dict_to_camel_dict, 
module.params['service_registries'])) + capacityProviders = list(map(snake_dict_to_camel_dict, module.params['capacity_provider_strategy'])) try: existing = service_mgr.describe_service(module.params['cluster'], module.params['name']) @@ -775,7 +844,12 @@ def main(): if module.params['service_registries']: if (existing['serviceRegistries'] or []) != serviceRegistries: module.fail_json(msg="It is not possible to update the service registries of an existing service") - + if module.params['capacity_provider_strategy']: + if 'launchType' in existing.keys(): + module.fail_json(msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy.") + if module.params['launch_type']: + if 'capacityProviderStrategy' in existing.keys(): + module.fail_json(msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type.") if (existing['loadBalancers'] or []) != loadBalancers: module.fail_json(msg="It is not possible to update the load balancers of an existing service") @@ -787,7 +861,9 @@ def main(): deploymentConfiguration, network_configuration, module.params['health_check_grace_period_seconds'], - module.params['force_new_deployment']) + module.params['force_new_deployment'], + capacityProviders + ) else: try: @@ -806,7 +882,8 @@ def main(): serviceRegistries, module.params['launch_type'], module.params['platform_version'], - module.params['scheduling_strategy'] + module.params['scheduling_strategy'], + capacityProviders ) except botocore.exceptions.ClientError as e: module.fail_json_aws(e, msg="Couldn't create service")
Capacity Provider Strategy for ECS Services ### Summary Hi All, the ecs_service module does not support the capacity provider strategy. The only possibility is to use the ECS cluster's default capacity provider strategy by removing the launch_type parameter from the module's task definition. Please add this feature; it is extremely important for the cluster autoscaler with EC2 instances. Thank you. ------- https://docs.aws.amazon.com/cli/latest/reference/ecs/create-service.html Option: --capacity-provider-strategy (list) The capacity provider strategy to use for the service. If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used. A capacity provider strategy may contain a maximum of 6 capacity providers. ### Issue Type Feature Idea ### Component Name community.aws.ecs_service ### Additional Information <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: None If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner --->
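The example in the patch uses a single provider; a strategy can also split tasks across several. A minimal, illustrative sketch follows (the provider names, counts and weights are assumptions, not taken from the PR); launch_type is omitted because the patch makes it mutually exclusive with capacity_provider_strategy, and at most six providers are accepted:

```yaml
# Hypothetical sketch: spread tasks across two capacity providers.
- community.aws.ecs_service:
    state: present
    name: test-service
    cluster: test-cluster
    task_definition: test-task-definition
    desired_count: 4
    capacity_provider_strategy:
      - capacity_provider: test-capacity-provider-1
        weight: 3   # roughly three quarters of launched tasks
        base: 1     # always keep at least one task on this provider
      - capacity_provider: test-capacity-provider-2
        weight: 1
```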
2022-05-30T14:56:29
ansible-collections/community.aws
1,188
ansible-collections__community.aws-1188
[ "1151" ]
7b8b588461f4c57abc4c1a87867906a17754bd4c
diff --git a/plugins/modules/lambda_info.py b/plugins/modules/lambda_info.py --- a/plugins/modules/lambda_info.py +++ b/plugins/modules/lambda_info.py @@ -13,16 +13,17 @@ short_description: Gathers AWS Lambda function details description: - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. - - Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) to manage function aliases and - M(community.aws.lambda_event) to manage lambda event source mappings. + - Use module M(community.aws.lambda) to manage the lambda function itself, M(community.aws.lambda_alias) to manage function aliases, + M(community.aws.lambda_event) to manage lambda event source mappings, and M(community.aws.lambda_policy) to manage policy statements. options: query: description: - - Specifies the resource type for which to gather information. Leave blank to retrieve all information. + - Specifies the resource type for which to gather information. + - Defaults to C(all) when I(function_name) is specified. + - Defaults to C(config) when I(function_name) is NOT specified. choices: [ "aliases", "all", "config", "mappings", "policy", "versions", "tags" ] - default: "all" type: str function_name: description: @@ -48,17 +49,20 @@ query: all function_name: myFunction register: my_function_details + # List all versions of a function - name: List function versions community.aws.lambda_info: query: versions function_name: myFunction register: my_function_versions -# List all lambda function versions -- name: List all function + +# List all info for all functions +- name: List all functions community.aws.lambda_info: query: all register: output + - name: show Lambda information ansible.builtin.debug: msg: "{{ output['function'] }}" @@ -120,108 +124,118 @@ def fix_return(node): return node_value -def alias_details(client, module): +def alias_details(client, module, function_name): """ Returns list of aliases for a specified function. :param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() - function_name = module.params.get('function_name') - if function_name: - try: - lambda_info.update(aliases=_paginate(client, 'list_aliases', FunctionName=function_name)['Aliases']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(aliases=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get aliases") - else: - module.fail_json(msg='Parameter function_name required for query=aliases.') + try: + lambda_info.update(aliases=_paginate(client, 'list_aliases', FunctionName=function_name)['Aliases']) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(aliases=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get aliases") - return {function_name: camel_dict_to_snake_dict(lambda_info)} + return camel_dict_to_snake_dict(lambda_info) -def all_details(client, module): +def list_lambdas(client, module): """ - Returns all lambda related facts. + Returns queried facts for a specified function (or all functions). 
:param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ - lambda_info = dict() - function_name = module.params.get('function_name') if function_name: - lambda_info[function_name] = {} - lambda_info[function_name].update(config_details(client, module)[function_name]) - lambda_info[function_name].update(alias_details(client, module)[function_name]) - lambda_info[function_name].update(policy_details(client, module)[function_name]) - lambda_info[function_name].update(version_details(client, module)[function_name]) - lambda_info[function_name].update(mapping_details(client, module)[function_name]) - lambda_info[function_name].update(tags_details(client, module)[function_name]) + # Function name is specified - retrieve info on that function + function_names = [function_name] + else: - lambda_info.update(config_details(client, module)) + # Function name is not specified - retrieve all function names + all_function_info = _paginate(client, 'list_functions')['Functions'] + function_names = [function_info['FunctionName'] for function_info in all_function_info] + + query = module.params['query'] + lambdas = dict() + + for function_name in function_names: + lambdas[function_name] = {} - return lambda_info + if query == 'all': + lambdas[function_name].update(config_details(client, module, function_name)) + lambdas[function_name].update(alias_details(client, module, function_name)) + lambdas[function_name].update(policy_details(client, module, function_name)) + lambdas[function_name].update(version_details(client, module, function_name)) + lambdas[function_name].update(mapping_details(client, module, function_name)) + lambdas[function_name].update(tags_details(client, module, function_name)) + elif query == 'config': + lambdas[function_name].update(config_details(client, module, function_name)) -def config_details(client, module): + elif query == 'aliases': + lambdas[function_name].update(alias_details(client, module, function_name)) + + elif query == 'policy': + lambdas[function_name].update(policy_details(client, module, function_name)) + + elif query == 'versions': + lambdas[function_name].update(version_details(client, module, function_name)) + + elif query == 'mappings': + lambdas[function_name].update(mapping_details(client, module, function_name)) + + elif query == 'tags': + lambdas[function_name].update(tags_details(client, module, function_name)) + + return lambdas + + +def config_details(client, module, function_name): """ - Returns configuration details for one or all lambda functions. + Returns configuration details for a lambda function. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() - function_name = module.params.get('function_name') - if function_name: - try: - lambda_info.update(client.get_function_configuration(aws_retry=True, FunctionName=function_name)) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(function={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) - else: - try: - lambda_info.update(function_list=_paginate(client, 'list_functions')['Functions']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(function_list=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get function list") - - functions = dict() - for func in lambda_info.pop('function_list', []): - func['tags'] = client.get_function(FunctionName=func['FunctionName']).get('Tags', {}) - functions[func['FunctionName']] = camel_dict_to_snake_dict(func) - return functions + try: + lambda_info.update(client.get_function_configuration(aws_retry=True, FunctionName=function_name)) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(function={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) - return {function_name: camel_dict_to_snake_dict(lambda_info)} + return camel_dict_to_snake_dict(lambda_info) -def mapping_details(client, module): +def mapping_details(client, module, function_name): """ Returns all lambda event source mappings. :param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() params = dict() - function_name = module.params.get('function_name') - if function_name: - params['FunctionName'] = module.params.get('function_name') + params['FunctionName'] = function_name if module.params.get('event_source_arn'): params['EventSourceArn'] = module.params.get('event_source_arn') @@ -233,86 +247,74 @@ def mapping_details(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Trying to get source event mappings") - if function_name: - return {function_name: camel_dict_to_snake_dict(lambda_info)} - return camel_dict_to_snake_dict(lambda_info) -def policy_details(client, module): +def policy_details(client, module, function_name): """ Returns policy attached to a lambda function. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() - function_name = module.params.get('function_name') - if function_name: - try: - # get_policy returns a JSON string so must convert to dict before reassigning to its key - lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)['Policy'])) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(policy={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) - else: - module.fail_json(msg='Parameter function_name required for query=policy.') + try: + # get_policy returns a JSON string so must convert to dict before reassigning to its key + lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)['Policy'])) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(policy={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) - return {function_name: camel_dict_to_snake_dict(lambda_info)} + return camel_dict_to_snake_dict(lambda_info) -def version_details(client, module): +def version_details(client, module, function_name): """ Returns all lambda function versions. :param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() - function_name = module.params.get('function_name') - if function_name: - try: - lambda_info.update(versions=_paginate(client, 'list_versions_by_function', FunctionName=function_name)['Versions']) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(versions=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) - else: - module.fail_json(msg='Parameter function_name required for query=versions.') + try: + lambda_info.update(versions=_paginate(client, 'list_versions_by_function', FunctionName=function_name)['Versions']) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(versions=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) - return {function_name: camel_dict_to_snake_dict(lambda_info)} + return camel_dict_to_snake_dict(lambda_info) -def tags_details(client, module): +def tags_details(client, module, function_name): """ - Returns tag details for one or all lambda functions. + Returns tag details for a lambda function. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query :return dict: """ lambda_info = dict() - function_name = module.params.get('function_name') - if function_name: - try: - lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get('Tags', {})) - except is_boto3_error_code('ResourceNotFoundException'): - lambda_info.update(function={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} tags".format(function_name)) - else: - module.fail_json(msg='Parameter function_name required for query=tags.') + try: + lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get('Tags', {})) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(function={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} tags".format(function_name)) - return {function_name: camel_dict_to_snake_dict(lambda_info)} + return camel_dict_to_snake_dict(lambda_info) def main(): @@ -323,7 +325,7 @@ def main(): """ argument_spec = dict( function_name=dict(required=False, default=None, aliases=['function', 'name']), - query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions', 'tags'], default='all'), + query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions', 'tags'], default=None), event_source_arn=dict(required=False, default=None), ) @@ -344,20 +346,18 @@ def main(): if len(function_name) > 64: module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + # create default values for query if not specified. + # if function name exists, query should default to 'all'. + # if function name does not exist, query should default to 'config' to limit the runtime when listing all lambdas. + if not module.params.get('query'): + if function_name: + module.params['query'] = 'all' + else: + module.params['query'] = 'config' - invocations = dict( - aliases='alias_details', - all='all_details', - config='config_details', - mappings='mapping_details', - policy='policy_details', - versions='version_details', - tags='tags_details', - ) + client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) - this_module_function = globals()[invocations[module.params['query']]] - all_facts = fix_return(this_module_function(client, module)) + all_facts = fix_return(list_lambdas(client, module)) results = dict(function=all_facts, changed=False)
diff --git a/tests/integration/targets/lambda/tasks/main.yml b/tests/integration/targets/lambda/tasks/main.yml --- a/tests/integration/targets/lambda/tasks/main.yml +++ b/tests/integration/targets/lambda/tasks/main.yml @@ -252,10 +252,9 @@ - result.configuration.runtime == 'python3.6' - result.configuration.tracing_config.mode == 'PassThrough' - # Query the Lambda - - name: lambda_info | Gather all infos for given lambda function + # Test lambda_info + - name: lambda_info | Gather all infos for all lambda functions lambda_info: - name: '{{ lambda_function_name }}' query: all register: lambda_infos_all check_mode: yes @@ -263,17 +262,54 @@ assert: that: - lambda_infos_all is not failed + - lambda_infos_all.function | length > 0 - lambda_infos_all.function[lambda_function_name].function_name == lambda_function_name - lambda_infos_all.function[lambda_function_name].runtime == "python3.6" + - lambda_infos_all.function[lambda_function_name].description == "" + - lambda_infos_all.function[lambda_function_name].function_arn is defined + - lambda_infos_all.function[lambda_function_name].handler == "mini_lambda.handler" - lambda_infos_all.function[lambda_function_name].versions is defined - lambda_infos_all.function[lambda_function_name].aliases is defined - lambda_infos_all.function[lambda_function_name].policy is defined - lambda_infos_all.function[lambda_function_name].mappings is defined - - lambda_infos_all.function[lambda_function_name].description == "" - - lambda_infos_all.function[lambda_function_name].function_arn is defined - - lambda_infos_all.function[lambda_function_name].handler == "mini_lambda.handler" - lambda_infos_all.function[lambda_function_name].tags is defined + - name: lambda_info | Ensure default query value is 'config' when function name omitted + lambda_info: + register: lambda_infos_query_config + check_mode: yes + - name: lambda_info | Assert successfull retrieval of all information + assert: + that: + - lambda_infos_query_config is not failed + - lambda_infos_query_config.function | length > 0 + - lambda_infos_query_config.function[lambda_function_name].function_name == lambda_function_name + - lambda_infos_query_config.function[lambda_function_name].runtime == "python3.6" + - lambda_infos_query_config.function[lambda_function_name].description == "" + - lambda_infos_query_config.function[lambda_function_name].function_arn is defined + - lambda_infos_query_config.function[lambda_function_name].handler == "mini_lambda.handler" + - lambda_infos_query_config.function[lambda_function_name].versions is not defined + - lambda_infos_query_config.function[lambda_function_name].aliases is not defined + - lambda_infos_query_config.function[lambda_function_name].policy is not defined + - lambda_infos_query_config.function[lambda_function_name].mappings is not defined + - lambda_infos_query_config.function[lambda_function_name].tags is not defined + + - name: lambda_info | Ensure default query value is 'all' when function name specified + lambda_info: + name: '{{ lambda_function_name }}' + register: lambda_infos_query_all + - name: lambda_info | Assert successfull retrieval of all information + assert: + that: + - lambda_infos_query_all is not failed + - lambda_infos_query_all.function | length == 1 + - lambda_infos_query_all.function[lambda_function_name].versions|length > 0 + - lambda_infos_query_all.function[lambda_function_name].function_name is defined + - lambda_infos_query_all.function[lambda_function_name].policy is defined + - 
lambda_infos_query_all.function[lambda_function_name].aliases is defined + - lambda_infos_query_all.function[lambda_function_name].mappings is defined + - lambda_infos_query_all.function[lambda_function_name].tags is defined + - name: lambda_info | Gather version infos for given lambda function lambda_info: name: '{{ lambda_function_name }}' @@ -283,8 +319,13 @@ assert: that: - lambda_infos_versions is not failed + - lambda_infos_versions.function | length == 1 - lambda_infos_versions.function[lambda_function_name].versions|length > 0 - lambda_infos_versions.function[lambda_function_name].function_name is undefined + - lambda_infos_versions.function[lambda_function_name].policy is undefined + - lambda_infos_versions.function[lambda_function_name].aliases is undefined + - lambda_infos_versions.function[lambda_function_name].mappings is undefined + - lambda_infos_versions.function[lambda_function_name].tags is undefined - name: lambda_info | Gather config infos for given lambda function lambda_info: @@ -295,9 +336,14 @@ assert: that: - lambda_infos_config is not failed + - lambda_infos_config.function | length == 1 - lambda_infos_config.function[lambda_function_name].function_name == lambda_function_name - lambda_infos_config.function[lambda_function_name].description is defined - lambda_infos_config.function[lambda_function_name].versions is undefined + - lambda_infos_config.function[lambda_function_name].policy is undefined + - lambda_infos_config.function[lambda_function_name].aliases is undefined + - lambda_infos_config.function[lambda_function_name].mappings is undefined + - lambda_infos_config.function[lambda_function_name].tags is undefined - name: lambda_info | Gather policy infos for given lambda function lambda_info: @@ -308,8 +354,13 @@ assert: that: - lambda_infos_policy is not failed + - lambda_infos_policy.function | length == 1 - lambda_infos_policy.function[lambda_function_name].policy is defined - lambda_infos_policy.function[lambda_function_name].versions is undefined + - lambda_infos_policy.function[lambda_function_name].function_name is undefined + - lambda_infos_policy.function[lambda_function_name].aliases is undefined + - lambda_infos_policy.function[lambda_function_name].mappings is undefined + - lambda_infos_policy.function[lambda_function_name].tags is undefined - name: lambda_info | Gather aliases infos for given lambda function lambda_info: @@ -320,7 +371,13 @@ assert: that: - lambda_infos_aliases is not failed + - lambda_infos_aliases.function | length == 1 - lambda_infos_aliases.function[lambda_function_name].aliases is defined + - lambda_infos_aliases.function[lambda_function_name].versions is undefined + - lambda_infos_aliases.function[lambda_function_name].function_name is undefined + - lambda_infos_aliases.function[lambda_function_name].policy is undefined + - lambda_infos_aliases.function[lambda_function_name].mappings is undefined + - lambda_infos_aliases.function[lambda_function_name].tags is undefined - name: lambda_info | Gather mappings infos for given lambda function lambda_info: @@ -331,7 +388,13 @@ assert: that: - lambda_infos_mappings is not failed + - lambda_infos_mappings.function | length == 1 - lambda_infos_mappings.function[lambda_function_name].mappings is defined + - lambda_infos_mappings.function[lambda_function_name].versions is undefined + - lambda_infos_mappings.function[lambda_function_name].function_name is undefined + - lambda_infos_mappings.function[lambda_function_name].aliases is undefined + - 
lambda_infos_mappings.function[lambda_function_name].policy is undefined + - lambda_infos_mappings.function[lambda_function_name].tags is undefined # More Lambda update tests - name: test state=present with all nullable variables explicitly set to null @@ -523,11 +586,19 @@ - result is not failed always: - - name: ensure function is absent at end of test + + - name: ensure functions are absent at end of test lambda: - name: '{{lambda_function_name}}' + name: "{{ item }}" state: absent ignore_errors: true + with_items: + - "{{ lambda_function_name }}" + - "{{ lambda_function_name }}_1" + - "{{ lambda_function_name }}_2" + - "{{ lambda_function_name }}_3" + - "{{ lambda_function_name }}_4" + - name: ensure role has been removed at end of test iam_role: name: '{{ lambda_role_name }}'
lambda_info - only queries config info when function_name is omitted ### Summary When trying to retrieve info on all lambda functions (by omitting `function_name`), module acts as if `query = config` ### Issue Type Bug Report ### Component Name lambda_info ### Ansible Version ```console (paste below) $ ansible --version ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) # Query the Lambda - name: lambda_info | Gather all infos for all lambda functions lambda_info: register: lambda_infos_all check_mode: yes - name: lambda_info | Assert successfull retrieval of all information assert: that: - lambda_infos_all | length > 0 - lambda_infos_all is not failed - lambda_infos_all.function[lambda_function_name].function_name == lambda_function_name - lambda_infos_all.function[lambda_function_name].runtime == "python3.6" - lambda_infos_all.function[lambda_function_name].versions is defined - lambda_infos_all.function[lambda_function_name].aliases is defined - lambda_infos_all.function[lambda_function_name].policy is defined - lambda_infos_all.function[lambda_function_name].mappings is defined - lambda_infos_all.function[lambda_function_name].description == "" - lambda_infos_all.function[lambda_function_name].function_arn is defined - lambda_infos_all.function[lambda_function_name].handler == "mini_lambda.handler" - lambda_infos_all.function[lambda_function_name].tags is defined ``` ### Expected Results ```yaml (paste below) "function": { "70f9dc46f894": { "aliases": [], "code_sha256": "LC/DkOB9wKYLWMSMqYoGPw8yyXUVHcDcdShg/MUKyYM=", "code_size": 854, "description": "", "function_arn": "arn:aws:lambda:us-east-1:721066863947:function:70f9dc46f894", "function_name": "70f9dc46f894", "handler": "mini_lambda.handler", "last_modified": "2022-05-19T19:46:15.375+0000", "mappings": [], "memory_size": 128, "package_type": "Zip", "policy": {}, "response_metadata": { "http_headers": { "connection": "keep-alive", "content-length": "986", "content-type": "application/json", "date": "Thu, 19 May 2022 19:46:18 GMT", "x-amzn-requestid": "0b50adea-e1ed-4f25-975f-f0eec9478282" }, "http_status_code": 200, "request_id": "0b50adea-e1ed-4f25-975f-f0eec9478282", "retry_attempts": 0 }, "revision_id": "20bbfe78-8589-46ca-8fb3-6ff089ea7bab", "role": "arn:aws:iam::721066863947:role/ansible-test-70f9dc46f894-lambda", "runtime": "python3.6", "state": "Pending", "state_reason": "The function is being created.", "state_reason_code": "Creating", "tags": {}, "timeout": 3, "tracing_config": { "mode": "PassThrough" }, "version": "$LATEST", "versions": [ { "code_sha256": "LC/DkOB9wKYLWMSMqYoGPw8yyXUVHcDcdShg/MUKyYM=", "code_size": 854, "description": "", "function_arn": "arn:aws:lambda:us-east-1:721066863947:function:70f9dc46f894:$LATEST", "function_name": "70f9dc46f894", "handler": "mini_lambda.handler", "last_modified": "2022-05-19T19:46:15.375+0000", "memory_size": 128, "package_type": "Zip", "revision_id": "20bbfe78-8589-46ca-8fb3-6ff089ea7bab", "role": "arn:aws:iam::721066863947:role/ansible-test-70f9dc46f894-lambda", "runtime": "python3.6", "timeout": 3, "tracing_config": { "mode": "PassThrough" }, "version": "$LATEST" }, { "code_sha256": "LC/DkOB9wKYLWMSMqYoGPw8yyXUVHcDcdShg/MUKyYM=", 
"code_size": 854, "description": "", "function_arn": "arn:aws:lambda:us-east-1:721066863947:function:70f9dc46f894:1", "function_name": "70f9dc46f894", "handler": "mini_lambda.handler", "last_modified": "2022-05-19T19:46:15.375+0000", "memory_size": 128, "package_type": "Zip", "revision_id": "23d33d24-983b-47e2-92cd-142bc18b1833", "role": "arn:aws:iam::721066863947:role/ansible-test-70f9dc46f894-lambda", "runtime": "python3.6", "timeout": 3, "tracing_config": { "mode": "PassThrough" }, "version": "1" } ] } ``` ### Actual Results ```yaml (paste below) "function": { "7a603343767f": { "code_sha256": "HcAK+Oux100a8L18ww1SdRC1NgwU4CTn0RHIj65dHO4=", "code_size": 854, "description": "", "function_arn": "arn:aws:lambda:us-east-1:721066863947:function:7a603343767f", "function_name": "7a603343767f", "handler": "mini_lambda.handler", "last_modified": "2022-05-19T19:59:25.000+0000", "memory_size": 128, "package_type": "Zip", "revision_id": "d1d89e30-797d-453f-92ae-efc1f6ab7c34", "role": "arn:aws:iam::721066863947:role/ansible-test-7a603343767f-lambda", "runtime": "python3.6", "tags": { "camel_case": "ACamelCaseValue", "snake_case": "a_snake_case_value", "spaced key": "A value with spaces" }, "timeout": 3, "tracing_config": { "mode": "PassThrough" }, "version": "$LATEST" } ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/lambda_info.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/lambda_info.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @pjodouin @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
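The fix makes query default to 'all' when function_name is given and to 'config' when it is omitted, with the result always keyed by function name. A small hedged sketch of how that plays out in a playbook (the register names and the debug loop are illustrative, not part of the PR):

```yaml
# Sketch: rely on the new query defaults instead of passing query explicitly.
- name: Config-only listing of every Lambda function (query defaults to 'config')
  community.aws.lambda_info:
  register: all_lambdas

- name: Full details for a single function (query defaults to 'all')
  community.aws.lambda_info:
    function_name: myFunction   # example name, as in the module docs
  register: one_lambda

- name: Show the runtime of each function found
  ansible.builtin.debug:
    msg: "{{ item.key }} runs {{ item.value.runtime }}"
  loop: "{{ all_lambdas.function | dict2items }}"
```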
2022-06-01T05:21:08
ansible-collections/community.aws
1,195
ansible-collections__community.aws-1195
[ "836" ]
b11ffaed2b3450f6fee9721878090da404401021
diff --git a/plugins/modules/sns_topic.py b/plugins/modules/sns_topic.py --- a/plugins/modules/sns_topic.py +++ b/plugins/modules/sns_topic.py @@ -26,8 +26,11 @@ type: str topic_type: description: - - The type of topic that should be created. Either Standard for FIFO (first-in, first-out) - choices: ['standard', 'fifo'] + - The type of topic that should be created. Either Standard for FIFO (first-in, first-out). + - Some regions, including GovCloud regions do not support FIFO topics. + Use a default value of 'standard' or omit the option if the region + does not support FIFO topics. + choices: ["standard", "fifo"] default: 'standard' type: str version_added: 2.0.0 @@ -363,9 +366,11 @@ def __init__(self, self.attributes_set = [] def _create_topic(self): - attributes = {'FifoTopic': 'false'} + attributes = {} tags = [] + # NOTE: Never set FifoTopic = False. Some regions (including GovCloud) + # don't support the attribute being set, even to False. if self.topic_type == 'fifo': attributes['FifoTopic'] = 'true' if not self.name.endswith('.fifo'): @@ -373,7 +378,9 @@ def _create_topic(self): if not self.check_mode: try: - response = self.connection.create_topic(Name=self.name, Attributes=attributes, Tags=tags) + response = self.connection.create_topic(Name=self.name, + Attributes=attributes, + Tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name) self.topic_arn = response['TopicArn'] @@ -506,7 +513,6 @@ def ensure_gone(self): def main(): - # We're kinda stuck with CamelCase here, it would be nice to switch to # snake_case, but we'd need to purge out the alias entries http_retry_args = dict(
community.aws.sns_topic does not work in GovCloud ### Summary The community.aws.sns_topic does not work in the GovCloud Region. ### Issue Type Bug Report ### Component Name sns_topic ### Ansible Version ``` ansible 2.9.27 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/ec2-user/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python3.6/site-packages/ansible executable location = /usr/bin/ansible python version = 3.6.8 (default, Mar 18 2021, 08:58:41) [GCC 8.4.1 20200928 (Red Hat 8.4.1-1)] ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` This command doesn't work anymore I don't think. I only have the community.aws collection installed to test this issue. ### AWS SDK versions ``` pip3 show boto boto3 botocore Name: boto3 Version: 1.20.24 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /home/ec2-user/.local/lib/python3.6/site-packages Requires: s3transfer, jmespath, botocore --- Name: botocore Version: 1.23.24 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /home/ec2-user/.local/lib/python3.6/site-packages Requires: python-dateutil, jmespath, urllib3 ``` ### Configuration Empty ### OS / Environment Red Hat Enterprise Linux release 8.4 (Ootpa) ### Steps to Reproduce ``` --- - hosts: localhost gather_facts: true tasks: - name: Create SNS delegate_to: localhost community.aws.sns_topic: name: "example" state: present display_name: "example sns topic" ec2_url: "https://sns.us-gov-west-1.amazonaws.com" topic_type: standard ``` ``` ansible-playbook -vvv -i localhost sns.yaml ``` ### Expected Results I expect an SNS topic to be created in my AWS GovCloud Account ### Actual Results ``` The full traceback is: Traceback (most recent call last): File "/tmp/ansible_community.aws.sns_topic_payload_04_kg752/ansible_community.aws.sns_topic_payload.zip/ansible_collections/community/aws/plugins/modules/sns_topic.py", line 378, in _create_topic File "/home/ec2-user/.local/lib/python3.6/site-packages/botocore/client.py", line 391, in _api_call return self._make_api_call(operation_name, kwargs) File "/home/ec2-user/.local/lib/python3.6/site-packages/botocore/client.py", line 719, in _make_api_call raise error_class(parsed_response, operation_name) botocore.errorfactory.InvalidParameterException: An error occurred (InvalidParameter) when calling the CreateTopic operation: Invalid parameter: Attributes Reason: Unknown attribute FifoTopic fatal: [localhost -> localhost]: FAILED! 
=> { "boto3_version": "1.20.24", "botocore_version": "1.23.24", "changed": false, "error": { "code": "InvalidParameter", "message": "Invalid parameter: Attributes Reason: Unknown attribute FifoTopic", "type": "Sender" }, "invocation": { "module_args": { "aws_access_key": null, "aws_ca_bundle": null, "aws_config": null, "aws_secret_key": null, "debug_botocore_endpoint_logs": false, "delivery_policy": null, "display_name": "example sns topic", "ec2_url": "https://sns.us-gov-west-1.amazonaws.com", "name": "example", "policy": null, "profile": null, "purge_subscriptions": true, "region": null, "security_token": null, "state": "present", "subscriptions": [], "topic_type": "standard", "validate_certs": true } }, "msg": "Couldn't create topic example: An error occurred (InvalidParameter) when calling the CreateTopic operation: Invalid parameter: Attributes Reason: Unknown attribute FifoTopic", "response_metadata": { "http_headers": { "content-length": "312", "content-type": "text/xml", "date": "Thu, 16 Dec 2021 23:35:12 GMT", "x-amzn-requestid": "6a6cc2e8-a524-5182-8158-98dd6a553688" }, "http_status_code": 400, "request_id": "6a6cc2e8-a524-5182-8158-98dd6a553688", "retry_attempts": 0 } } ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
I suspect the issue is here: https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/sns_topic.py#L368 This appears to add a FifoTopic attrribute, set to false by default. Perhaps the SNS API in GovCloud does not have this parameter and is the reason for the error? If I comment out the attributes line and set it to empty, the topic is created successfully ``` #attributes = {'FifoTopic': 'false'} attributes = {} ``` @dmc5179 that is correct, [FIFO topics are not supported in GovCloud](https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-sns.html). Fix would consist in parsing the `ec2_url` or (even better) determining endpoint (commercial VS GovCloud) and pop the `fifo` attribute when the endpoint is GovCloud. cc @jillr @joelthompson @markuman @nand0p @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> We tried out what @dmc5179 did too, and it worked. Do we actually NEED that attribute if it's false? Can it just be removed altogether if not set? Otherwise, checking the endpoint somehow as @Razique suggested and removing the attribute altogether just for GovCloud.
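With the change, the FifoTopic attribute is only sent when a FIFO topic is actually requested, so a plain standard topic no longer trips GovCloud, while FIFO topics keep working in regions that support them. A hedged sketch (topic names are illustrative; per the patch the module appends ".fifo" to a FIFO topic's name if it is missing):

```yaml
# Standard topic: no FifoTopic attribute is sent, so this also works in GovCloud.
- community.aws.sns_topic:
    name: example
    state: present
    display_name: example sns topic
    topic_type: standard

# FIFO topic (not supported in GovCloud regions); ".fifo" is appended to the name if absent.
- community.aws.sns_topic:
    name: example-fifo-topic
    state: present
    topic_type: fifo
```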
2022-06-02T00:01:19
ansible-collections/community.aws
1,197
ansible-collections__community.aws-1197
[ "1075" ]
b11ffaed2b3450f6fee9721878090da404401021
diff --git a/plugins/modules/ec2_customer_gateway.py b/plugins/modules/ec2_customer_gateway.py --- a/plugins/modules/ec2_customer_gateway.py +++ b/plugins/modules/ec2_customer_gateway.py @@ -23,7 +23,8 @@ options: bgp_asn: description: - - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present). + - Border Gateway Protocol (BGP) Autonomous System Number (ASN). + - Defaults to C(65000) if not specified when I(state=present). type: int ip_address: description:
ec2_customer_gateway: bgp_asn is not required ### Summary The ec2_customer_gateway module has incorrect documentation for the bgp_asn parameter. It says the ASN must be passed when state=present, but the code defaults to 25000 if the parameter is absent. See the ensure_cgw_present() method: ``` def ensure_cgw_present(self, bgp_asn, ip_address): if not bgp_asn: bgp_asn = 65000 response = self.ec2.create_customer_gateway( DryRun=False, Type='ipsec.1', PublicIp=ip_address, BgpAsn=bgp_asn, ) return response ### Issue Type Documentation Report ### Component Name ec2_customer_gateway ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.4] config file = None configured module search path = ['/home/neil/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/lib/python3.10/site-packages/ansible ansible collection location = /home/neil/.ansible/collections:/usr/share/ansible/collections executable location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/bin/ansible python version = 3.10.1 (main, Jan 10 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)] jinja version = 3.1.1 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment main branch, as of 2022-04-18. ### Additional Information Suggested rewording: ``` options: bgp_asn: description: - Border Gateway Protocol (BGP) Autonomous System Number (ASN), defaults to 25000. type: int ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/ec2_customer_gateway.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ec2_customer_gateway.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @MichaelBaydoun @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @neilkatin Thank you for raising this. `bgp_asn` shouldn't be a required parameter anymore since it defaults to 65000 as reported in the boto3 documentation. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.create_customer_gateway Would you be willing to open a pull request to fix that inconsistency?
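Per the maintainers' note that bgp_asn effectively defaults to 65000, a task can simply omit it. A minimal sketch under assumed values (the gateway name and IP address are made up for illustration):

```yaml
# bgp_asn omitted: the module falls back to ASN 65000 when state=present.
- community.aws.ec2_customer_gateway:
    name: example-cgw          # illustrative name
    ip_address: 203.0.113.10   # illustrative public IP of the on-prem device
    state: present
  register: cgw
```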
2022-06-02T03:21:06
ansible-collections/community.aws
1,206
ansible-collections__community.aws-1206
[ "1075" ]
f23fad23e7d3187d3b111220fad72e37ee08574d
diff --git a/plugins/modules/ec2_customer_gateway.py b/plugins/modules/ec2_customer_gateway.py --- a/plugins/modules/ec2_customer_gateway.py +++ b/plugins/modules/ec2_customer_gateway.py @@ -23,7 +23,8 @@ options: bgp_asn: description: - - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present). + - Border Gateway Protocol (BGP) Autonomous System Number (ASN). + - Defaults to C(65000) if not specified when I(state=present). type: int ip_address: description:
ec2_customer_gateway: bgp_asn is not required ### Summary The ec2_customer_gateway module has incorrect documentation for the bgp_asn parameter. It says the ASN must be passed when state=present, but the code defaults to 25000 if the parameter is absent. See the ensure_cgw_present() method: ``` def ensure_cgw_present(self, bgp_asn, ip_address): if not bgp_asn: bgp_asn = 65000 response = self.ec2.create_customer_gateway( DryRun=False, Type='ipsec.1', PublicIp=ip_address, BgpAsn=bgp_asn, ) return response ### Issue Type Documentation Report ### Component Name ec2_customer_gateway ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.4] config file = None configured module search path = ['/home/neil/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/lib/python3.10/site-packages/ansible ansible collection location = /home/neil/.ansible/collections:/usr/share/ansible/collections executable location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/bin/ansible python version = 3.10.1 (main, Jan 10 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)] jinja version = 3.1.1 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment main branch, as of 2022-04-18. ### Additional Information Suggested rewording: ``` options: bgp_asn: description: - Border Gateway Protocol (BGP) Autonomous System Number (ASN), defaults to 25000. type: int ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/ec2_customer_gateway.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ec2_customer_gateway.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @MichaelBaydoun @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @neilkatin Thank you for raising this. `bgp_asn` shouldn't be a required parameter anymore since it defaults to 65000 as reported in the boto3 documentation. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.create_customer_gateway Would you be willing to open a pull request to fix that inconsistency?
2022-06-03T12:56:37
ansible-collections/community.aws
1,207
ansible-collections__community.aws-1207
[ "1075" ]
0f5cb18869c057fcff79f88d16b751f8da838b64
diff --git a/plugins/modules/ec2_customer_gateway.py b/plugins/modules/ec2_customer_gateway.py --- a/plugins/modules/ec2_customer_gateway.py +++ b/plugins/modules/ec2_customer_gateway.py @@ -23,7 +23,8 @@ options: bgp_asn: description: - - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present). + - Border Gateway Protocol (BGP) Autonomous System Number (ASN). + - Defaults to C(65000) if not specified when I(state=present). type: int ip_address: description:
ec2_customer_gateway: bgp_asn is not required ### Summary The ec2_customer_gateway module has incorrect documentation for the bgp_asn parameter. It says the ASN must be passed when state=present, but the code defaults to 25000 if the parameter is absent. See the ensure_cgw_present() method: ``` def ensure_cgw_present(self, bgp_asn, ip_address): if not bgp_asn: bgp_asn = 65000 response = self.ec2.create_customer_gateway( DryRun=False, Type='ipsec.1', PublicIp=ip_address, BgpAsn=bgp_asn, ) return response ### Issue Type Documentation Report ### Component Name ec2_customer_gateway ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.4] config file = None configured module search path = ['/home/neil/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/lib/python3.10/site-packages/ansible ansible collection location = /home/neil/.ansible/collections:/usr/share/ansible/collections executable location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/bin/ansible python version = 3.10.1 (main, Jan 10 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)] jinja version = 3.1.1 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment main branch, as of 2022-04-18. ### Additional Information Suggested rewording: ``` options: bgp_asn: description: - Border Gateway Protocol (BGP) Autonomous System Number (ASN), defaults to 25000. type: int ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/ec2_customer_gateway.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ec2_customer_gateway.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @MichaelBaydoun @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @neilkatin Thank you for raising this. `bgp_asn` shouldn't be a required parameter anymore since it defaults to 65000 as reported in the boto3 documentation. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.create_customer_gateway Would you be willing to open a pull request to fix that inconsistency?
2022-06-03T12:56:50
ansible-collections/community.aws
1,215
ansible-collections__community.aws-1215
[ "921" ]
b6ed8d514021422f43bddfd2b270918893abc583
diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py --- a/plugins/modules/ecs_service.py +++ b/plugins/modules/ecs_service.py @@ -101,6 +101,16 @@ minimum_healthy_percent: type: int description: A lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. + deployment_circuit_breaker: + type: dict + description: The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state. + suboptions: + enable: + type: bool + description: If enabled, a service deployment will transition to a failed state and stop launching new tasks. + rollback: + type: bool + description: If enabled, ECS will roll back your service to the last completed deployment after a failure. placement_constraints: description: - The placement constraints for the tasks in the service. @@ -272,6 +282,18 @@ - type: binpack field: memory +# With deployment circuit breaker (added in version 4.0) +- community.aws.ecs_service: + state: present + name: test-service + cluster: test-cluster + task_definition: test-task-definition + desired_count: 3 + deployment_configuration: + deployment_circuit_breaker: + enable: True + rollback: True + # With capacity_provider_strategy (added in version 4.0) - community.aws.ecs_service: state: present @@ -378,6 +400,19 @@ description: minimumHealthyPercent param returned: always type: int + deploymentCircuitBreaker: + description: dictionary of deploymentCircuitBreaker + returned: always + type: complex + contains: + enable: + description: The state of the circuit breaker feature. + returned: always + type: bool + rollback: + description: The state of the rollback feature of the circuit breaker. + returned: always + type: bool events: description: list of service events returned: always @@ -494,6 +529,19 @@ description: minimumHealthyPercent param returned: always type: int + deploymentCircuitBreaker: + description: dictionary of deploymentCircuitBreaker + returned: always + type: complex + contains: + enable: + description: The state of the circuit breaker feature. + returned: always + type: bool + rollback: + description: The state of the rollback feature of the circuit breaker. + returned: always + type: bool events: description: list of service events returned: always @@ -535,7 +583,8 @@ DEPLOYMENT_CONFIGURATION_TYPE_MAP = { 'maximum_percent': 'int', - 'minimum_healthy_percent': 'int' + 'minimum_healthy_percent': 'int', + 'deployment_circuit_breaker': 'dict', } from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
diff --git a/tests/integration/targets/ecs_cluster/defaults/main.yml b/tests/integration/targets/ecs_cluster/defaults/main.yml --- a/tests/integration/targets/ecs_cluster/defaults/main.yml +++ b/tests/integration/targets/ecs_cluster/defaults/main.yml @@ -24,6 +24,9 @@ ecs_task_containers: ecs_service_deployment_configuration: minimum_healthy_percent: 0 maximum_percent: 100 + deployment_circuit_breaker: + enable: true + rollback: true ecs_service_placement_strategy: - type: binpack field: memory diff --git a/tests/integration/targets/ecs_cluster/tasks/main.yml b/tests/integration/targets/ecs_cluster/tasks/main.yml --- a/tests/integration/targets/ecs_cluster/tasks/main.yml +++ b/tests/integration/targets/ecs_cluster/tasks/main.yml @@ -236,6 +236,13 @@ that: - ecs_service.changed + - name: check that ECS service was created with deployment_circuit_breaker + assert: + that: + - ecs_service.service.deploymentCircuitBreaker + - ecs_service.service.deploymentCircuitBreaker.enable + - ecs_service.service.deploymentCircuitBreaker.rollback + - name: create same ECS service definition (should not change) ecs_service: state: present
circuit breaker settings missing in ecs_service module ### Summary AWS API provides ability to set deployment configuration for ecs service. Currently ansible module community.aws.ecs_service implements only `maximumPercent` and `minimumHealthyPercent` options but `deploymentCircuitBreaker` is missing. Requesting to add this much needed missing option. ### Issue Type Feature Idea ### Component Name community.aws.ecs_service ### Additional Information <!--- Paste example playbooks or commands between quotes below --> as described in https://docs.aws.amazon.com/cli/latest/reference/ecs/update-service.html yaml should be: ``` ecs_service: <..> deployment_configuration: minimum_percent: x minimum_healthy_percent: y deploymentCircuitBreaker: enabled: yes rollback: yes ``` JSON syntax: ``` "deployment_configuration": { "deploymentCircuitBreaker": { "enable": true|false, "rollback": true|false }, "maximumPercent": integer, "minimumHealthyPercent": integer } ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/ecs_service.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ecs_service.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @Java1Guy @jillr @kaczynskid @markuman @s-hertel @tremble @zacblazic [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> This already works as an undocumented feature. I can add the documentation and examples. Then maybe we just add ```'deployment_circuit_breaker': 'dict'``` to this https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/ecs_service.py#L486 and I think that's it? Do you see anything else? Working example: ``` - name: Build an ECS service. community.aws.ecs_service: state: "present" region: "{{ AWS_REGION }}" validate_certs: False aws_access_key: "{{ survey_access_key_id }}" aws_secret_key: "{{ survey_secret_access_key }}" aws_security_token: "{{ survey_session_token }}" name: service2 cluster: my_cluster_name task_definition: my-task-def desired_count: 1 launch_type: FARGATE network_configuration: assign_public_ip: no security_groups: - sg-abcd subnets: - subnet-1 - subnet-2 - subnet-3 - subnet-4 deployment_configuration: minimum_healthy_percent: 1 deployment_circuit_breaker: enable: True rollback: True ```
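As a purely illustrative aside (not the module code), this is roughly the boto3 call that sits behind the requested option once a `deployment_circuit_breaker` block is allowed through the deployment configuration; the cluster and service names are placeholders, and credentials/region are assumed to come from the environment.

```python
# Sketch of the boto3 ECS call behind the requested option. The snake_case
# playbook keys are converted to the camelCase keys the API expects.
import boto3
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

deployment_configuration = {
    'maximum_percent': 200,
    'minimum_healthy_percent': 100,
    'deployment_circuit_breaker': {'enable': True, 'rollback': True},
}

ecs = boto3.client('ecs')
ecs.update_service(
    cluster='my_cluster_name',    # placeholder
    service='service2',           # placeholder
    deploymentConfiguration=snake_dict_to_camel_dict(deployment_configuration),
)
```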
2022-06-04T15:03:28
ansible-collections/community.aws
1,225
ansible-collections__community.aws-1225
[ "921" ]
c4440624b5d41709208d6b265157a772bea8e2c3
diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py --- a/plugins/modules/ecs_service.py +++ b/plugins/modules/ecs_service.py @@ -101,6 +101,16 @@ minimum_healthy_percent: type: int description: A lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. + deployment_circuit_breaker: + type: dict + description: The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state. + suboptions: + enable: + type: bool + description: If enabled, a service deployment will transition to a failed state and stop launching new tasks. + rollback: + type: bool + description: If enabled, ECS will roll back your service to the last completed deployment after a failure. placement_constraints: description: - The placement constraints for the tasks in the service. @@ -328,6 +338,19 @@ description: minimumHealthyPercent param returned: always type: int + deploymentCircuitBreaker: + description: dictionary of deploymentCircuitBreaker + returned: always + type: complex + contains: + enable: + description: The state of the circuit breaker feature. + returned: always + type: bool + rollback: + description: The state of the rollback feature of the circuit breaker. + returned: always + type: bool events: description: list of service events returned: always @@ -444,6 +467,19 @@ description: minimumHealthyPercent param returned: always type: int + deploymentCircuitBreaker: + description: dictionary of deploymentCircuitBreaker + returned: always + type: complex + contains: + enable: + description: The state of the circuit breaker feature. + returned: always + type: bool + rollback: + description: The state of the rollback feature of the circuit breaker. + returned: always + type: bool events: description: list of service events returned: always @@ -485,7 +521,8 @@ DEPLOYMENT_CONFIGURATION_TYPE_MAP = { 'maximum_percent': 'int', - 'minimum_healthy_percent': 'int' + 'minimum_healthy_percent': 'int', + 'deployment_circuit_breaker': 'dict', } from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
diff --git a/tests/integration/targets/ecs_cluster/defaults/main.yml b/tests/integration/targets/ecs_cluster/defaults/main.yml --- a/tests/integration/targets/ecs_cluster/defaults/main.yml +++ b/tests/integration/targets/ecs_cluster/defaults/main.yml @@ -24,6 +24,9 @@ ecs_task_containers: ecs_service_deployment_configuration: minimum_healthy_percent: 0 maximum_percent: 100 + deployment_circuit_breaker: + enable: true + rollback: true ecs_service_placement_strategy: - type: binpack field: memory diff --git a/tests/integration/targets/ecs_cluster/tasks/main.yml b/tests/integration/targets/ecs_cluster/tasks/main.yml --- a/tests/integration/targets/ecs_cluster/tasks/main.yml +++ b/tests/integration/targets/ecs_cluster/tasks/main.yml @@ -225,16 +225,21 @@ role: "ecsServiceRole" register: ecs_service - - name: check that placement constraint has been applied + - name: check that ECS service creation changed assert: that: - ecs_service.changed - - "ecs_service.service.placementConstraints[0].type == 'distinctInstance'" - - name: check that ECS service creation changed + - name: check that placement constraint has been applied assert: that: - - ecs_service.changed + - "ecs_service.service.placementConstraints[0].type == 'distinctInstance'" + + - name: check that ECS service was created with deployment_circuit_breaker + assert: + that: + - ecs_service.service.deploymentConfiguration.deploymentCircuitBreaker.enable + - ecs_service.service.deploymentConfiguration.deploymentCircuitBreaker.rollback - name: create same ECS service definition (should not change) ecs_service:
circuit breaker settings missing in ecs_service module ### Summary AWS API provides ability to set deployment configuration for ecs service. Currently ansible module community.aws.ecs_service implements only `maximumPercent` and `minimumHealthyPercent` options but `deploymentCircuitBreaker` is missing. Requesting to add this much needed missing option. ### Issue Type Feature Idea ### Component Name community.aws.ecs_service ### Additional Information <!--- Paste example playbooks or commands between quotes below --> as described in https://docs.aws.amazon.com/cli/latest/reference/ecs/update-service.html yaml should be: ``` ecs_service: <..> deployment_configuration: minimum_percent: x minimum_healthy_percent: y deploymentCircuitBreaker: enabled: yes rollback: yes ``` JSON syntax: ``` "deployment_configuration": { "deploymentCircuitBreaker": { "enable": true|false, "rollback": true|false }, "maximumPercent": integer, "minimumHealthyPercent": integer } ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/ecs_service.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ecs_service.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @Java1Guy @jillr @kaczynskid @markuman @s-hertel @tremble @zacblazic [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> This already works as an undocumented feature. I can add the documentation and examples. Then maybe we just add ```'deployment_circuit_breaker': 'dict'``` to this https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/ecs_service.py#L486 and I think that's it? Do you see anything else? Working example: ``` - name: Build an ECS service. community.aws.ecs_service: state: "present" region: "{{ AWS_REGION }}" validate_certs: False aws_access_key: "{{ survey_access_key_id }}" aws_secret_key: "{{ survey_secret_access_key }}" aws_security_token: "{{ survey_session_token }}" name: service2 cluster: my_cluster_name task_definition: my-task-def desired_count: 1 launch_type: FARGATE network_configuration: assign_public_ip: no security_groups: - sg-abcd subnets: - subnet-1 - subnet-2 - subnet-3 - subnet-4 deployment_configuration: minimum_healthy_percent: 1 deployment_circuit_breaker: enable: True rollback: True ```
2022-06-07T12:18:19
ansible-collections/community.aws
1,226
ansible-collections__community.aws-1226
[ "921" ]
957945f207b974e1b405c7c2324034fff9dcaabd
diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py --- a/plugins/modules/ecs_service.py +++ b/plugins/modules/ecs_service.py @@ -101,6 +101,16 @@ minimum_healthy_percent: type: int description: A lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. + deployment_circuit_breaker: + type: dict + description: The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state. + suboptions: + enable: + type: bool + description: If enabled, a service deployment will transition to a failed state and stop launching new tasks. + rollback: + type: bool + description: If enabled, ECS will roll back your service to the last completed deployment after a failure. placement_constraints: description: - The placement constraints for the tasks in the service. @@ -328,6 +338,19 @@ description: minimumHealthyPercent param returned: always type: int + deploymentCircuitBreaker: + description: dictionary of deploymentCircuitBreaker + returned: always + type: complex + contains: + enable: + description: The state of the circuit breaker feature. + returned: always + type: bool + rollback: + description: The state of the rollback feature of the circuit breaker. + returned: always + type: bool events: description: list of service events returned: always @@ -444,6 +467,19 @@ description: minimumHealthyPercent param returned: always type: int + deploymentCircuitBreaker: + description: dictionary of deploymentCircuitBreaker + returned: always + type: complex + contains: + enable: + description: The state of the circuit breaker feature. + returned: always + type: bool + rollback: + description: The state of the rollback feature of the circuit breaker. + returned: always + type: bool events: description: list of service events returned: always @@ -485,7 +521,8 @@ DEPLOYMENT_CONFIGURATION_TYPE_MAP = { 'maximum_percent': 'int', - 'minimum_healthy_percent': 'int' + 'minimum_healthy_percent': 'int', + 'deployment_circuit_breaker': 'dict', } from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
diff --git a/tests/integration/targets/ecs_cluster/defaults/main.yml b/tests/integration/targets/ecs_cluster/defaults/main.yml --- a/tests/integration/targets/ecs_cluster/defaults/main.yml +++ b/tests/integration/targets/ecs_cluster/defaults/main.yml @@ -24,6 +24,9 @@ ecs_task_containers: ecs_service_deployment_configuration: minimum_healthy_percent: 0 maximum_percent: 100 + deployment_circuit_breaker: + enable: true + rollback: true ecs_service_placement_strategy: - type: binpack field: memory diff --git a/tests/integration/targets/ecs_cluster/tasks/main.yml b/tests/integration/targets/ecs_cluster/tasks/main.yml --- a/tests/integration/targets/ecs_cluster/tasks/main.yml +++ b/tests/integration/targets/ecs_cluster/tasks/main.yml @@ -225,16 +225,21 @@ role: "ecsServiceRole" register: ecs_service - - name: check that placement constraint has been applied + - name: check that ECS service creation changed assert: that: - ecs_service.changed - - "ecs_service.service.placementConstraints[0].type == 'distinctInstance'" - - name: check that ECS service creation changed + - name: check that placement constraint has been applied assert: that: - - ecs_service.changed + - "ecs_service.service.placementConstraints[0].type == 'distinctInstance'" + + - name: check that ECS service was created with deployment_circuit_breaker + assert: + that: + - ecs_service.service.deploymentConfiguration.deploymentCircuitBreaker.enable + - ecs_service.service.deploymentConfiguration.deploymentCircuitBreaker.rollback - name: create same ECS service definition (should not change) ecs_service:
circuit breaker settings missing in ecs_service module ### Summary AWS API provides ability to set deployment configuration for ecs service. Currently ansible module community.aws.ecs_service implements only `maximumPercent` and `minimumHealthyPercent` options but `deploymentCircuitBreaker` is missing. Requesting to add this much needed missing option. ### Issue Type Feature Idea ### Component Name community.aws.ecs_service ### Additional Information <!--- Paste example playbooks or commands between quotes below --> as described in https://docs.aws.amazon.com/cli/latest/reference/ecs/update-service.html yaml should be: ``` ecs_service: <..> deployment_configuration: minimum_percent: x minimum_healthy_percent: y deploymentCircuitBreaker: enabled: yes rollback: yes ``` JSON syntax: ``` "deployment_configuration": { "deploymentCircuitBreaker": { "enable": true|false, "rollback": true|false }, "maximumPercent": integer, "minimumHealthyPercent": integer } ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/ecs_service.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ecs_service.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @Java1Guy @jillr @kaczynskid @markuman @s-hertel @tremble @zacblazic [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> This already works as an undocumented feature. I can add the documentation and examples. Then maybe we just add ```'deployment_circuit_breaker': 'dict'``` to this https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/ecs_service.py#L486 and I think that's it? Do you see anything else? Working example: ``` - name: Build an ECS service. community.aws.ecs_service: state: "present" region: "{{ AWS_REGION }}" validate_certs: False aws_access_key: "{{ survey_access_key_id }}" aws_secret_key: "{{ survey_secret_access_key }}" aws_security_token: "{{ survey_session_token }}" name: service2 cluster: my_cluster_name task_definition: my-task-def desired_count: 1 launch_type: FARGATE network_configuration: assign_public_ip: no security_groups: - sg-abcd subnets: - subnet-1 - subnet-2 - subnet-3 - subnet-4 deployment_configuration: minimum_healthy_percent: 1 deployment_circuit_breaker: enable: True rollback: True ```
2022-06-07T12:20:44
ansible-collections/community.aws
1,275
ansible-collections__community.aws-1275
[ "836" ]
7460c6627632941993b0659327cf80f02dc6ac07
diff --git a/plugins/modules/sns_topic.py b/plugins/modules/sns_topic.py --- a/plugins/modules/sns_topic.py +++ b/plugins/modules/sns_topic.py @@ -26,8 +26,11 @@ type: str topic_type: description: - - The type of topic that should be created. Either Standard for FIFO (first-in, first-out) - choices: ['standard', 'fifo'] + - The type of topic that should be created. Either Standard for FIFO (first-in, first-out). + - Some regions, including GovCloud regions do not support FIFO topics. + Use a default value of 'standard' or omit the option if the region + does not support FIFO topics. + choices: ["standard", "fifo"] default: 'standard' type: str version_added: 2.0.0 @@ -363,9 +366,11 @@ def __init__(self, self.attributes_set = [] def _create_topic(self): - attributes = {'FifoTopic': 'false'} + attributes = {} tags = [] + # NOTE: Never set FifoTopic = False. Some regions (including GovCloud) + # don't support the attribute being set, even to False. if self.topic_type == 'fifo': attributes['FifoTopic'] = 'true' if not self.name.endswith('.fifo'): @@ -373,7 +378,9 @@ def _create_topic(self): if not self.check_mode: try: - response = self.connection.create_topic(Name=self.name, Attributes=attributes, Tags=tags) + response = self.connection.create_topic(Name=self.name, + Attributes=attributes, + Tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name) self.topic_arn = response['TopicArn'] @@ -506,7 +513,6 @@ def ensure_gone(self): def main(): - # We're kinda stuck with CamelCase here, it would be nice to switch to # snake_case, but we'd need to purge out the alias entries http_retry_args = dict(
community.aws.sns_topic does not work in GovCloud ### Summary The community.aws.sns_topic does not work in the GovCloud Region. ### Issue Type Bug Report ### Component Name sns_topic ### Ansible Version ``` ansible 2.9.27 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/ec2-user/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python3.6/site-packages/ansible executable location = /usr/bin/ansible python version = 3.6.8 (default, Mar 18 2021, 08:58:41) [GCC 8.4.1 20200928 (Red Hat 8.4.1-1)] ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` This command doesn't work anymore I don't think. I only have the community.aws collection installed to test this issue. ### AWS SDK versions ``` pip3 show boto boto3 botocore Name: boto3 Version: 1.20.24 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /home/ec2-user/.local/lib/python3.6/site-packages Requires: s3transfer, jmespath, botocore --- Name: botocore Version: 1.23.24 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /home/ec2-user/.local/lib/python3.6/site-packages Requires: python-dateutil, jmespath, urllib3 ``` ### Configuration Empty ### OS / Environment Red Hat Enterprise Linux release 8.4 (Ootpa) ### Steps to Reproduce ``` --- - hosts: localhost gather_facts: true tasks: - name: Create SNS delegate_to: localhost community.aws.sns_topic: name: "example" state: present display_name: "example sns topic" ec2_url: "https://sns.us-gov-west-1.amazonaws.com" topic_type: standard ``` ``` ansible-playbook -vvv -i localhost sns.yaml ``` ### Expected Results I expect an SNS topic to be created in my AWS GovCloud Account ### Actual Results ``` The full traceback is: Traceback (most recent call last): File "/tmp/ansible_community.aws.sns_topic_payload_04_kg752/ansible_community.aws.sns_topic_payload.zip/ansible_collections/community/aws/plugins/modules/sns_topic.py", line 378, in _create_topic File "/home/ec2-user/.local/lib/python3.6/site-packages/botocore/client.py", line 391, in _api_call return self._make_api_call(operation_name, kwargs) File "/home/ec2-user/.local/lib/python3.6/site-packages/botocore/client.py", line 719, in _make_api_call raise error_class(parsed_response, operation_name) botocore.errorfactory.InvalidParameterException: An error occurred (InvalidParameter) when calling the CreateTopic operation: Invalid parameter: Attributes Reason: Unknown attribute FifoTopic fatal: [localhost -> localhost]: FAILED! 
=> { "boto3_version": "1.20.24", "botocore_version": "1.23.24", "changed": false, "error": { "code": "InvalidParameter", "message": "Invalid parameter: Attributes Reason: Unknown attribute FifoTopic", "type": "Sender" }, "invocation": { "module_args": { "aws_access_key": null, "aws_ca_bundle": null, "aws_config": null, "aws_secret_key": null, "debug_botocore_endpoint_logs": false, "delivery_policy": null, "display_name": "example sns topic", "ec2_url": "https://sns.us-gov-west-1.amazonaws.com", "name": "example", "policy": null, "profile": null, "purge_subscriptions": true, "region": null, "security_token": null, "state": "present", "subscriptions": [], "topic_type": "standard", "validate_certs": true } }, "msg": "Couldn't create topic example: An error occurred (InvalidParameter) when calling the CreateTopic operation: Invalid parameter: Attributes Reason: Unknown attribute FifoTopic", "response_metadata": { "http_headers": { "content-length": "312", "content-type": "text/xml", "date": "Thu, 16 Dec 2021 23:35:12 GMT", "x-amzn-requestid": "6a6cc2e8-a524-5182-8158-98dd6a553688" }, "http_status_code": 400, "request_id": "6a6cc2e8-a524-5182-8158-98dd6a553688", "retry_attempts": 0 } } ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
I suspect the issue is here: https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/sns_topic.py#L368 This appears to add a FifoTopic attribute, set to false by default. Perhaps the SNS API in GovCloud does not have this parameter, and that is the reason for the error? If I comment out the attributes line and set it to empty, the topic is created successfully ``` #attributes = {'FifoTopic': 'false'} attributes = {} ``` @dmc5179 that is correct, [FIFO topics are not supported in GovCloud](https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-sns.html). A fix would consist of parsing the `ec2_url` or (even better) determining the endpoint (commercial vs. GovCloud) and popping the `fifo` attribute when the endpoint is GovCloud. cc @jillr @joelthompson @markuman @nand0p @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> We tried out what @dmc5179 did too, and it worked. Do we actually NEED that attribute if it's false? Can it just be removed altogether if not set? Otherwise, checking the endpoint somehow as @Razique suggested and removing the attribute altogether just for GovCloud. @dmc5179 mind trying the fix that I released in the pull request?
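To make the workaround concrete, here is a small hedged sketch mirroring the attribute handling proposed in the patch above (the helper function name is made up for illustration): the `Attributes` dict starts empty and `FifoTopic` is only ever sent when a FIFO topic is actually requested, so standard topics also work in partitions that reject the attribute.

```python
# Sketch: never send FifoTopic='false'; only set the attribute for FIFO topics.
import boto3


def create_topic(name, topic_type='standard'):
    attributes = {}
    if topic_type == 'fifo':
        attributes['FifoTopic'] = 'true'
        if not name.endswith('.fifo'):
            name = name + '.fifo'

    sns = boto3.client('sns')
    response = sns.create_topic(Name=name, Attributes=attributes, Tags=[])
    return response['TopicArn']
```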
2022-06-29T07:38:54
ansible-collections/community.aws
1,290
ansible-collections__community.aws-1290
[ "1190" ]
24259ab5c8a28b1bdc648a17b79cca1fd65cb372
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -534,7 +534,12 @@ def _flush_stderr(self, subprocess): def _get_url(self, client_method, bucket_name, out_path, http_method, profile_name, extra_args=None): ''' Generate URL for get_object / put_object ''' - region_name = self.get_option('region') or 'us-east-1' + + bucket_location = boto3.client('s3').get_bucket_location( + Bucket=(self.get_option('bucket_name')), + ) + region_name = bucket_location['LocationConstraint'] + client = self._get_boto_client('s3', region_name=region_name, profile_name=profile_name) params = {'Bucket': bucket_name, 'Key': out_path} if extra_args is not None:
SSM connection doesn't use regional S3 endpoint ### Summary When using `ansible_connection: aws_ssm` the url used to download data from a bucket is always `*.s3.amazonaws.com` regardless of the region specified in `ansible_aws_ssm_region`. This causes issues since in restricted environments the EC2 instance has to use the S3 gateway endpoint which is only available using the region specific url, for example `*.s3.eu-central-1.amazonaws.com` ### Issue Type Bug Report ### Component Name s3 ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.6] config file = /etc/ansible/ansible.cfg configured module search path = ['/home/ssm-user/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python3/dist-packages/ansible ansible collection location = /home/ssm-user/.ansible/collections:/usr/share/ansible/collections executable location = /usr/bin/ansible python version = 3.10.4 (main, Apr 2 2022, 09:04:19) [GCC 11.2.0] jinja version = 3.0.3 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Name: boto3 Version: 1.24.0 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.8/dist-packages Requires: s3transfer, botocore, jmespath Required-by: --- Name: botocore Version: 1.27.0 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.8/dist-packages Requires: python-dateutil, jmespath, urllib3 Required-by: s3transfer, boto3 ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment Ubuntu 22 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml ll: hosts: test: ansible_connection: aws_ssm ansible_aws_ssm_instance_id: "i-...." vars: ansible_aws_ssm_bucket_name: my-bucket ansible_aws_ssm_region: eu-central-1 ``` ### Expected Results Ansible should use the region specific s3 url for download ### Actual Results ```console (paste below) EXEC curl 'https://my-bucket.s3.amazonaws.com/... ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: None If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner --->
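As an illustrative sketch of the approach taken in the patch above — the bucket's region is discovered via `get_bucket_location` and a region-scoped client is used to build the presigned URL — note that the us-east-1 fallback is an extra assumption added here, since `get_bucket_location` returns a null `LocationConstraint` for buckets in that region.

```python
# Sketch: presign against the bucket's own region so the URL uses the regional
# endpoint (e.g. my-bucket.s3.eu-central-1.amazonaws.com).
import boto3


def bucket_region(bucket_name):
    location = boto3.client('s3').get_bucket_location(Bucket=bucket_name)
    # LocationConstraint is None for buckets created in us-east-1
    return location.get('LocationConstraint') or 'us-east-1'


def presigned_get_url(bucket_name, key, expires_in=3600):
    s3 = boto3.client('s3', region_name=bucket_region(bucket_name))
    return s3.generate_presigned_url(
        'get_object',
        Params={'Bucket': bucket_name, 'Key': key},
        ExpiresIn=expires_in,
    )
```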
2022-06-30T12:03:25
ansible-collections/community.aws
1,291
ansible-collections__community.aws-1291
[ "1190" ]
56d80dd17edfdbebf21fad527ba366f4d1a53bca
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -534,7 +534,12 @@ def _flush_stderr(self, subprocess): def _get_url(self, client_method, bucket_name, out_path, http_method, profile_name, extra_args=None): ''' Generate URL for get_object / put_object ''' - region_name = self.get_option('region') or 'us-east-1' + + bucket_location = boto3.client('s3').get_bucket_location( + Bucket=(self.get_option('bucket_name')), + ) + region_name = bucket_location['LocationConstraint'] + client = self._get_boto_client('s3', region_name=region_name, profile_name=profile_name) params = {'Bucket': bucket_name, 'Key': out_path} if extra_args is not None:
SSM connection doesn't use regional S3 endpoint ### Summary When using `ansible_connection: aws_ssm` the url used to download data from a bucket is always `*.s3.amazonaws.com` regardless of the region specified in `ansible_aws_ssm_region`. This causes issues since in restricted environments the EC2 instance has to use the S3 gateway endpoint which is only available using the region specific url, for example `*.s3.eu-central-1.amazonaws.com` ### Issue Type Bug Report ### Component Name s3 ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.6] config file = /etc/ansible/ansible.cfg configured module search path = ['/home/ssm-user/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python3/dist-packages/ansible ansible collection location = /home/ssm-user/.ansible/collections:/usr/share/ansible/collections executable location = /usr/bin/ansible python version = 3.10.4 (main, Apr 2 2022, 09:04:19) [GCC 11.2.0] jinja version = 3.0.3 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Name: boto3 Version: 1.24.0 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.8/dist-packages Requires: s3transfer, botocore, jmespath Required-by: --- Name: botocore Version: 1.27.0 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.8/dist-packages Requires: python-dateutil, jmespath, urllib3 Required-by: s3transfer, boto3 ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment Ubuntu 22 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml ll: hosts: test: ansible_connection: aws_ssm ansible_aws_ssm_instance_id: "i-...." vars: ansible_aws_ssm_bucket_name: my-bucket ansible_aws_ssm_region: eu-central-1 ``` ### Expected Results Ansible should use the region specific s3 url for download ### Actual Results ```console (paste below) EXEC curl 'https://my-bucket.s3.amazonaws.com/... ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: None If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner --->
2022-06-30T12:03:34
ansible-collections/community.aws
1,293
ansible-collections__community.aws-1293
[ "250" ]
b18c203b98950b9cbd6dbf66d237bf1a179504e4
diff --git a/plugins/modules/lightsail_static_ip.py b/plugins/modules/lightsail_static_ip.py new file mode 100644 --- /dev/null +++ b/plugins/modules/lightsail_static_ip.py @@ -0,0 +1,148 @@ +#!/usr/bin/python + +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: lightsail_static_ip +version_added: 4.1.0 +short_description: Manage static IP addresses in AWS Lightsail +description: + - Manage static IP addresses in AWS Lightsail. +author: + - "Daniel Cotton (@danielcotton)" +options: + state: + description: + - Describes the desired state. + default: present + choices: ['present', 'absent'] + type: str + name: + description: Name of the static IP. + required: true + type: str +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 +''' + + +EXAMPLES = ''' +- name: Provision a Lightsail static IP + community.aws.lightsail_static_ip: + state: present + name: my_static_ip + register: my_ip + +- name: Remove a static IP + community.aws.lightsail_static_ip: + state: absent + name: my_static_ip +''' + +RETURN = ''' +static_ip: + description: static_ipinstance data + returned: always + type: dict + sample: + arn: "arn:aws:lightsail:ap-southeast-2:184297340509:StaticIp/d8f47672-c261-4443-a484-4a2ec983db9a" + created_at: "2021-02-28T00:04:05.202000+10:30" + ip_address: "192.0.2.5" + is_attached: false + location: + availability_zone: all + region_name: ap-southeast-2 + name: "static_ip" + resource_type: StaticIp + support_code: "677585553206/192.0.2.5" +''' + +try: + import botocore +except ImportError: + # will be caught by AnsibleAWSModule + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + + +def find_static_ip_info(module, client, static_ip_name, fail_if_not_found=False): + + try: + res = client.get_static_ip(staticIpName=static_ip_name) + except is_boto3_error_code('NotFoundException') as e: + if fail_if_not_found: + module.fail_json_aws(e) + return None + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + return res['staticIp'] + + +def create_static_ip(module, client, static_ip_name): + + inst = find_static_ip_info(module, client, static_ip_name) + if inst: + module.exit_json(changed=False, static_ip=camel_dict_to_snake_dict(inst)) + else: + create_params = {'staticIpName': static_ip_name} + + try: + client.allocate_static_ip(**create_params) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + inst = find_static_ip_info(module, client, static_ip_name, fail_if_not_found=True) + + module.exit_json(changed=True, static_ip=camel_dict_to_snake_dict(inst)) + + +def delete_static_ip(module, client, static_ip_name): + + inst = find_static_ip_info(module, client, static_ip_name) + if inst is None: + module.exit_json(changed=False, static_ip={}) + + changed = False + try: + client.release_static_ip(staticIpName=static_ip_name) + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(changed=changed, static_ip=camel_dict_to_snake_dict(inst)) + + +def main(): + + argument_spec = dict( + name=dict(type='str', 
required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec) + + client = module.client('lightsail') + + name = module.params.get('name') + state = module.params.get('state') + + if state == 'present': + create_static_ip(module, client, name) + elif state == 'absent': + delete_static_ip(module, client, name) + + +if __name__ == '__main__': + main()
diff --git a/tests/integration/targets/lightsail_static_ip/aliases b/tests/integration/targets/lightsail_static_ip/aliases new file mode 100644 --- /dev/null +++ b/tests/integration/targets/lightsail_static_ip/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/tests/integration/targets/lightsail_static_ip/defaults/main.yml b/tests/integration/targets/lightsail_static_ip/defaults/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/lightsail_static_ip/defaults/main.yml @@ -0,0 +1 @@ +static_ip_name: "{{ resource_prefix }}_static_ip" diff --git a/tests/integration/targets/lightsail_static_ip/tasks/main.yml b/tests/integration/targets/lightsail_static_ip/tasks/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/lightsail_static_ip/tasks/main.yml @@ -0,0 +1,96 @@ +--- + +- module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key | default(omit) }}' + aws_secret_key: '{{ aws_secret_key | default(omit) }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region | default(omit) }}' + + block: + + # ==== Tests =================================================== + + - name: Create a new static IP + lightsail_static_ip: + name: "{{ static_ip_name }}" + register: result + + - assert: + that: + - result.changed == True + - '"static_ip" in result' + - '"arn" in result.static_ip' + - '"created_at" in result.static_ip' + - '"ip_address" in result.static_ip' + - '"is_attached" in result.static_ip' + - '"location" in result.static_ip' + - '"name" in result.static_ip' + - '"resource_type" in result.static_ip' + - '"support_code" in result.static_ip' + - result.static_ip.arn.startswith("arn:") + - result.static_ip.name == static_ip_name + - result.static_ip.resource_type == 'StaticIp' + - result.static_ip.is_attached == false + - result.static_ip.ip_address | ansible.utils.ipaddr + - '"availability_zone" in result.static_ip.location' + - '"region_name" in result.static_ip.location' + + - set_fact: + lightsail_ip_arn: '{{ result.static_ip.arn }}' + lightsail_ip_address: '{{ result.static_ip.ip_address }}' + + - name: Make sure create is idempotent + lightsail_static_ip: + name: "{{ static_ip_name }}" + register: result + + - assert: + that: + - result.changed == False + - '"static_ip" in result' + - '"arn" in result.static_ip' + - '"created_at" in result.static_ip' + - '"ip_address" in result.static_ip' + - '"is_attached" in result.static_ip' + - '"location" in result.static_ip' + - '"name" in result.static_ip' + - '"resource_type" in result.static_ip' + - '"support_code" in result.static_ip' + - result.static_ip.arn == lightsail_ip_arn + - result.static_ip.name == static_ip_name + - result.static_ip.resource_type == 'StaticIp' + - result.static_ip.is_attached == false + - result.static_ip.ip_address == lightsail_ip_address + - '"availability_zone" in result.static_ip.location' + - '"region_name" in result.static_ip.location' + + - name: Delete the static IP + lightsail_static_ip: + name: "{{ static_ip_name }}" + state: absent + register: result + + - assert: + that: + - result.changed == True + + - name: Make sure deletion is idempotent + lightsail_static_ip: + name: "{{ static_ip_name }}" + state: absent + register: result + + - assert: + that: + - result.changed == False + + # ==== Cleanup ==================================================== + + always: + + - name: Cleanup - delete static IP + lightsail_static_ip: + name: "{{ static_ip_name }}" + state: absent + ignore_errors: yes
Add a module to manage AWS Lightsail Static IPs <!--- Verify first that your feature was not already discussed on GitHub --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Issue #174 requests adding the ability to manage firewall and static IP settings in a Lightsail VPS. I suggest that it may be beneficial to have a module that can manage static IPs for Lightsail (which are managed separately from VMs). ##### ISSUE TYPE - Feature Idea ##### COMPONENT NAME community.aws.lightsail ##### ADDITIONAL INFORMATION In Lightsail, static IP addresses are managed separately from VPSes. You create and manage static IPs, and can attach them to a VPS as per #174. A module that can manage these IPs would be a prerequisite for end-to-end configuration management for Lightsail. I already have a basic version of this and am planning on submitting a PR in the next couple of weeks once it's cleaned up & tested. Relevant API Documentation: * https://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_AllocateStaticIp.html * https://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_GetStaticIps.html * https://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_ReleaseStaticIp.html Possible Playbook notation: ```yaml - name: Create a new Lightsail static IP community.aws.lightsail_static_ip: state: present name: static_ip_1 aws_access_key: XXXXXXXX ```
Files identified in the description: None If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner --->
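For reference, a minimal sketch of the Lightsail API calls such a module wraps (the static IP name is just the example from the playbook notation above; error handling and idempotency checks are omitted).

```python
# Sketch of the underlying boto3 Lightsail calls for a static IP lifecycle.
import boto3

lightsail = boto3.client('lightsail')

# state: present -> allocate the static IP
lightsail.allocate_static_ip(staticIpName='static_ip_1')

# read it back (e.g. to report facts or check idempotency)
static_ip = lightsail.get_static_ip(staticIpName='static_ip_1')['staticIp']
print(static_ip['ipAddress'], static_ip['isAttached'])

# state: absent -> release it again
lightsail.release_static_ip(staticIpName='static_ip_1')
```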
2022-07-01T12:20:07
ansible-collections/community.aws
1,302
ansible-collections__community.aws-1302
[ "193" ]
9d0bdb2c46b9f2446f610df10f19112ed451cd16
diff --git a/plugins/modules/sns_topic.py b/plugins/modules/sns_topic.py --- a/plugins/modules/sns_topic.py +++ b/plugins/modules/sns_topic.py @@ -133,6 +133,10 @@ protocol: description: Protocol of subscription. required: true + attributes: + description: Attributes of subscription. Only supports RawMessageDelievery for SQS endpoints. + default: {} + version_added: "4.1.0" type: list elements: dict default: [] @@ -358,6 +362,8 @@ def __init__(self, self.subscriptions_existing = [] self.subscriptions_deleted = [] self.subscriptions_added = [] + self.subscriptions_attributes_set = [] + self.desired_subscription_attributes = dict() self.purge_subscriptions = purge_subscriptions self.check_mode = check_mode self.topic_created = False @@ -455,6 +461,45 @@ def _set_topic_subs(self): self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn) return changed + def _init_desired_subscription_attributes(self): + for sub in self.subscriptions: + sub_key = (sub['protocol'], canonicalize_endpoint(sub['protocol'], sub['endpoint'])) + tmp_dict = sub.get('attributes', {}) + # aws sdk expects values to be strings + # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#SNS.Client.set_subscription_attributes + for k, v in tmp_dict.items(): + tmp_dict[k] = str(v) + + self.desired_subscription_attributes[sub_key] = tmp_dict + + def _set_topic_subs_attributes(self): + changed = False + for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): + sub_key = (sub['Protocol'], sub['Endpoint']) + sub_arn = sub['SubscriptionArn'] + if sub_key not in self.desired_subscription_attributes: + # subscription isn't defined in desired, skipping + continue + + try: + sub_current_attributes = self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)['Attributes'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, "Couldn't get subscription attributes for subscription %s" % sub_arn) + + raw_message = self.desired_subscription_attributes[sub_key].get('RawMessageDelivery') + if raw_message is not None and 'RawMessageDelivery' in sub_current_attributes: + if sub_current_attributes['RawMessageDelivery'].lower() != raw_message.lower(): + changed = True + if not self.check_mode: + try: + self.connection.set_subscription_attributes(SubscriptionArn=sub_arn, + AttributeName='RawMessageDelivery', + AttributeValue=raw_message) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, "Couldn't set RawMessageDelivery subscription attribute") + + return changed + def _delete_subscriptions(self): # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days # https://forums.aws.amazon.com/thread.jspa?threadID=85993 @@ -496,6 +541,13 @@ def ensure_ok(self): elif self.display_name or self.policy or self.delivery_policy: self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account") changed |= self._set_topic_subs() + + self._init_desired_subscription_attributes() + if self.topic_arn in list_topics(self.connection, self.module): + changed |= self._set_topic_subs_attributes() + elif any(self.desired_subscription_attributes.values()): + self.module.fail_json(msg="Cannot set subscription attributes for SNS topics not owned by this account") + return changed def ensure_gone(self):
diff --git a/tests/integration/targets/sns_topic/defaults/main.yml b/tests/integration/targets/sns_topic/defaults/main.yml --- a/tests/integration/targets/sns_topic/defaults/main.yml +++ b/tests/integration/targets/sns_topic/defaults/main.yml @@ -1,8 +1,12 @@ # we hash the resource_prefix to get a shorter, unique string sns_topic_topic_name: "ansible-test-{{ tiny_prefix }}-topic" +sns_sqs_subscription_attributes: {} sns_topic_subscriptions: - endpoint: "{{ sns_topic_subscriber_arn }}" protocol: "lambda" + - endpoint: "{{ sns_topic_subscriber_sqs_arn }}" + protocol: sqs + attributes: "{{ sns_sqs_subscription_attributes }}" sns_topic_third_party_topic_arn: "arn:aws:sns:us-east-1:806199016981:AmazonIpSpaceChanged" sns_topic_third_party_region: "{{ sns_topic_third_party_topic_arn.split(':')[3] }}" @@ -10,3 +14,5 @@ sns_topic_third_party_region: "{{ sns_topic_third_party_topic_arn.split(':')[3] sns_topic_lambda_function: "sns_topic_lambda" sns_topic_lambda_name: "ansible-test-{{ tiny_prefix }}-{{ sns_topic_lambda_function }}" sns_topic_lambda_role: "ansible-test-{{ tiny_prefix }}-sns-lambda" + +sns_topic_sqs_name: "ansible-test-{{ tiny_prefix }}-sns" diff --git a/tests/integration/targets/sns_topic/tasks/main.yml b/tests/integration/targets/sns_topic/tasks/main.yml --- a/tests/integration/targets/sns_topic/tasks/main.yml +++ b/tests/integration/targets/sns_topic/tasks/main.yml @@ -251,6 +251,14 @@ - delivery_policy.http.defaultHealthyRetryPolicy.maxDelayTarget == 40 - delivery_policy.http.defaultHealthyRetryPolicy.numRetries == 6 + - name: create SQS queue for subscribing + sqs_queue: + name: '{{ sns_topic_sqs_name }}' + register: sqs_result + + - set_fact: + sns_topic_subscriber_sqs_arn: '{{ sqs_result.queue_arn }}' + - name: create temp dir tempfile: state: directory @@ -287,7 +295,37 @@ assert: that: - sns_topic_subscribe.changed - - sns_topic_subscribe.sns_topic.subscriptions|length == 1 + - sns_topic_subscribe.sns_topic.subscriptions|length == 2 + + - name: enable raw message delivery for sqs subscription (attributes) + set_fact: + sns_sqs_subscription_attributes: + RawMessageDelivery: true + + - name: update topic subscriptions - raw message enabled + sns_topic: + name: '{{ sns_topic_topic_name }}' + display_name: My new topic name + purge_subscriptions: false + subscriptions: '{{ sns_topic_subscriptions }}' + register: sns_topic_subscribe_update_raw_on + + - name: assert sqs subscription was updated + assert: + that: + - sns_topic_subscribe_update_raw_on.changed + + - name: rerun topic subscriptions with raw message enabled - expect no changes + sns_topic: + name: '{{ sns_topic_topic_name }}' + display_name: My new topic name + purge_subscriptions: false + subscriptions: '{{ sns_topic_subscriptions }}' + register: rerun_sns_topic_subscribe_update_raw_on + - name: assert no changes after rerun + assert: + that: + - not rerun_sns_topic_subscribe_update_raw_on.changed - name: run again with purge_subscriptions set to false sns_topic: @@ -300,7 +338,7 @@ assert: that: - not sns_topic_no_purge.changed - - sns_topic_no_purge.sns_topic.subscriptions|length == 1 + - sns_topic_no_purge.sns_topic.subscriptions|length == 2 - name: run again with purge_subscriptions set to true sns_topic: @@ -319,6 +357,10 @@ name: '{{ sns_topic_topic_name }}' state: absent + - name: remove subscription attributes before dealing with third party topic + set_fact: + sns_sqs_subscription_attributes: {} + - name: no-op with third party topic (effectively get existing subscriptions) sns_topic: name: '{{ 
sns_topic_third_party_topic_arn }}' @@ -336,7 +378,7 @@ assert: that: - third_party_topic_subscribe is changed - - (third_party_topic_subscribe.sns_topic.subscriptions|length) - (third_party_topic.sns_topic.subscriptions|length) == 1 + - (third_party_topic_subscribe.sns_topic.subscriptions|length) - (third_party_topic.sns_topic.subscriptions|length) == 2 - name: attempt to change name of third party topic sns_topic: @@ -412,6 +454,12 @@ state: absent ignore_errors: true + - name: remove SQS queue + sqs_queue: + name: '{{ sns_topic_sqs_name }}' + state: absent + ignore_errors: true + - name: remove tempdir file: path: '{{ tempdir.path }}'
sns_topic - subscriptions section lacks setting the raw message delivery option <!--- Verify first that your feature was not already discussed on GitHub --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Describe the new feature/improvement briefly below --> The sns_topic module doesn't allow setting the raw message delivery option for subscriptions. ##### ISSUE TYPE - Feature Idea ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> sns_topic ##### ADDITIONAL INFORMATION <!--- Describe how the feature would be used, why it is needed and what it would solve --> Get a new option within the subscriptions section with the name of raw_message with a default value of disabled. We need the raw message delivery option to be enabled on all of our subscriptions. Not having the ability to set this with Ansible requires us to manually set this option to true for all our subscriptions across all our topics. <!--- Paste example playbooks or commands between quotes below --> ```yaml subscriptions: - endpoint: "{{ item.1.queue_arn }}" protocol: "sqs" raw_message: "enabled" ``` <!--- HINT: You can also paste gist.github.com links for larger files -->
Files identified in the description: * [`plugins/modules/sns_topic.py`](https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/sns_topic.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @joelthompson @nand0p @s-hertel @tremble @willthames @wimnat [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> Any updates on this issue? cc @markuman [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> Hi @canidam thx for comming back to this issue. Unfortunately no one is currently working on this feature request at the moment. Just some notes about the implementation that is necessary for this feature. The requested option is an attribute of the subscription: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#SNS.Client.subscribe and a subscription can have multiple attributes. Therefore, the implementation should will looks more like this I guess ```yml subscriptions: - endpoint: "{{ item.1.queue_arn }}" protocol: "sqs" attributes: raw_message_delivery: yes ``` @markuman thank you. I can add it and open PR, is the project open for it? > I can add it and open PR, is the project open for it? @canidam Most certainly! There's some links to get you started in our readme: https://github.com/ansible-collections/community.aws#contributing-to-this-collection In general we'll need 3 things: - The change (plugins/modules/sns_topic.py) - An update to the integration test (tests/integration/targets/sns_topic) - Changelog fragment (changelogs/fragments/my_change.yml)
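To illustrate the underlying calls the suggested `attributes` option would map to (the ARNs below are placeholders): `RawMessageDelivery` can be passed at subscribe time or toggled later on an existing subscription, and SNS expects the value as the string `'true'`/`'false'`.

```python
# Sketch: enabling raw message delivery for an SQS subscription via boto3.
import boto3

sns = boto3.client('sns')

# set the attribute when creating the subscription
subscription = sns.subscribe(
    TopicArn='arn:aws:sns:us-east-1:123456789012:example-topic',   # placeholder
    Protocol='sqs',
    Endpoint='arn:aws:sqs:us-east-1:123456789012:example-queue',   # placeholder
    Attributes={'RawMessageDelivery': 'true'},
    ReturnSubscriptionArn=True,
)

# or change it on an existing subscription
sns.set_subscription_attributes(
    SubscriptionArn=subscription['SubscriptionArn'],
    AttributeName='RawMessageDelivery',
    AttributeValue='true',
)
```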
2022-07-03T15:02:18
ansible-collections/community.aws
1,303
ansible-collections__community.aws-1303
[ "338" ]
f25a79d87a568253a16df1b20694176d395e3eac
diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py --- a/plugins/modules/ecs_service.py +++ b/plugins/modules/ecs_service.py @@ -12,16 +12,16 @@ version_added: 1.0.0 short_description: Create, terminate, start or stop a service in ECS description: - - Creates or terminates ECS. services. + - Creates or terminates ECS services. notes: - The service role specified must be assumable. (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com) - For details of the parameters and returns see U(https://boto3.readthedocs.io/en/latest/reference/services/ecs.html). - An IAM role must have been previously created. author: - - "Mark Chance (@Java1Guy)" - - "Darek Kaczynski (@kaczynskid)" - - "Stephane Maarek (@simplesteph)" - - "Zac Blazic (@zacblazic)" + - "Mark Chance (@Java1Guy)" + - "Darek Kaczynski (@kaczynskid)" + - "Stephane Maarek (@simplesteph)" + - "Zac Blazic (@zacblazic)" options: state: description: @@ -46,11 +46,15 @@ description: - The task definition the service will run. - This parameter is required when I(state=present). + - This parameter is ignored when updating a service with a C(CODE_DEPLOY) deployment controller in which case + the task definition is managed by Code Pipeline and cannot be updated. required: false type: str load_balancers: description: - The list of ELBs defined for this service. + - Load balancers for an existing service cannot be updated, and it is an error to do so. + - When the deployment controller is CODE_DEPLOY changes to this value are simply ignored, and do not cause an error. required: false type: list elements: dict @@ -90,6 +94,17 @@ required: false type: bool default: false + deployment_controller: + description: + - The deployment controller to use for the service. If no deploymenet controller is specified, the ECS controller is used. + required: false + version_added: 4.1.0 + type: dict + suboptions: + type: + type: str + choices: ["ECS", "CODE_DEPLOY", "EXTERNAL"] + description: The deployment controller type to use. deployment_configuration: description: - Optional parameters that control the deployment_configuration. 
@@ -238,9 +253,8 @@ default: false version_added: 4.1.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - + - amazon.aws.aws + - amazon.aws.ec2 ''' EXAMPLES = r''' @@ -590,6 +604,10 @@ ''' import time +DEPLOYMENT_CONTROLLER_TYPE_MAP = { + 'type': 'str', +} + DEPLOYMENT_CONFIGURATION_TYPE_MAP = { 'maximum_percent': 'int', 'minimum_healthy_percent': 'int', @@ -664,7 +682,8 @@ def is_matching_service(self, expected, existing): # but the user is just entering # ansible-fargate-nginx:3 if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]: - return False + if existing['deploymentController']['type'] != 'CODE_DEPLOY': + return False if expected.get('health_check_grace_period_seconds'): if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'): @@ -682,7 +701,7 @@ def is_matching_service(self, expected, existing): return True def create_service(self, service_name, cluster_name, task_definition, load_balancers, - desired_count, client_token, role, deployment_configuration, + desired_count, client_token, role, deployment_controller, deployment_configuration, placement_constraints, placement_strategy, health_check_grace_period_seconds, network_configuration, service_registries, launch_type, platform_version, scheduling_strategy, capacity_provider_strategy): @@ -699,6 +718,8 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan ) if network_configuration: params['networkConfiguration'] = network_configuration + if deployment_controller: + params['deploymentController'] = deployment_controller if launch_type: params['launchType'] = launch_type if platform_version: @@ -786,6 +807,7 @@ def main(): repeat=dict(required=False, type='int', default=10), force_new_deployment=dict(required=False, default=False, type='bool'), force_deletion=dict(required=False, default=False, type='bool'), + deployment_controller=dict(required=False, default={}, type='dict'), deployment_configuration=dict(required=False, default={}, type='dict'), wait=dict(required=False, default=False, type='bool'), placement_constraints=dict( @@ -851,6 +873,11 @@ def main(): else: network_configuration = None + deployment_controller = map_complex_type(module.params['deployment_controller'], + DEPLOYMENT_CONTROLLER_TYPE_MAP) + + deploymentController = snake_dict_to_camel_dict(deployment_controller) + deployment_configuration = map_complex_type(module.params['deployment_configuration'], DEPLOYMENT_CONFIGURATION_TYPE_MAP) @@ -912,12 +939,19 @@ def main(): if 'capacityProviderStrategy' in existing.keys(): module.fail_json(msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type.") if (existing['loadBalancers'] or []) != loadBalancers: - module.fail_json(msg="It is not possible to update the load balancers of an existing service") + if existing['deploymentController']['type'] != 'CODE_DEPLOY': + module.fail_json(msg="It is not possible to update the load balancers of an existing service") + + if existing.get('deploymentController', {}).get('type', None) == 'CODE_DEPLOY': + task_definition = '' + network_configuration = [] + else: + task_definition = module.params['task_definition'] # update required response = service_mgr.update_service(module.params['name'], module.params['cluster'], - module.params['task_definition'], + task_definition, module.params['desired_count'], deploymentConfiguration, network_configuration, @@ -935,6 +969,7 @@ def main(): module.params['desired_count'], 
clientToken, role, + deploymentController, deploymentConfiguration, module.params['placement_constraints'], module.params['placement_strategy'],
community.aws.ecs_service should support the deploymentController parameter ##### SUMMARY [More on the deploymentController parameter](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service_definition_parameters.html). [What this does](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html): An Amazon ECS deployment type determines the deployment strategy that your service uses. There are three deployment types: rolling update, blue/green, and external. ##### COMPONENT NAME community.aws.ecs_service
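The requested option maps onto the `deploymentController` argument of the ECS `CreateService` API, which the patch above passes through as `params['deploymentController']`. A minimal boto3 sketch of that call, not the module itself; the cluster, service and task-definition names are placeholders, and a real CODE_DEPLOY service additionally needs load balancer / target group settings that are omitted here for brevity:

```python
# Minimal sketch: creating an ECS service with a non-default deployment
# controller via boto3. All resource names below are placeholders.
import boto3

ecs = boto3.client("ecs")

response = ecs.create_service(
    cluster="test-cluster",                 # placeholder cluster name
    serviceName="test-service",             # placeholder service name
    taskDefinition="test-task-definition",  # placeholder task definition
    desiredCount=1,
    # One of ECS (rolling update), CODE_DEPLOY (blue/green) or EXTERNAL,
    # matching the choices exposed by the new deployment_controller option.
    # A real CODE_DEPLOY service also requires loadBalancers/target groups.
    deploymentController={"type": "CODE_DEPLOY"},
)
print(response["service"]["deploymentController"])
```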
Files identified in the description: None If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> @berenddeboer: Greetings! Thanks for taking the time to open this issue. In order for the community to handle your issue effectively, we need a bit more information. Here are the items we could not find in your description: - component name Please set the description of this issue with this template: https://raw.githubusercontent.com/ansible/ansible/devel/.github/ISSUE_TEMPLATE/bug_report.md [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: issue_missing_data --->
2022-07-04T07:54:34
ansible-collections/community.aws
1,324
ansible-collections__community.aws-1324
[ "88" ]
809698ab2edc639e6690d1c3ea517dc9547606f4
diff --git a/plugins/modules/route53_health_check.py b/plugins/modules/route53_health_check.py --- a/plugins/modules/route53_health_check.py +++ b/plugins/modules/route53_health_check.py @@ -44,7 +44,7 @@ description: - The type of health check that you want to create, which indicates how Amazon Route 53 determines whether an endpoint is healthy. - required: true + - Once health_check is created, type can not be changed. choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ] type: str resource_path: @@ -86,6 +86,28 @@ - Will default to C(3) if not specified on creation. choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] type: int + health_check_name: + description: + - Name of the Health Check. + - Used together with I(use_unique_names) to set/make use of I(health_check_name) as a unique identifier. + type: str + required: False + aliases: ['name'] + version_added: 4.1.0 + use_unique_names: + description: + - Used together with I(health_check_name) to set/make use of I(health_check_name) as a unique identifier. + type: bool + required: False + version_added: 4.1.0 + health_check_id: + description: + - ID of the health check to be update or deleted. + - If provided, a health check can be updated or deleted based on the ID as unique identifier. + type: str + required: False + aliases: ['id'] + version_added: 4.1.0 author: - "zimbatm (@zimbatm)" notes: @@ -120,10 +142,35 @@ weight: 100 health_check: "{{ my_health_check.health_check.id }}" +- name: create a simple health check with health_check_name as unique identifier + community.aws.route53_health_check: + state: present + health_check_name: ansible + fqdn: ansible.com + port: 443 + type: HTTPS + use_unique_names: true + - name: Delete health-check community.aws.route53_health_check: state: absent fqdn: host1.example.com + +- name: Update Health check by ID - update ip_address + community.aws.route53_health_check: + id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx + ip_address: 1.2.3.4 + +- name: Update Health check by ID - update port + community.aws.route53_health_check: + id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx + ip_address: 8080 + +- name: Delete Health check by ID + community.aws.route53_health_check: + state: absent + id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx + ''' RETURN = r''' @@ -249,7 +296,6 @@ def find_health_check(ip_addr, fqdn, hc_type, request_interval, port): # Additionally, we can't properly wrap the paginator, so retrying means # starting from scratch with a paginator results = _list_health_checks() - while True: for check in results.get('HealthChecks'): config = check.get('HealthCheckConfig') @@ -268,6 +314,20 @@ def find_health_check(ip_addr, fqdn, hc_type, request_interval, port): return None +def get_existing_checks_with_name(): + results = _list_health_checks() + health_checks_with_name = {} + while True: + for check in results.get('HealthChecks'): + if 'Name' in describe_health_check(check['Id'])['tags']: + check_name = describe_health_check(check['Id'])['tags']['Name'] + health_checks_with_name[check_name] = check + if results.get('IsTruncated', False): + results = _list_health_checks(Marker=results.get('NextMarker')) + else: + return health_checks_with_name + + def delete_health_check(check_id): if not check_id: return False, None @@ -348,10 +408,14 @@ def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_ def update_health_check(existing_check): - # In theory it's also possible to update the IPAddress, Port and - # FullyQualifiedDomainName, however, because we use these in lieu of a - # 
'Name' to uniquely identify the health check this isn't currently - # supported. If we accepted an ID it would be possible to modify them. + # It's possible to update following parameters + # - ResourcePath + # - SearchString + # - FailureThreshold + # - Disabled + # - IPAddress + # - Port + # - FullyQualifiedDomainName changes = dict() existing_config = existing_check.get('HealthCheckConfig') @@ -372,10 +436,23 @@ def update_health_check(existing_check): if disabled is not None and disabled != existing_config.get('Disabled'): changes['Disabled'] = module.params.get('disabled') + # If updating based on Health Check ID or health_check_name, we can update + if module.params.get('health_check_id') or module.params.get('use_unique_names'): + ip_address = module.params.get('ip_address', None) + if ip_address is not None and ip_address != existing_config.get('IPAddress'): + changes['IPAddress'] = module.params.get('ip_address') + + port = module.params.get('port', None) + if port is not None and port != existing_config.get('Port'): + changes['Port'] = module.params.get('port') + + fqdn = module.params.get('fqdn', None) + if fqdn is not None and fqdn != existing_config.get('FullyQualifiedDomainName'): + changes['FullyQualifiedDomainName'] = module.params.get('fqdn') + # No changes... if not changes: return False, None - if module.check_mode: return True, 'update' @@ -419,7 +496,7 @@ def main(): disabled=dict(type='bool'), ip_address=dict(), port=dict(type='int'), - type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']), + type=dict(choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']), resource_path=dict(), fqdn=dict(), string_match=dict(), @@ -427,16 +504,27 @@ def main(): failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), tags=dict(type='dict', aliases=['resource_tags']), purge_tags=dict(type='bool'), + health_check_id=dict(type='str', aliases=['id'], required=False), + health_check_name=dict(type='str', aliases=['name'], required=False), + use_unique_names=dict(type='bool', required=False), ) args_one_of = [ - ['ip_address', 'fqdn'], + ['ip_address', 'fqdn', 'health_check_id'], ] args_if = [ ['type', 'TCP', ('port',)], ] + args_required_together = [ + ['use_unique_names', 'health_check_name'], + ] + + args_mutually_exclusive = [ + ['health_check_id', 'health_check_name'] + ] + global module global client @@ -444,6 +532,8 @@ def main(): argument_spec=argument_spec, required_one_of=args_one_of, required_if=args_if, + required_together=args_required_together, + mutually_exclusive=args_mutually_exclusive, supports_check_mode=True, ) @@ -455,6 +545,9 @@ def main(): version='5.0.0', collection_name='community.aws') module.params['purge_tags'] = False + if not module.params.get('health_check_id') and not module.params.get('type'): + module.fail_json(msg="parameter 'type' is required if not updating or deleting health check by ID.") + state_in = module.params.get('state') ip_addr_in = module.params.get('ip_address') port_in = module.params.get('port') @@ -464,6 +557,8 @@ def main(): string_match_in = module.params.get('string_match') request_interval_in = module.params.get('request_interval') failure_threshold_in = module.params.get('failure_threshold') + health_check_name = module.params.get('health_check_name') + tags = module.params.get('tags') # Default port if port_in is None: @@ -484,22 +579,66 @@ def main(): action = None check_id = None - existing_check = find_health_check(ip_addr_in, fqdn_in, type_in, 
request_interval_in, port_in) - - if existing_check: - check_id = existing_check.get('Id') - + if module.params.get('use_unique_names') or module.params.get('health_check_id'): + module.deprecate( + 'The health_check_name is currently non required parameter.' + ' This behavior will change and health_check_name ' + ' will change to required=True and use_unique_names will change to default=True in release 6.0.0.', + version='6.0.0', collection_name='community.aws') + + # If update or delete Health Check based on ID + update_delete_by_id = False + if module.params.get('health_check_id'): + update_delete_by_id = True + id_to_update_delete = module.params.get('health_check_id') + try: + existing_check = client.get_health_check(HealthCheckId=id_to_update_delete)['HealthCheck'] + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.exit_json(changed=False, msg='The specified health check with ID: {0} does not exist'.format(id_to_update_delete)) + else: + existing_check = find_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + if existing_check: + check_id = existing_check.get('Id') + + # Delete Health Check if state_in == 'absent': - changed, action = delete_health_check(check_id) + if update_delete_by_id: + changed, action = delete_health_check(id_to_update_delete) + else: + changed, action = delete_health_check(check_id) check_id = None + + # Create Health Check elif state_in == 'present': - if existing_check is None: + if existing_check is None and not module.params.get('use_unique_names') and not update_delete_by_id: changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + + # Update Health Check else: - changed, action = update_health_check(existing_check) + # If health_check_name is a unique identifier + if module.params.get('use_unique_names'): + existing_checks_with_name = get_existing_checks_with_name() + # update the health_check if another health check with same name exists + if health_check_name in existing_checks_with_name: + changed, action = update_health_check(existing_checks_with_name[health_check_name]) + else: + # create a new health_check if another health check with same name does not exists + changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + # Add tag to add name to health check + if check_id: + if not tags: + tags = {} + tags['Name'] = health_check_name + + else: + if update_delete_by_id: + changed, action = update_health_check(existing_check) + else: + changed, action = update_health_check(existing_check) + if check_id: changed |= manage_tags(module, client, 'healthcheck', check_id, - module.params.get('tags'), module.params.get('purge_tags')) + tags, module.params.get('purge_tags')) health_check = describe_health_check(id=check_id) health_check['action'] = action
diff --git a/tests/integration/targets/route53_health_check/defaults/main.yml b/tests/integration/targets/route53_health_check/defaults/main.yml --- a/tests/integration/targets/route53_health_check/defaults/main.yml +++ b/tests/integration/targets/route53_health_check/defaults/main.yml @@ -11,6 +11,7 @@ #ip_address: We allocate an EIP due to route53 restrictions fqdn: '{{ tiny_prefix }}.route53-health.ansible.test' +fqdn_1: '{{ tiny_prefix }}-1.route53-health.ansible.test' port: 8080 type: 'TCP' request_interval: 30 @@ -27,7 +28,9 @@ failure_threshold_updated: 1 # for string_match we need an _STR_MATCH type type_https_match: 'HTTPS_STR_MATCH' type_http_match: 'HTTP_STR_MATCH' +type_http: 'HTTP' resource_path: '/health.php' +resource_path_1: '/new-health.php' resource_path_updated: '/healthz' string_match: 'Hello' string_match_updated: 'Hello World' diff --git a/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml b/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml @@ -0,0 +1,134 @@ +--- +- block: + - name: 'Create multiple HTTP health checks with different resource_path - check_mode' + route53_health_check: + state: present + name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http }}' + resource_path: '{{ item }}' + use_unique_names: true + register: create_check + check_mode: true + with_items: + - '{{ resource_path }}' + - '{{ resource_path_1 }}' + + - name: 'Check result - Create a HTTP health check - check_mode' + assert: + that: + - create_check is not failed + - create_check is changed + - '"route53:CreateHealthCheck" not in create_check.results[0].resource_actions' + - '"route53:CreateHealthCheck" not in create_check.results[1].resource_actions' + + - name: 'Create multiple HTTP health checks with different resource_path' + route53_health_check: + state: present + name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http }}' + resource_path: '{{ item }}' + use_unique_names: true + register: create_result + with_items: + - '{{ resource_path }}' + - '{{ resource_path_1 }}' + + - name: Get ID's for health_checks created in above task + set_fact: + health_check_1_id: "{{ create_result.results[0].health_check.id }}" + health_check_2_id: "{{ create_result.results[1].health_check.id }}" + + - name: Get health_check 1 info + community.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_1_id }}" + health_check_method: details + register: health_check_1_info + + - name: Get health_check 2 info + community.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_2_id }}" + health_check_method: details + register: health_check_2_info + + - name: 'Check result - Create multiple HTTP health check' + assert: + that: + - create_result is not failed + - create_result is changed + - '"route53:UpdateHealthCheck" not in create_result.results[0].resource_actions' + - '"route53:UpdateHealthCheck" not in create_result.results[1].resource_actions' + - health_check_1_id != health_check_2_id + - health_check_1_info.HealthCheck.HealthCheckConfig.ResourcePath == '{{ resource_path }}' + - health_check_2_info.HealthCheck.HealthCheckConfig.ResourcePath == '{{ resource_path_1 }}' + + - name: 'Create multiple HTTP health 
checks with different resource_path - idempotency - check_mode' + route53_health_check: + state: present + name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http }}' + resource_path: '{{ item }}' + use_unique_names: true + register: create_idem_check + check_mode: true + with_items: + - '{{ resource_path }}' + - '{{ resource_path_1 }}' + + - name: 'Check result - Create multiple HTTP health check - idempotency - check_mode' + assert: + that: + - create_idem_check is not failed + - create_idem_check is not changed + - '"route53:CreateHealthCheck" not in create_idem_check.results[0].resource_actions' + - '"route53:CreateHealthCheck" not in create_idem_check.results[1].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem_check.results[0].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem_check.results[1].resource_actions' + + - name: 'Create multiple HTTP health checks with different resource_path - idempotency' + route53_health_check: + state: present + name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http }}' + resource_path: '{{ item }}' + use_unique_names: true + register: create_idem + check_mode: true + with_items: + - '{{ resource_path }}' + - '{{ resource_path_1 }}' + + - name: 'Check result - Create multiple HTTP health check - idempotency - check_mode' + assert: + that: + - create_idem is not failed + - create_idem is not changed + - '"route53:CreateHealthCheck" not in create_idem.results[0].resource_actions' + - '"route53:CreateHealthCheck" not in create_idem.results[1].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem.results[0].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem.results[1].resource_actions' + + always: + # Cleanup starts here + - name: 'Delete multiple HTTP health checks with different resource_path' + route53_health_check: + state: absent + name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http }}' + resource_path: '{{ item }}' + use_unique_names: true + register: delete_result + with_items: + - '{{ resource_path }}' + - '{{ resource_path_1 }}' diff --git a/tests/integration/targets/route53_health_check/tasks/main.yml b/tests/integration/targets/route53_health_check/tasks/main.yml --- a/tests/integration/targets/route53_health_check/tasks/main.yml +++ b/tests/integration/targets/route53_health_check/tasks/main.yml @@ -32,6 +32,12 @@ - set_fact: ip_address: '{{ eip.public_ip }}' + - name: Run tests for creating multiple health checks with name as unique identifier + include_tasks: create_multiple_health_checks.yml + + - name: Run tests for update and delete health check by ID + include_tasks: update_delete_by_id.yml + # Minimum possible definition - name: 'Create a TCP health check - check_mode' route53_health_check: diff --git a/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml b/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml @@ -0,0 +1,303 @@ +--- +- block: + - name: 'Create HTTP health check for use in this test' + route53_health_check: + state: present + name: '{{ tiny_prefix }}-test-update-delete-by-id' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ 
type_http }}' + resource_path: '{{ resource_path }}' + fqdn: '{{ fqdn }}' + use_unique_names: true + register: create_result + + - name: 'Check result - Create HTTP health check' + assert: + that: + - create_result is not failed + - create_result is changed + - '"route53:CreateHealthCheck" in create_result.resource_actions' + + - name: Get ID for health_checks created in above task + set_fact: + health_check_id: "{{ create_result.health_check.id }}" + + - name: Get health_check info + community.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_id }}" + health_check_method: details + register: health_check_info + + # Update Health Check by ID Tests + - name: 'Update Health Check by ID - Update Port - check_mode' + route53_health_check: + id: "{{ health_check_id }}" + port: 8888 + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check Port - check_mode' + assert: + that: + - update_result is not failed + - update_result is changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by ID - Update Port' + route53_health_check: + id: "{{ health_check_id }}" + port: 8888 + register: update_result + + - name: Get health_check info + community.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_id }}" + health_check_method: details + register: health_check_info + + - name: 'Check result - Update Health Check Port' + assert: + that: + - update_result is not failed + - update_result is changed + - health_check_info.HealthCheck.HealthCheckConfig.Port == 8888 + + + - name: 'Update Health Check by ID - Update Port - idempotency - check_mode' + route53_health_check: + id: "{{ health_check_id }}" + port: 8888 + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check Port - idempotency - check_mode' + assert: + that: + - update_result is not failed + - update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by ID - Update Port - idempotency' + route53_health_check: + id: "{{ health_check_id }}" + port: 8888 + register: update_result + + - name: 'Check result - Update Health Check Port - idempotency' + assert: + that: + - update_result is not failed + - update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + ## + - name: 'Update Health Check by ID - Update IP address and FQDN - check_mode' + route53_health_check: + id: "{{ health_check_id }}" + ip_address: 1.2.3.4 + fqdn: '{{ fqdn_1 }}' + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check IP address and FQDN - check_mode' + assert: + that: + - update_result is not failed + - update_result is changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by ID - Update IP address and FQDN' + route53_health_check: + id: "{{ health_check_id }}" + ip_address: 1.2.3.4 + fqdn: '{{ fqdn_1 }}' + register: update_result + + - name: Get health_check info + community.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_id }}" + health_check_method: details + register: health_check_info + + - name: 'Check result - Update Health Check IP address and FQDN' + assert: + that: + - update_result is not failed + - update_result is changed + - health_check_info.HealthCheck.HealthCheckConfig.IPAddress == '1.2.3.4' + - 
health_check_info.HealthCheck.HealthCheckConfig.FullyQualifiedDomainName == "{{ fqdn_1 }}" + + + - name: 'Update Health Check by ID - Update IP address and FQDN - idempotency - check_mode' + route53_health_check: + id: "{{ health_check_id }}" + ip_address: 1.2.3.4 + fqdn: '{{ fqdn_1 }}' + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check IP address and FQDN - idempotency - check_mode' + assert: + that: + - update_result is not failed + - update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by ID - Update IP address and FQDN - idempotency' + route53_health_check: + id: "{{ health_check_id }}" + ip_address: 1.2.3.4 + fqdn: '{{ fqdn_1 }}' + register: update_result + + - name: 'Check result - Update Health Check IP address and FQDN - idempotency' + assert: + that: + - update_result is not failed + - update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + # Update Health Check (Port) by name + + - name: 'Update Health Check by name - Update Port - check_mode' + route53_health_check: + state: present + port: 8080 + type: '{{ type_http }}' + fqdn: '{{ fqdn }}' + health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + use_unique_names: true + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check Port - check_mode' + assert: + that: + - update_result is not failed + - update_result is changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by name - Update Port' + route53_health_check: + state: present + port: 8080 + type: '{{ type_http }}' + fqdn: '{{ fqdn }}' + health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + use_unique_names: true + register: update_result + + - name: Get health_check info + community.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_id }}" + health_check_method: details + register: health_check_info + + - name: 'Check result - Update Health Check Port' + assert: + that: + - update_result is not failed + - update_result is changed + - health_check_info.HealthCheck.HealthCheckConfig.Port == 8080 + + - name: 'Update Health Check by name - Update Port - idempotency - check_mode' + route53_health_check: + state: present + port: 8080 + type: '{{ type_http }}' + fqdn: '{{ fqdn }}' + health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + use_unique_names: true + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check Port - idempotency - check_mode' + assert: + that: + - update_result is not failed + - update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by name - Update Port - idempotency' + route53_health_check: + state: present + port: 8080 + type: '{{ type_http }}' + fqdn: '{{ fqdn }}' + health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + use_unique_names: true + register: update_result + + - name: 'Check result - Update Health Check Port - idempotency' + assert: + that: + - update_result is not failed + - update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + # Delete Health Check by ID Tests + - name: Delete Health check by ID - check_mode + route53_health_check: + state: absent + id: "{{ health_check_id }}" + register: delete_result + check_mode: true + + - name: 'Check result - 
Delete Health Check by ID -check_mode' + assert: + that: + - delete_result is not failed + - delete_result is changed + - '"route53:DeleteHealthCheck" not in delete_result.resource_actions' + + - name: Delete Health check by ID + route53_health_check: + state: absent + id: "{{ health_check_id }}" + register: delete_result + + - name: 'Check result - Delete Health Check by ID' + assert: + that: + - delete_result is not failed + - delete_result is changed + - '"route53:DeleteHealthCheck" in delete_result.resource_actions' + + - name: Delete Health check by ID - idempotency - check_mode + route53_health_check: + state: absent + id: "{{ health_check_id }}" + register: delete_result + check_mode: true + + - name: 'Check result - Delete Health Check by ID -idempotency -check_mode' + assert: + that: + - delete_result is not failed + - delete_result is not changed + - '"route53:DeleteHealthCheck" not in delete_result.resource_actions' + + - name: Delete Health check by ID - idempotency + route53_health_check: + state: absent + id: "{{ health_check_id }}" + register: delete_result + + - name: 'Check result - Delete Health Check by ID -idempotency' + assert: + that: + - delete_result is not failed + - delete_result is not changed + - '"route53:DeleteHealthCheck" not in delete_result.resource_actions' + + # cleanup + always: + - name: Delete Health check by ID + route53_health_check: + state: absent + id: "{{ health_check_id }}"
route53_health_check resource_path uniqueness ignored <!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> Moving [issue](https://github.com/ansible/ansible/issues/23110) from ansible repository to community.aws collections. Tried to create multiple route53 healthchecks to the same server but different resource paths using with_items . Instead of creating one health check for each resource path, only the first is created. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.2.2.0 ``` ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: set up route53 health checks connection: local become: false route53_health_check: failure_threshold: 3 ip_address: '{{ ip_address }}' port: 80 request_interval: 30 resource_path: '{{ item }}' state: present type: HTTP register: result with_items: - /some_status1 - /other_status2 ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> 2 health checks created result contains health_check.ids for each health check ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> 1 health check created result contains two items, both having the same health_check.id <!--- Paste verbatim command output between quotes --> ```paste below ```
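The fix keys existing health checks off a `Name` tag (via `use_unique_names` / `health_check_name`) instead of the (ip, port, type, interval) tuple that made checks differing only in `resource_path` collide. A rough standalone sketch of that lookup using plain boto3; the helper name is mine, not the module's:

```python
# Rough sketch of the name-tag lookup the patch introduces: build a map of
# existing Route 53 health checks keyed by their "Name" tag, so checks that
# differ only in resource_path no longer collide. Helper name is illustrative.
import boto3

route53 = boto3.client("route53")


def checks_by_name_tag():
    checks = {}
    paginator = route53.get_paginator("list_health_checks")
    for page in paginator.paginate():
        for check in page["HealthChecks"]:
            tags = route53.list_tags_for_resource(
                ResourceType="healthcheck", ResourceId=check["Id"]
            )["ResourceTagSet"]["Tags"]
            name = next((t["Value"] for t in tags if t["Key"] == "Name"), None)
            if name is not None:
                checks[name] = check
    return checks


# A check is only updated when one with the same Name tag already exists;
# otherwise a new check is created, so two checks that differ only in
# resource_path can coexist.
existing = checks_by_name_tag()
```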
Files identified in the description: None If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner --->
2022-07-07T07:21:40
ansible-collections/community.aws
1,379
ansible-collections__community.aws-1379
[ "1378" ]
1ac84c305b3853477cb78af35aa89902ac98f1fb
diff --git a/plugins/modules/route53.py b/plugins/modules/route53.py --- a/plugins/modules/route53.py +++ b/plugins/modules/route53.py @@ -628,7 +628,7 @@ def main(): if command_in == 'create' or command_in == 'delete': if alias_in and len(value_in) != 1: module.fail_json(msg="parameter 'value' must contain a single dns name for alias records") - if not any([weight_in, region_in, failover_in, geo_location]) and identifier_in is not None: + if (weight_in is None and region_in is None and failover_in is None and geo_location is None) and identifier_in is not None: module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover.") retry_decorator = AWSRetry.jittered_backoff(
diff --git a/tests/integration/targets/route53/tasks/main.yml b/tests/integration/targets/route53/tasks/main.yml --- a/tests/integration/targets/route53/tasks/main.yml +++ b/tests/integration/targets/route53/tasks/main.yml @@ -635,6 +635,42 @@ - weighted_record is not failed - weighted_record is not changed + - name: 'Create a zero weighted record' + route53: + state: present + zone: '{{ zone_one }}' + record: 'zero_weighted.{{ zone_one }}' + type: CNAME + value: 'zid_test.{{ zone_one }}' + overwrite: True + identifier: "host1@www" + weight: 0 + region: '{{ omit }}' + register: weighted_record + - name: 'This should be changed' + assert: + that: + - weighted_record is not failed + - weighted_record is changed + + - name: 'Re-Create a zero weighted record' + route53: + state: present + zone: '{{ zone_one }}' + record: 'zero_weighted.{{ zone_one }}' + type: CNAME + value: 'zid_test.{{ zone_one }}' + overwrite: True + identifier: "host1@www" + weight: 0 + region: '{{ omit }}' + register: weighted_record + - name: 'This should not be changed' + assert: + that: + - weighted_record is not failed + - weighted_record is not changed + #Test Geo Location - Continent Code - name: Create a record with geo_location - continent_code (check_mode) route53:
Route53 module no longer supports 0 weights ### Summary When trying to create a Route53 DNS record with a weight of 0, I receive the error: "You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover." This appears to have been caused by this commit: https://github.com/ansible-collections/community.aws/commit/59f06ed666a5f97adb5f75e086fad0660c49f35f#diff-39a0039c8f787ef918e13c9e34de717b42890a4baada9448cd80a76242bffee4L570-R632 this is changing the weight comparison from `weight_in is None` to `not any([weight_in...`. A weight of zero returns false to the first comparison, but true to the latter. ### Issue Type Bug Report ### Component Name route53 ### Ansible Version ```console (paste below) ansible [core 2.13.2] config file = /etc/ansible/ansible.cfg configured module search path = ['/home/centos/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/site-packages/ansible ansible collection location = /home/centos/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.8.13 (default, Jul 26 2022, 16:15:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) Collection Version ----------------------------- ------- amazon.aws 3.3.1 ansible.netcommon 3.0.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.10.0 arista.eos 5.0.1 awx.awx 21.2.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.0 cisco.aci 2.2.0 cisco.asa 3.1.0 cisco.dnac 6.5.0 cisco.intersight 1.0.19 cisco.ios 3.2.0 cisco.iosxr 3.2.0 cisco.ise 2.5.0 cisco.meraki 2.10.0 cisco.mso 2.0.0 cisco.nso 1.0.3 cisco.nxos 3.1.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.4.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.4.0 community.digitalocean 1.21.0 community.dns 2.2.1 community.docker 2.7.0 community.fortios 1.0.0 community.general 5.3.0 community.google 1.0.0 community.grafana 1.5.0 community.hashi_vault 3.0.0 community.hrobot 1.4.0 community.libvirt 1.1.0 community.mongodb 1.4.1 community.mysql 3.3.0 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.1.5 community.proxysql 1.4.0 community.rabbitmq 1.2.1 community.routeros 2.1.0 community.sap 1.0.0 community.sap_libs 1.1.0 community.skydive 1.0.0 community.sops 1.2.3 community.vmware 2.7.0 community.windows 1.10.0 community.zabbix 1.7.0 containers.podman 1.9.4 cyberark.conjur 1.1.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.1 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.18.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.6 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.1 hpe.nimble 1.1.4 ibm.qradar 2.0.0 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.3.0 inspur.sm 2.0.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.18.0 netapp.elementsw 21.7.0 netapp.ontap 21.20.0 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.1.0 purestorage.flasharray 1.13.0 purestorage.flashblade 1.9.0 purestorage.fusion 1.0.2 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 
2.0.0 t_systems_mms.icinga_director 1.30.0 theforeman.foreman 3.4.0 vmware.vmware_rest 2.2.0 vyos.vyos 3.0.1 wti.remote 1.0.4 ``` ### AWS SDK versions ```console (paste below) Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /usr/local/lib/python3.8/site-packages Requires: Required-by: --- Name: boto3 Version: 1.24.37 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.37 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: awscli, boto3, s3transfer ``` ### Configuration ```console (paste below) [DEPRECATION WARNING]: [defaults]callback_whitelist option, normalizing names to new standard, use callbacks_enabled instead. This feature will be removed from ansible-core in version 2.15. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. CALLBACKS_ENABLED(/etc/ansible/ansible.cfg) = ['profile_tasks'] DEFAULT_GATHERING(/etc/ansible/ansible.cfg) = smart DEFAULT_GATHER_TIMEOUT(/etc/ansible/ansible.cfg) = 60 DEFAULT_HASH_BEHAVIOUR(/etc/ansible/ansible.cfg) = merge ``` ### OS / Environment CentOS 7 and Mac OS 12.5 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: Bug demo hosts: localhost tasks: - name: Set 0 weight for old env route53: wait: yes ttl: '5' type: 'CNAME' identifier: old overwrite: yes record: 'record.example.com.' zone: 'example.com.' value: 'record-old.example.com.' weight: '0' state: present ``` ### Expected Results I expect the DNS record to be created with a weight of zero. ### Actual Results ```console (paste below) TASK [Set 0 weight for old env] *********************************************************************************************** fatal: [localhost]: FAILED! => {"changed": false, "msg": "You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover."} ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
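The regression the report traces is a plain truthiness issue: `0` is falsy, so the `any()`-based guard cannot distinguish an explicit weight of 0 from an omitted weight. A minimal illustration of the old check versus the fixed one from the patch above:

```python
# Minimal illustration of the regression described above: a weight of 0 is
# falsy, so the any()-based guard treats "weight 0" like "weight omitted".
weight_in, region_in, failover_in, geo_location = 0, None, None, None
identifier_in = "old"

# Old check (rejects weight=0 even though identifier plus weight is valid):
old_fails = not any([weight_in, region_in, failover_in, geo_location]) and identifier_in is not None
print(old_fails)   # True -> module.fail_json(...) is called

# Fixed check from the patch (only fails when all four are truly absent):
new_fails = (
    weight_in is None
    and region_in is None
    and failover_in is None
    and geo_location is None
) and identifier_in is not None
print(new_fails)   # False -> zero-weighted records are accepted again
```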
Files identified in the description: * [`plugins/modules/route53.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/route53.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @jimbydamonk @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2022-08-01T14:58:09
ansible-collections/community.aws
1,381
ansible-collections__community.aws-1381
[ "1378" ]
078c94c660d98bdd7a36ce5d59038597db89aff0
diff --git a/plugins/modules/route53.py b/plugins/modules/route53.py --- a/plugins/modules/route53.py +++ b/plugins/modules/route53.py @@ -628,7 +628,7 @@ def main(): if command_in == 'create' or command_in == 'delete': if alias_in and len(value_in) != 1: module.fail_json(msg="parameter 'value' must contain a single dns name for alias records") - if not any([weight_in, region_in, failover_in, geo_location]) and identifier_in is not None: + if (weight_in is None and region_in is None and failover_in is None and geo_location is None) and identifier_in is not None: module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover.") retry_decorator = AWSRetry.jittered_backoff(
diff --git a/tests/integration/targets/route53/tasks/main.yml b/tests/integration/targets/route53/tasks/main.yml --- a/tests/integration/targets/route53/tasks/main.yml +++ b/tests/integration/targets/route53/tasks/main.yml @@ -635,6 +635,42 @@ - weighted_record is not failed - weighted_record is not changed + - name: 'Create a zero weighted record' + route53: + state: present + zone: '{{ zone_one }}' + record: 'zero_weighted.{{ zone_one }}' + type: CNAME + value: 'zid_test.{{ zone_one }}' + overwrite: True + identifier: "host1@www" + weight: 0 + region: '{{ omit }}' + register: weighted_record + - name: 'This should be changed' + assert: + that: + - weighted_record is not failed + - weighted_record is changed + + - name: 'Re-Create a zero weighted record' + route53: + state: present + zone: '{{ zone_one }}' + record: 'zero_weighted.{{ zone_one }}' + type: CNAME + value: 'zid_test.{{ zone_one }}' + overwrite: True + identifier: "host1@www" + weight: 0 + region: '{{ omit }}' + register: weighted_record + - name: 'This should not be changed' + assert: + that: + - weighted_record is not failed + - weighted_record is not changed + #Test Geo Location - Continent Code - name: Create a record with geo_location - continent_code (check_mode) route53:
Route53 module no longer supports 0 weights ### Summary When trying to create a Route53 DNS record with a weight of 0, I receive the error: "You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover." This appears to have been caused by this commit: https://github.com/ansible-collections/community.aws/commit/59f06ed666a5f97adb5f75e086fad0660c49f35f#diff-39a0039c8f787ef918e13c9e34de717b42890a4baada9448cd80a76242bffee4L570-R632 this is changing the weight comparison from `weight_in is None` to `not any([weight_in...`. A weight of zero returns false to the first comparison, but true to the latter. ### Issue Type Bug Report ### Component Name route53 ### Ansible Version ```console (paste below) ansible [core 2.13.2] config file = /etc/ansible/ansible.cfg configured module search path = ['/home/centos/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/site-packages/ansible ansible collection location = /home/centos/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.8.13 (default, Jul 26 2022, 16:15:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) Collection Version ----------------------------- ------- amazon.aws 3.3.1 ansible.netcommon 3.0.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.10.0 arista.eos 5.0.1 awx.awx 21.2.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.0 cisco.aci 2.2.0 cisco.asa 3.1.0 cisco.dnac 6.5.0 cisco.intersight 1.0.19 cisco.ios 3.2.0 cisco.iosxr 3.2.0 cisco.ise 2.5.0 cisco.meraki 2.10.0 cisco.mso 2.0.0 cisco.nso 1.0.3 cisco.nxos 3.1.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.4.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.4.0 community.digitalocean 1.21.0 community.dns 2.2.1 community.docker 2.7.0 community.fortios 1.0.0 community.general 5.3.0 community.google 1.0.0 community.grafana 1.5.0 community.hashi_vault 3.0.0 community.hrobot 1.4.0 community.libvirt 1.1.0 community.mongodb 1.4.1 community.mysql 3.3.0 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.1.5 community.proxysql 1.4.0 community.rabbitmq 1.2.1 community.routeros 2.1.0 community.sap 1.0.0 community.sap_libs 1.1.0 community.skydive 1.0.0 community.sops 1.2.3 community.vmware 2.7.0 community.windows 1.10.0 community.zabbix 1.7.0 containers.podman 1.9.4 cyberark.conjur 1.1.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.1 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.18.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.6 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.1 hpe.nimble 1.1.4 ibm.qradar 2.0.0 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.3.0 inspur.sm 2.0.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.18.0 netapp.elementsw 21.7.0 netapp.ontap 21.20.0 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.1.0 purestorage.flasharray 1.13.0 purestorage.flashblade 1.9.0 purestorage.fusion 1.0.2 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 
2.0.0 t_systems_mms.icinga_director 1.30.0 theforeman.foreman 3.4.0 vmware.vmware_rest 2.2.0 vyos.vyos 3.0.1 wti.remote 1.0.4 ``` ### AWS SDK versions ```console (paste below) Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /usr/local/lib/python3.8/site-packages Requires: Required-by: --- Name: boto3 Version: 1.24.37 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.37 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: awscli, boto3, s3transfer ``` ### Configuration ```console (paste below) [DEPRECATION WARNING]: [defaults]callback_whitelist option, normalizing names to new standard, use callbacks_enabled instead. This feature will be removed from ansible-core in version 2.15. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. CALLBACKS_ENABLED(/etc/ansible/ansible.cfg) = ['profile_tasks'] DEFAULT_GATHERING(/etc/ansible/ansible.cfg) = smart DEFAULT_GATHER_TIMEOUT(/etc/ansible/ansible.cfg) = 60 DEFAULT_HASH_BEHAVIOUR(/etc/ansible/ansible.cfg) = merge ``` ### OS / Environment CentOS 7 and Mac OS 12.5 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: Bug demo hosts: localhost tasks: - name: Set 0 weight for old env route53: wait: yes ttl: '5' type: 'CNAME' identifier: old overwrite: yes record: 'record.example.com.' zone: 'example.com.' value: 'record-old.example.com.' weight: '0' state: present ``` ### Expected Results I expect the DNS record to be created with a weight of zero. ### Actual Results ```console (paste below) TASK [Set 0 weight for old env] *********************************************************************************************** fatal: [localhost]: FAILED! => {"changed": false, "msg": "You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover."} ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/route53.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/route53.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @jimbydamonk @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2022-08-02T09:46:14
ansible-collections/community.aws
1,382
ansible-collections__community.aws-1382
[ "1378" ]
eced0bbd1bfb535917847f06b383d955346d15d3
diff --git a/plugins/modules/route53.py b/plugins/modules/route53.py --- a/plugins/modules/route53.py +++ b/plugins/modules/route53.py @@ -628,7 +628,7 @@ def main(): if command_in == 'create' or command_in == 'delete': if alias_in and len(value_in) != 1: module.fail_json(msg="parameter 'value' must contain a single dns name for alias records") - if not any([weight_in, region_in, failover_in, geo_location]) and identifier_in is not None: + if (weight_in is None and region_in is None and failover_in is None and geo_location is None) and identifier_in is not None: module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover.") retry_decorator = AWSRetry.jittered_backoff(
diff --git a/tests/integration/targets/route53/tasks/main.yml b/tests/integration/targets/route53/tasks/main.yml --- a/tests/integration/targets/route53/tasks/main.yml +++ b/tests/integration/targets/route53/tasks/main.yml @@ -635,6 +635,42 @@ - weighted_record is not failed - weighted_record is not changed + - name: 'Create a zero weighted record' + route53: + state: present + zone: '{{ zone_one }}' + record: 'zero_weighted.{{ zone_one }}' + type: CNAME + value: 'zid_test.{{ zone_one }}' + overwrite: True + identifier: "host1@www" + weight: 0 + region: '{{ omit }}' + register: weighted_record + - name: 'This should be changed' + assert: + that: + - weighted_record is not failed + - weighted_record is changed + + - name: 'Re-Create a zero weighted record' + route53: + state: present + zone: '{{ zone_one }}' + record: 'zero_weighted.{{ zone_one }}' + type: CNAME + value: 'zid_test.{{ zone_one }}' + overwrite: True + identifier: "host1@www" + weight: 0 + region: '{{ omit }}' + register: weighted_record + - name: 'This should not be changed' + assert: + that: + - weighted_record is not failed + - weighted_record is not changed + #Test Geo Location - Continent Code - name: Create a record with geo_location - continent_code (check_mode) route53:
Route53 module no longer supports 0 weights ### Summary When trying to create a Route53 DNS record with a weight of 0, I receive the error: "You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover." This appears to have been caused by this commit: https://github.com/ansible-collections/community.aws/commit/59f06ed666a5f97adb5f75e086fad0660c49f35f#diff-39a0039c8f787ef918e13c9e34de717b42890a4baada9448cd80a76242bffee4L570-R632 this is changing the weight comparison from `weight_in is None` to `not any([weight_in...`. A weight of zero returns false to the first comparison, but true to the latter. ### Issue Type Bug Report ### Component Name route53 ### Ansible Version ```console (paste below) ansible [core 2.13.2] config file = /etc/ansible/ansible.cfg configured module search path = ['/home/centos/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/site-packages/ansible ansible collection location = /home/centos/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.8.13 (default, Jul 26 2022, 16:15:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) Collection Version ----------------------------- ------- amazon.aws 3.3.1 ansible.netcommon 3.0.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.10.0 arista.eos 5.0.1 awx.awx 21.2.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.0 cisco.aci 2.2.0 cisco.asa 3.1.0 cisco.dnac 6.5.0 cisco.intersight 1.0.19 cisco.ios 3.2.0 cisco.iosxr 3.2.0 cisco.ise 2.5.0 cisco.meraki 2.10.0 cisco.mso 2.0.0 cisco.nso 1.0.3 cisco.nxos 3.1.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.4.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.4.0 community.digitalocean 1.21.0 community.dns 2.2.1 community.docker 2.7.0 community.fortios 1.0.0 community.general 5.3.0 community.google 1.0.0 community.grafana 1.5.0 community.hashi_vault 3.0.0 community.hrobot 1.4.0 community.libvirt 1.1.0 community.mongodb 1.4.1 community.mysql 3.3.0 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.1.5 community.proxysql 1.4.0 community.rabbitmq 1.2.1 community.routeros 2.1.0 community.sap 1.0.0 community.sap_libs 1.1.0 community.skydive 1.0.0 community.sops 1.2.3 community.vmware 2.7.0 community.windows 1.10.0 community.zabbix 1.7.0 containers.podman 1.9.4 cyberark.conjur 1.1.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.1 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.18.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.6 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.1 hpe.nimble 1.1.4 ibm.qradar 2.0.0 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.3.0 inspur.sm 2.0.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.18.0 netapp.elementsw 21.7.0 netapp.ontap 21.20.0 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.1.0 purestorage.flasharray 1.13.0 purestorage.flashblade 1.9.0 purestorage.fusion 1.0.2 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 
2.0.0 t_systems_mms.icinga_director 1.30.0 theforeman.foreman 3.4.0 vmware.vmware_rest 2.2.0 vyos.vyos 3.0.1 wti.remote 1.0.4 ``` ### AWS SDK versions ```console (paste below) Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /usr/local/lib/python3.8/site-packages Requires: Required-by: --- Name: boto3 Version: 1.24.37 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.37 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: awscli, boto3, s3transfer ``` ### Configuration ```console (paste below) [DEPRECATION WARNING]: [defaults]callback_whitelist option, normalizing names to new standard, use callbacks_enabled instead. This feature will be removed from ansible-core in version 2.15. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. CALLBACKS_ENABLED(/etc/ansible/ansible.cfg) = ['profile_tasks'] DEFAULT_GATHERING(/etc/ansible/ansible.cfg) = smart DEFAULT_GATHER_TIMEOUT(/etc/ansible/ansible.cfg) = 60 DEFAULT_HASH_BEHAVIOUR(/etc/ansible/ansible.cfg) = merge ``` ### OS / Environment CentOS 7 and Mac OS 12.5 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: Bug demo hosts: localhost tasks: - name: Set 0 weight for old env route53: wait: yes ttl: '5' type: 'CNAME' identifier: old overwrite: yes record: 'record.example.com.' zone: 'example.com.' value: 'record-old.example.com.' weight: '0' state: present ``` ### Expected Results I expect the DNS record to be created with a weight of zero. ### Actual Results ```console (paste below) TASK [Set 0 weight for old env] *********************************************************************************************** fatal: [localhost]: FAILED! => {"changed": false, "msg": "You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover."} ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/route53.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/route53.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @jimbydamonk @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2022-08-02T09:46:24
ansible-collections/community.aws
1,428
ansible-collections__community.aws-1428
[ "1413" ]
56d26c883f636225c67cd5af20418b162be5b9be
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -534,12 +534,14 @@ def _flush_stderr(self, subprocess): def _get_url(self, client_method, bucket_name, out_path, http_method, profile_name, extra_args=None): ''' Generate URL for get_object / put_object ''' - bucket_location = boto3.client('s3').get_bucket_location( + region_name = self.get_option('region') or 'us-east-1' + + bucket_location = self._get_boto_client('s3', region_name=region_name, profile_name=profile_name).get_bucket_location( Bucket=(self.get_option('bucket_name')), ) - region_name = bucket_location['LocationConstraint'] + bucket_region_name = bucket_location['LocationConstraint'] - client = self._get_boto_client('s3', region_name=region_name, profile_name=profile_name) + client = self._get_boto_client('s3', region_name=bucket_region_name, profile_name=profile_name) params = {'Bucket': bucket_name, 'Key': out_path} if extra_args is not None: params.update(extra_args)
ssm connection caught exception(Unable to locate credentials) ### Summary After upgrading `community.aws` to `4.1.1` and `amazon.aws` to `4.1.0`, my playbooks are not able to connect via ssm anymore. Extract of the logs ``` redirecting (type: connection) ansible.builtin.aws_ssm to community.aws.aws_ssm wait_for_connection: attempting ping module test <i-redacted> ESTABLISH SSM CONNECTION TO: i-redacted <i-redacted> SSM CONNECTION ID: redacted <i-redacted> EXEC ( umask 77 && mkdir -p "` echo /tmp/.ansible-ssm `"&& mkdir "` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" && echo ansible-tmp-1660641811.7266405-78-7676503826330="` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" ) <i-redacted> (0, 'ansible-tmp-1660641811.7266405-78-7676503826330=/tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330\r\r', '') Using module file /usr/local/lib/python3.10/dist-packages/ansible/modules/ping.py <i-redacted> PUT /home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve TO /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330/AnsiballZ_ping.py <i-redacted> ssm_retry: attempt: 0, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 0 seconds <i-redacted> CLOSING SSM CONNECTION TO: i-redacted <i-redacted> ssm_retry: attempt: 1, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 1 seconds <i-redacted> ssm_retry: attempt: 2, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 3 seconds ``` reverting to `community.aws` to `4.0.0` and `amazon.aws` to `4.0.0` works. ### Issue Type Bug Report ### Component Name aws_ssm ### Ansible Version ```console (paste below) $ ansible --version 2.13.1 ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list Collection Version ----------------- ------- amazon.aws 4.0.0 ansible.posix 1.4.0 community.aws 4.0.0 community.docker 2.7.0 community.general 5.2.0 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Executing pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.24.24 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.47 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - name: Wait for instance to be ready hosts: all serial: 1 gather_facts: no tasks: - name: Wait for instance to be ready wait_for_connection: ``` ### Expected Results Expect a connection to the instance ### Actual Results ```console (paste below) redirecting (type: connection) ansible.builtin.aws_ssm to community.aws.aws_ssm wait_for_connection: attempting ping module test <i-redacted> ESTABLISH SSM CONNECTION TO: i-redacted <i-redacted> SSM CONNECTION ID: redacted <i-redacted> EXEC ( umask 77 && mkdir -p "` echo /tmp/.ansible-ssm `"&& mkdir "` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" && echo ansible-tmp-1660641811.7266405-78-7676503826330="` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" ) <i-redacted> (0, 'ansible-tmp-1660641811.7266405-78-7676503826330=/tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330\r\r', '') Using module file /usr/local/lib/python3.10/dist-packages/ansible/modules/ping.py <i-redacted> PUT /home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve TO /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330/AnsiballZ_ping.py <i-redacted> ssm_retry: attempt: 0, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 0 seconds <i-redacted> CLOSING SSM CONNECTION TO: i-redacted <i-redacted> ssm_retry: attempt: 1, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 1 seconds <i-redacted> ssm_retry: attempt: 2, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 3 seconds ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/connection/aws_ssm.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/connection/aws_ssm.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> I am also experiencing this issue. This is caused by https://github.com/ansible-collections/community.aws/pull/1176, which creates and uses an S3 boto3 client without the credentials provided to the plugin.
2022-08-30T00:21:51
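The patch in this row replaces a bare `boto3.client('s3')` with the connection plugin's own client factory so that the profile and region configured for the plugin are actually used when generating S3 URLs. A rough sketch of that difference, assuming a hypothetical profile and bucket name (this mirrors the idea, not the plugin's `_get_boto_client` implementation):

```python
import boto3

def make_s3_client(profile_name=None, region_name=None):
    """Build an S3 client from an explicit profile/region, roughly what a
    plugin-level client factory does instead of a bare boto3.client('s3')."""
    session = boto3.session.Session(profile_name=profile_name,
                                    region_name=region_name)
    return session.client("s3")

# A bare boto3.client("s3") only sees the ambient credential chain, which is
# what produced "Unable to locate credentials" when credentials were supplied
# to the plugin via a profile.  The factory above keeps them aligned.
s3 = make_s3_client(profile_name="my-profile", region_name="us-east-1")

# get_bucket_location returns None for us-east-1, hence the fallback.
location = s3.get_bucket_location(Bucket="my-bucket")["LocationConstraint"]
s3_in_bucket_region = make_s3_client(profile_name="my-profile",
                                     region_name=location or "us-east-1")
```

Building the client from an explicit `Session` keeps the credentials consistent with the rest of the SSM connection instead of falling back to whatever the ambient environment provides.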
ansible-collections/community.aws
1435
ansible-collections__community.aws-1435
[ "1434" ]
de5b804abaae27421646a13602976b5a52712047
diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py --- a/plugins/modules/cloudfront_distribution.py +++ b/plugins/modules/cloudfront_distribution.py @@ -167,7 +167,16 @@ origin_keepalive_timeout: description: A keep-alive timeout (in seconds). type: int - + connection_attempts: + description: The number of times that CloudFront attempts to connect to the origin. + The minimum number is C(1), the maximum is C(3). + type: int + default: 3 + connection_timeout: + description: The number of seconds that CloudFront waits when trying to establish a connection to the origin. + The minimum timeout is C(1) second, the maximum is C(10) seconds. + type: int + default: 10 purge_origins: description: Whether to remove any origins that aren't listed in I(origins). default: false @@ -1276,6 +1285,16 @@ returned: always type: str sample: '' + connection_attempts: + description: The number of times that CloudFront attempts to connect to the origin. + returned: always + type: int + sample: 3 + connection_timeout: + description: The number of seconds that CloudFront waits when trying to establish a connection to the origin. + returned: always + type: int + sample: 10 s3_origin_config: description: Origin access identity configuration for S3 Origin. returned: when s3_origin_access_identity_enabled is true
Each `origin` can have connection attempts & timeout ### Summary As seen in https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_Origin.html on each origin from a `CustomOriginConfig` there is the possibility to specify `ConnectionAttempts` and `ConnectionTimeout`. ### Issue Type Documentation Report ### Component Name cloudfront_distribution ### Ansible Version ```ansible [core 2.13.3] config file = None configured module search path = ['/home/gpreda/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.10/site-packages/ansible ansible collection location = /home/gpreda/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.10.6 (main, Aug 2 2022, 00:00:00) [GCC 12.1.1 20220507 (Red Hat 12.1.1-1)] jinja version = 3.0.3 libyaml = False ``` ### Collection Versions ```$ ansible-galaxy collection list # /usr/local/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 4.1.0 ... community.aws 4.1.1 ... ``` ### Configuration ```$ ansible-config dump --only-changed DEFAULT_ACTION_PLUGIN_PATH(env: ANSIBLE_ACTION_PLUGINS) = ['/usr/local/lib/python3.9/site-packages/ara/plugins/action'] DEFAULT_CALLBACK_PLUGIN_PATH(env: ANSIBLE_CALLBACK_PLUGINS) = ['/usr/local/lib/python3.9/site-packages/ara/plugins/callback'] DEFAULT_LOOKUP_PLUGIN_PATH(env: ANSIBLE_LOOKUP_PLUGINS) = ['/usr/local/lib/python3.9/site-packages/ara/plugins/lookup'] ``` ### OS / Environment Fedora release 36, Centos Stream 8 ### Additional Information It's just that these details are missing from the documentation. I've managed to successfully create a CF distribution w/ the correct settings w/ the following minimal playbook: ``` --- # Standards: 1.2 - name: Try to create a CF distribution w/ ConnectionAttempts and ConnectionTimeout hosts: localhost tasks: - name: create a distribution with an origin, logging and default cache behavior community.aws.cloudfront_distribution: profile: example-account state: present caller_reference: test 1 origins: - id: 'my-test-origin-1234' domain_name: www.example.com custom_origin_config: http_port: 80 https_port: 443 origin_keepalive_timeout: 10 origin_read_timeout: 5 connection_attempts: 2 connection_timeout: 5 enabled: true comment: Test CloudFront distribution - ConnectionAttempts and ConnectionTimeout register: result - name: debug debug: var: result ``` I'll prepare a PR for the docs. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/cloudfront_distribution.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/cloudfront_distribution.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @s-hertel @tremble @wilvk [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2022-09-05T11:49:24
ansible-collections/community.aws
1555
ansible-collections__community.aws-1555
[ "1547" ]
021c0019d3bc99b4e02f6c8c7e054f3392209550
diff --git a/plugins/modules/wafv2_rule_group_info.py b/plugins/modules/wafv2_rule_group_info.py --- a/plugins/modules/wafv2_rule_group_info.py +++ b/plugins/modules/wafv2_rule_group_info.py @@ -15,11 +15,6 @@ description: - Get informations about existing wafv2 rule groups. options: - state: - description: - - This option does nothing, has been deprecated, and will be removed in a release after 2022-12-01. - required: false - type: str name: description: - The name of the rule group. @@ -43,7 +38,6 @@ - name: rule group info community.aws.wafv2_rule_group_info: name: test02 - state: present scope: REGIONAL ''' @@ -119,7 +113,6 @@ def get_rule_group(wafv2, name, scope, id, fail_json_aws): def main(): arg_spec = dict( - state=dict(type='str', required=False), name=dict(type='str', required=True), scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']) ) @@ -129,17 +122,11 @@ def main(): supports_check_mode=True ) - state = module.params.get("state") name = module.params.get("name") scope = module.params.get("scope") wafv2 = module.client('wafv2') - if state: - module.deprecate( - 'The state parameter does nothing, has been deprecated, and will be removed in a future release.', - version='6.0.0', collection_name='community.aws') - # check if rule group exists response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws) id = None
diff --git a/tests/integration/targets/wafv2/tasks/rule_group.yml b/tests/integration/targets/wafv2/tasks/rule_group.yml --- a/tests/integration/targets/wafv2/tasks/rule_group.yml +++ b/tests/integration/targets/wafv2/tasks/rule_group.yml @@ -79,7 +79,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out @@ -554,7 +553,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out @@ -671,7 +669,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out diff --git a/tests/integration/targets/wafv2_rule_group/tasks/main.yml b/tests/integration/targets/wafv2_rule_group/tasks/main.yml --- a/tests/integration/targets/wafv2_rule_group/tasks/main.yml +++ b/tests/integration/targets/wafv2_rule_group/tasks/main.yml @@ -87,7 +87,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out @@ -562,7 +561,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out @@ -679,7 +677,6 @@ - name: rule group info wafv2_rule_group_info: name: "{{ rule_group_name }}" - state: present scope: REGIONAL register: out diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -1,3 +1,2 @@ plugins/modules/cloudfront_distribution_info.py pylint:unnecessary-comprehension # (new test) Should be an easy fix, but testing is a challenge - test are broken and aliases require a wildcard cert in ACM plugins/modules/codebuild_project.py pylint:collection-deprecated-version # https://github.com/ansible-collections/community.aws/issues/1546 -plugins/modules/wafv2_rule_group_info.py pylint:collection-deprecated-version # https://github.com/ansible-collections/community.aws/issues/1547 diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -1,3 +1,2 @@ plugins/modules/cloudfront_distribution_info.py pylint:unnecessary-comprehension # (new test) Should be an easy fix, but testing is a challenge - test are broken and aliases require a wildcard cert in ACM plugins/modules/codebuild_project.py pylint:collection-deprecated-version # https://github.com/ansible-collections/community.aws/issues/1546 -plugins/modules/wafv2_rule_group_info.py pylint:collection-deprecated-version # https://github.com/ansible-collections/community.aws/issues/1547 diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt --- a/tests/sanity/ignore-2.13.txt +++ b/tests/sanity/ignore-2.13.txt @@ -1,3 +1,2 @@ plugins/modules/cloudfront_distribution_info.py pylint:unnecessary-comprehension # (new test) Should be an easy fix, but testing is a challenge - test are broken and aliases require a wildcard cert in ACM plugins/modules/codebuild_project.py pylint:collection-deprecated-version # https://github.com/ansible-collections/community.aws/issues/1546 -plugins/modules/wafv2_rule_group_info.py pylint:collection-deprecated-version # https://github.com/ansible-collections/community.aws/issues/1547 diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt --- a/tests/sanity/ignore-2.14.txt +++ b/tests/sanity/ignore-2.14.txt @@ -1,3 +1,2 @@ plugins/modules/cloudfront_distribution_info.py pylint:unnecessary-comprehension # 
(new test) Should be an easy fix, but testing is a challenge - test are broken and aliases require a wildcard cert in ACM plugins/modules/codebuild_project.py pylint:collection-deprecated-version # https://github.com/ansible-collections/community.aws/issues/1546 -plugins/modules/wafv2_rule_group_info.py pylint:collection-deprecated-version # https://github.com/ansible-collections/community.aws/issues/1547 diff --git a/tests/sanity/ignore-2.15.txt b/tests/sanity/ignore-2.15.txt --- a/tests/sanity/ignore-2.15.txt +++ b/tests/sanity/ignore-2.15.txt @@ -1,3 +1,2 @@ plugins/modules/cloudfront_distribution_info.py pylint:unnecessary-comprehension # (new test) Should be an easy fix, but testing is a challenge - test are broken and aliases require a wildcard cert in ACM plugins/modules/codebuild_project.py pylint:collection-deprecated-version # https://github.com/ansible-collections/community.aws/issues/1546 -plugins/modules/wafv2_rule_group_info.py pylint:collection-deprecated-version # https://github.com/ansible-collections/community.aws/issues/1547
[6.0.0] wafv2_rule_group_info - Remove deprecated `state` argument ### Summary The `state` argument does nothing and never has (likely a copy&paste mistake). Remove it for consistency ### Issue Type Feature Idea ### Component Name plugins/modules/wafv2_rule_group_info.py ### Additional Information Originally deprecated as part of #1210 ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/wafv2_rule_group_info.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/wafv2_rule_group_info.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @s-hertel [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2022-10-08T17:17:03
ansible-collections/community.aws
1563
ansible-collections__community.aws-1563
[ "1413" ]
310e4b5bf78acdff1ea3a271795d1f520cbbde32
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -534,12 +534,14 @@ def _flush_stderr(self, subprocess): def _get_url(self, client_method, bucket_name, out_path, http_method, profile_name, extra_args=None): ''' Generate URL for get_object / put_object ''' - bucket_location = boto3.client('s3').get_bucket_location( + region_name = self.get_option('region') or 'us-east-1' + + bucket_location = self._get_boto_client('s3', region_name=region_name, profile_name=profile_name).get_bucket_location( Bucket=(self.get_option('bucket_name')), ) - region_name = bucket_location['LocationConstraint'] + bucket_region_name = bucket_location['LocationConstraint'] - client = self._get_boto_client('s3', region_name=region_name, profile_name=profile_name) + client = self._get_boto_client('s3', region_name=bucket_region_name, profile_name=profile_name) params = {'Bucket': bucket_name, 'Key': out_path} if extra_args is not None: params.update(extra_args)
ssm connection caught exception(Unable to locate credentials) ### Summary After upgrading `community.aws` to `4.1.1` and `amazon.aws` to `4.1.0`, my playbooks are not able to connect via ssm anymore. Extract of the logs ``` redirecting (type: connection) ansible.builtin.aws_ssm to community.aws.aws_ssm wait_for_connection: attempting ping module test <i-redacted> ESTABLISH SSM CONNECTION TO: i-redacted <i-redacted> SSM CONNECTION ID: redacted <i-redacted> EXEC ( umask 77 && mkdir -p "` echo /tmp/.ansible-ssm `"&& mkdir "` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" && echo ansible-tmp-1660641811.7266405-78-7676503826330="` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" ) <i-redacted> (0, 'ansible-tmp-1660641811.7266405-78-7676503826330=/tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330\r\r', '') Using module file /usr/local/lib/python3.10/dist-packages/ansible/modules/ping.py <i-redacted> PUT /home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve TO /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330/AnsiballZ_ping.py <i-redacted> ssm_retry: attempt: 0, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 0 seconds <i-redacted> CLOSING SSM CONNECTION TO: i-redacted <i-redacted> ssm_retry: attempt: 1, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 1 seconds <i-redacted> ssm_retry: attempt: 2, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 3 seconds ``` reverting to `community.aws` to `4.0.0` and `amazon.aws` to `4.0.0` works. ### Issue Type Bug Report ### Component Name aws_ssm ### Ansible Version ```console (paste below) $ ansible --version 2.13.1 ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list Collection Version ----------------- ------- amazon.aws 4.0.0 ansible.posix 1.4.0 community.aws 4.0.0 community.docker 2.7.0 community.general 5.2.0 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Executing pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.24.24 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.47 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - name: Wait for instance to be ready hosts: all serial: 1 gather_facts: no tasks: - name: Wait for instance to be ready wait_for_connection: ``` ### Expected Results Expect a connection to the instance ### Actual Results ```console (paste below) redirecting (type: connection) ansible.builtin.aws_ssm to community.aws.aws_ssm wait_for_connection: attempting ping module test <i-redacted> ESTABLISH SSM CONNECTION TO: i-redacted <i-redacted> SSM CONNECTION ID: redacted <i-redacted> EXEC ( umask 77 && mkdir -p "` echo /tmp/.ansible-ssm `"&& mkdir "` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" && echo ansible-tmp-1660641811.7266405-78-7676503826330="` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" ) <i-redacted> (0, 'ansible-tmp-1660641811.7266405-78-7676503826330=/tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330\r\r', '') Using module file /usr/local/lib/python3.10/dist-packages/ansible/modules/ping.py <i-redacted> PUT /home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve TO /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330/AnsiballZ_ping.py <i-redacted> ssm_retry: attempt: 0, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 0 seconds <i-redacted> CLOSING SSM CONNECTION TO: i-redacted <i-redacted> ssm_retry: attempt: 1, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 1 seconds <i-redacted> ssm_retry: attempt: 2, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 3 seconds ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/connection/aws_ssm.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/connection/aws_ssm.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> I am also experiencing this issue. This is caused by https://github.com/ansible-collections/community.aws/pull/1176, which creates and uses an S3 boto3 client without the credentials provided to the plugin.
2022-10-18T13:44:53
ansible-collections/community.aws
1564
ansible-collections__community.aws-1564
[ "1413" ]
d437578d17470b9831af0a649f282020529525c1
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -534,12 +534,14 @@ def _flush_stderr(self, subprocess): def _get_url(self, client_method, bucket_name, out_path, http_method, profile_name, extra_args=None): ''' Generate URL for get_object / put_object ''' - bucket_location = boto3.client('s3').get_bucket_location( + region_name = self.get_option('region') or 'us-east-1' + + bucket_location = self._get_boto_client('s3', region_name=region_name, profile_name=profile_name).get_bucket_location( Bucket=(self.get_option('bucket_name')), ) - region_name = bucket_location['LocationConstraint'] + bucket_region_name = bucket_location['LocationConstraint'] - client = self._get_boto_client('s3', region_name=region_name, profile_name=profile_name) + client = self._get_boto_client('s3', region_name=bucket_region_name, profile_name=profile_name) params = {'Bucket': bucket_name, 'Key': out_path} if extra_args is not None: params.update(extra_args)
ssm connection caught exception(Unable to locate credentials) ### Summary After upgrading `community.aws` to `4.1.1` and `amazon.aws` to `4.1.0`, my playbooks are not able to connect via ssm anymore. Extract of the logs ``` redirecting (type: connection) ansible.builtin.aws_ssm to community.aws.aws_ssm wait_for_connection: attempting ping module test <i-redacted> ESTABLISH SSM CONNECTION TO: i-redacted <i-redacted> SSM CONNECTION ID: redacted <i-redacted> EXEC ( umask 77 && mkdir -p "` echo /tmp/.ansible-ssm `"&& mkdir "` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" && echo ansible-tmp-1660641811.7266405-78-7676503826330="` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" ) <i-redacted> (0, 'ansible-tmp-1660641811.7266405-78-7676503826330=/tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330\r\r', '') Using module file /usr/local/lib/python3.10/dist-packages/ansible/modules/ping.py <i-redacted> PUT /home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve TO /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330/AnsiballZ_ping.py <i-redacted> ssm_retry: attempt: 0, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 0 seconds <i-redacted> CLOSING SSM CONNECTION TO: i-redacted <i-redacted> ssm_retry: attempt: 1, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 1 seconds <i-redacted> ssm_retry: attempt: 2, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 3 seconds ``` reverting to `community.aws` to `4.0.0` and `amazon.aws` to `4.0.0` works. ### Issue Type Bug Report ### Component Name aws_ssm ### Ansible Version ```console (paste below) $ ansible --version 2.13.1 ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list Collection Version ----------------- ------- amazon.aws 4.0.0 ansible.posix 1.4.0 community.aws 4.0.0 community.docker 2.7.0 community.general 5.2.0 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Executing pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.24.24 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.47 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - name: Wait for instance to be ready hosts: all serial: 1 gather_facts: no tasks: - name: Wait for instance to be ready wait_for_connection: ``` ### Expected Results Expect a connection to the instance ### Actual Results ```console (paste below) redirecting (type: connection) ansible.builtin.aws_ssm to community.aws.aws_ssm wait_for_connection: attempting ping module test <i-redacted> ESTABLISH SSM CONNECTION TO: i-redacted <i-redacted> SSM CONNECTION ID: redacted <i-redacted> EXEC ( umask 77 && mkdir -p "` echo /tmp/.ansible-ssm `"&& mkdir "` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" && echo ansible-tmp-1660641811.7266405-78-7676503826330="` echo /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330 `" ) <i-redacted> (0, 'ansible-tmp-1660641811.7266405-78-7676503826330=/tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330\r\r', '') Using module file /usr/local/lib/python3.10/dist-packages/ansible/modules/ping.py <i-redacted> PUT /home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve TO /tmp/.ansible-ssm/ansible-tmp-1660641811.7266405-78-7676503826330/AnsiballZ_ping.py <i-redacted> ssm_retry: attempt: 0, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 0 seconds <i-redacted> CLOSING SSM CONNECTION TO: i-redacted <i-redacted> ssm_retry: attempt: 1, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 1 seconds <i-redacted> ssm_retry: attempt: 2, caught exception(Unable to locate credentials) from cmd (/home/ansible/.ansible/tmp/ansible-local-74qpntduf7/tmp1nopxxve...), pausing for 3 seconds ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/connection/aws_ssm.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/connection/aws_ssm.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> I am also experiencing this issue. This is caused by https://github.com/ansible-collections/community.aws/pull/1176, which creates and uses an S3 boto3 client without the credentials provided to the plugin.
2022-10-18T13:45:25
ansible-collections/community.aws
1589
ansible-collections__community.aws-1589
[ "290" ]
6dd4a00b8c18fe3499bad04f90c8ac7832ade8bb
diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py --- a/plugins/modules/cloudfront_distribution.py +++ b/plugins/modules/cloudfront_distribution.py @@ -205,9 +205,25 @@ description: - The ID of the header policy that CloudFront adds to responses that it sends to viewers. type: str + cache_policy_id: + version_added: 7.1.0 + description: + - The ID of the cache policy for CloudFront to use for the default cache behavior. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html) + type: str + origin_request_policy_id: + version_added: 7.1.0 + description: + - The ID of the origin request policy for CloudFront to use for the default cache behavior. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html) + type: str forwarded_values: description: - A dict that specifies how CloudFront handles query strings and cookies. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. type: dict suboptions: query_string: @@ -326,9 +342,25 @@ description: - The ID of the header policy that CloudFront adds to responses that it sends to viewers. type: str + cache_policy_id: + version_added: 7.1.0 + description: + - The ID of the cache policy for CloudFront to use for the cache behavior. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html) + type: str + origin_request_policy_id: + version_added: 7.1.0 + description: + - The ID of the origin request policy for CloudFront to use for the cache behavior. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html) + type: str forwarded_values: description: - A dict that specifies how CloudFront handles query strings and cookies. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. 
type: dict suboptions: query_string: @@ -1914,7 +1946,10 @@ def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_defa cache_behavior = self.validate_cache_behavior_first_level_keys( config, cache_behavior, valid_origins, is_default_cache ) - cache_behavior = self.validate_forwarded_values(config, cache_behavior.get("forwarded_values"), cache_behavior) + if cache_behavior.get("cache_policy_id") is None: + cache_behavior = self.validate_forwarded_values( + config, cache_behavior.get("forwarded_values"), cache_behavior + ) cache_behavior = self.validate_allowed_methods(config, cache_behavior.get("allowed_methods"), cache_behavior) cache_behavior = self.validate_lambda_function_associations( config, cache_behavior.get("lambda_function_associations"), cache_behavior @@ -1926,19 +1961,34 @@ def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_defa return cache_behavior def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache): - try: - cache_behavior = self.add_key_else_change_dict_key( - cache_behavior, "min_ttl", "min_t_t_l", config.get("min_t_t_l", self.__default_cache_behavior_min_ttl) - ) - cache_behavior = self.add_key_else_change_dict_key( - cache_behavior, "max_ttl", "max_t_t_l", config.get("max_t_t_l", self.__default_cache_behavior_max_ttl) - ) - cache_behavior = self.add_key_else_change_dict_key( - cache_behavior, - "default_ttl", - "default_t_t_l", - config.get("default_t_t_l", self.__default_cache_behavior_default_ttl), + if cache_behavior.get("cache_policy_id") is not None and cache_behavior.get("forwarded_values") is not None: + if is_default_cache: + cache_behavior_name = "Default cache behavior" + else: + cache_behavior_name = f"Cache behavior for path {cache_behavior['path_pattern']}" + self.module.fail_json( + msg=f"{cache_behavior_name} cannot have both a cache_policy_id and a forwarded_values option." ) + try: + if cache_behavior.get("cache_policy_id") is None: + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, + "min_ttl", + "min_t_t_l", + config.get("min_t_t_l", self.__default_cache_behavior_min_ttl), + ) + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, + "max_ttl", + "max_t_t_l", + config.get("max_t_t_l", self.__default_cache_behavior_max_ttl), + ) + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, + "default_ttl", + "default_t_t_l", + config.get("default_t_t_l", self.__default_cache_behavior_default_ttl), + ) cache_behavior = self.add_missing_key( cache_behavior, "compress", config.get("compress", self.__default_cache_behavior_compress) )
diff --git a/tests/integration/targets/cloudfront_distribution/tasks/main.yml b/tests/integration/targets/cloudfront_distribution/tasks/main.yml --- a/tests/integration/targets/cloudfront_distribution/tasks/main.yml +++ b/tests/integration/targets/cloudfront_distribution/tasks/main.yml @@ -632,6 +632,22 @@ - result.origins['quantity'] > 0 - result.origins['items'] | selectattr('s3_origin_config', 'defined') | map(attribute='s3_origin_config') | selectattr('origin_access_identity', 'eq', origin_access_identity) | list | length == 1 + - name: update distribution to use cache_policy_id and origin_request_policy_id + cloudfront_distribution: + distribution_id: "{{ distribution_id }}" + default_cache_behavior: + cache_policy_id: "658327ea-f89d-4fab-a63d-7e88639e58f6" + origin_request_policy_id: "88a5eaf4-2fd4-4709-b370-b4c650ea3fcf" + state: present + register: update_distribution_with_cache_policies + + - name: ensure that the cache_policy_id and origin_request_policy_id was set + assert: + that: + - update_distribution_with_cache_policies.changed + - update_distribution_with_cache_policies.default_cache_behavior.cache_policy_id == '658327ea-f89d-4fab-a63d-7e88639e58f6' + - update_distribution_with_cache_policies.default_cache_behavior.origin_request_policy_id == '88a5eaf4-2fd4-4709-b370-b4c650ea3fcf' + always: # TEARDOWN STARTS HERE - name: delete the s3 bucket
cloudfront_distribution - Add support for setting cache and origin request policy ids in cache behavior ##### SUMMARY Add support for setting cache and origin request policy ids in the cache behaviors block when creating a distribution in cloudfront_distribution. ##### ISSUE TYPE - Feature Idea ##### COMPONENT NAME cloudfront_distribution ##### ADDITIONAL INFORMATION Currently we are unable to set cache policy or origin request policy ids in the cache behaviors section so that they are added to a distribution. <!--- Paste example playbooks or commands between quotes below --> Example playbook code: ```yaml cache_behaviors: - path_pattern: "*/" target_origin_id: "" field_level_encryption_id: "" cache_policy_id: "{{ cache_policy_id }}" origin_request_policy_id: "{{ origin_request_policy_id }}" ``` Example python code: ```python cache_behavior['cache_policy_id'] = cache_behavior.get('cache_policy_id', config.get('cache_policy_id')) cache_behavior['origin_request_policy_id'] = cache_behavior.get('origin_request_policy_id', config.get('origin_request_policy_id')) ``` <!--- HINT: You can also paste gist.github.com links for larger files -->
Files identified in the description: * [`plugins/modules/cloudfront_distribution.py`](https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/cloudfront_distribution.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @s-hertel @tremble @willthames @wilvk @wimnat [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> Thank you for raising this idea @sethernet. Would you like to raise a PR for this issue? Theoretically the parameter `cache_policy_id: 658327ea-f89d-4fab-a63d-7e88639e58f6` works already. But the default values for min/max/default_ttl that are added, are incompatible together.
2022-11-07T17:43:08
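The patch in this row lets a cache behavior reference a cache policy and an origin request policy by ID and rejects combining `cache_policy_id` with the legacy `forwarded_values` block, because the TTL and forwarding settings then live inside the policy. A simplified sketch of that rule (not the module's validator; the policy IDs are the ones quoted in the integration test above, assumed to be AWS's managed policies):

```python
# Simplified sketch of the mutual-exclusion rule added by the patch above.

policy_based_behavior = {
    "target_origin_id": "my-test-origin-1234",
    "cache_policy_id": "658327ea-f89d-4fab-a63d-7e88639e58f6",
    "origin_request_policy_id": "88a5eaf4-2fd4-4709-b370-b4c650ea3fcf",
}

legacy_behavior = {
    "target_origin_id": "my-test-origin-1234",
    "forwarded_values": {"query_string": True, "cookies": {"forward": "all"}},
    "min_ttl": 0,
    "default_ttl": 86400,
    "max_ttl": 31536000,
}

def validate_cache_behavior(behavior):
    if behavior.get("cache_policy_id") and behavior.get("forwarded_values"):
        raise ValueError(
            "A cache behavior cannot set both cache_policy_id and forwarded_values."
        )
    return behavior

validate_cache_behavior(policy_based_behavior)  # ok
validate_cache_behavior(legacy_behavior)        # ok
```

Mixing the two shapes in one behavior raises an error, which matches the `fail_json` call the patch adds to `validate_cache_behavior_first_level_keys`.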
ansible-collections/community.aws
1610
ansible-collections__community.aws-1610
[ "1565" ]
fad358924d91150b6278a7832c2f2028d588bd44
diff --git a/plugins/modules/ecs_taskdefinition.py b/plugins/modules/ecs_taskdefinition.py --- a/plugins/modules/ecs_taskdefinition.py +++ b/plugins/modules/ecs_taskdefinition.py @@ -466,6 +466,49 @@ description: The health check command and associated configuration parameters for the container. required: False type: dict + suboptions: + command: + description: + - A string array representing the command that the container runs to determine if it is healthy. + - > + The string array must start with CMD to run the command arguments directly, + or CMD-SHELL to run the command with the container's default shell. + - An exit code of 0 indicates success, and non-zero exit code indicates failure. + required: False + type: list + elements: str + interval: + description: + - The time period in seconds between each health check execution. + - You may specify between 5 and 300 seconds. The default value is 30 seconds. + required: False + type: int + default: 30 + retries: + description: + - The number of times to retry a failed health check before the container is considered unhealthy. + - You may specify between 1 and 10 retries. The default value is 3. + required: False + type: int + default: 3 + startPeriod: + description: + - > + The optional grace period to provide containers time to bootstrap + before failed health checks count towards the maximum number of retries. + - You can specify between 0 and 300 seconds. By default, the startPeriod is disabled. + - > + Note: If a health check succeeds within the startPeriod, + then the container is considered healthy and any subsequent failures count toward the maximum number of retries. + required: False + type: int + timeout: + description: + - The time period in seconds to wait for a health check to succeed before it is considered a failure. + - You may specify between 2 and 60 seconds. The default value is 5. + required: False + type: int + default: 5 systemControls: description: A list of namespaced kernel parameters to set in the container. required: False @@ -677,6 +720,29 @@ memory: 1GB state: present network_mode: awsvpc + +# Create Task Definition with health check +- name: Create task definition + community.aws.ecs_taskdefinition: + family: nginx + containers: + - name: nginx + essential: true + image: "nginx" + portMappings: + - containerPort: 8080 + hostPort: 8080 + cpu: 512 + memory: 1024 + healthCheck: + command: + - CMD-SHELL + - /app/healthcheck.py + interval: 60 + retries: 3 + startPeriod: 15 + timeout: 15 + state: present ''' RETURN = r''' taskdefinition:
ecs_taskdefinition healthCheck ### Summary `ecs_taskdefinition.containers` accepts `healthCheck`, which is a dictionary. However, it's not explained what the dictionary looks like, and there's no examples at all. ### Issue Type Documentation Report ### Component Name ecs_taskdefinition ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.13.2] config file = None configured module search path = ['/home/vagrant/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible ansible collection location = /home/vagrant/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.8.10 (default, Jun 22 2022, 20:18:18) [GCC 9.4.0] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /usr/local/lib/python3.8/dist-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 3.3.1 ansible.netcommon 3.0.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.10.0 arista.eos 5.0.1 awx.awx 21.4.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.0 cisco.aci 2.2.0 cisco.asa 3.1.0 cisco.dnac 6.5.2 cisco.intersight 1.0.19 cisco.ios 3.2.0 cisco.iosxr 3.2.0 cisco.ise 2.5.0 cisco.meraki 2.10.1 cisco.mso 2.0.0 cisco.nso 1.0.3 cisco.nxos 3.1.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.4.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.4.0 community.digitalocean 1.21.0 community.dns 2.3.0 community.docker 2.7.0 community.fortios 1.0.0 community.general 5.4.0 community.google 1.0.0 community.grafana 1.5.1 community.hashi_vault 3.1.0 community.hrobot 1.5.0 community.libvirt 1.1.0 community.mongodb 1.4.2 community.mysql 3.3.0 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.2.0 community.proxysql 1.4.0 community.rabbitmq 1.2.2 community.routeros 2.2.0 community.sap 1.0.0 community.sap_libs 1.2.0 community.skydive 1.0.0 community.sops 1.2.3 community.vmware 2.7.0 community.windows 1.10.0 community.zabbix 1.7.0 containers.podman 1.9.4 cyberark.conjur 1.1.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.1 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.18.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.6 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.1 hpe.nimble 1.1.4 ibm.qradar 2.0.0 ibm.spectrum_virtualize 1.9.0 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.3.0 inspur.sm 2.0.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.18.0 netapp.elementsw 21.7.0 netapp.ontap 21.21.0 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.2.0 purestorage.flasharray 1.13.0 purestorage.flashblade 1.9.0 purestorage.fusion 1.0.2 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 2.0.0 t_systems_mms.icinga_director 1.30.1 theforeman.foreman 3.4.0 vmware.vmware_rest 2.2.0 vyos.vyos 3.0.1 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Additional Information Clarifies what kind of dictionary the healthCheck 
parameter expects. Provides an example to help developers implement healthCheck more smoothly. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
For example, I tried ```yml healthCheck: command: [ "CMD-SHELL", "wget -q -O /dev/null -T 5 http://localhost:3000/ping || exit 1" ] startPeriod: 60 ``` but this did not make it into the JSON submitted to ECS. cc @Java1Guy @alinabuzachis @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @aschereT Thank you for raising this. Would you be willing to open a PR to update the documentation and add an example? @aschereT It must look like this ```yml - name: update_taskdefinition ecs_taskdefinition: family: dplctrl state: present network_mode: awsvpc launch_type: FARGATE execution_role_arn: "arn:aws:iam::1234567890:role/ecsTaskExecutionRole" task_role_arn: "arn:aws:iam::1234567890:role/dplctrl" force_create: true region: eu-central-1 cpu: "256" memory: "512" containers: - name: dplctrl memoryReservation: 256 essential: true image: "{{ IMAGE }}" secrets: - valueFrom: arn:aws:ssm:eu-central-1:1234567890:parameter/test.website.redis_password name: REDIS_PASSWORD portMappings: - containerPort: 8000 hostPort: 8000 healthCheck: command: - CMD-SHELL - /app/healthcheck.py interval: 60 retries: 3 startPeriod: 15 timeout: 15 logConfiguration: logDriver: awslogs options: awslogs-group: /deploy.test awslogs-region: eu-central-1 awslogs-stream-prefix: ecs register: output ```
2022-12-01T20:02:54
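For readers wondering where the `healthCheck` dictionary documented in this row ends up, it is passed through to the ECS RegisterTaskDefinition API largely as-is. A rough boto3 equivalent of the nginx example above (illustrative family, image and region values; this is not the module's own code path, which builds the same structure from the playbook parameters):

```python
import boto3

ecs = boto3.client("ecs", region_name="us-east-1")

# Rough boto3 equivalent of the documented nginx healthCheck example.
ecs.register_task_definition(
    family="nginx",
    containerDefinitions=[
        {
            "name": "nginx",
            "image": "nginx",
            "essential": True,
            "cpu": 512,
            "memory": 1024,
            "portMappings": [{"containerPort": 8080, "hostPort": 8080}],
            "healthCheck": {
                "command": ["CMD-SHELL", "/app/healthcheck.py"],
                "interval": 60,
                "retries": 3,
                "startPeriod": 15,
                "timeout": 15,
            },
        }
    ],
)
```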
ansible-collections/community.aws
1611
ansible-collections__community.aws-1611
[ "1565" ]
08d2398be3c90d5a25fb29aaf517e95d0e17c70b
diff --git a/plugins/modules/ecs_taskdefinition.py b/plugins/modules/ecs_taskdefinition.py --- a/plugins/modules/ecs_taskdefinition.py +++ b/plugins/modules/ecs_taskdefinition.py @@ -466,6 +466,49 @@ description: The health check command and associated configuration parameters for the container. required: False type: dict + suboptions: + command: + description: + - A string array representing the command that the container runs to determine if it is healthy. + - > + The string array must start with CMD to run the command arguments directly, + or CMD-SHELL to run the command with the container's default shell. + - An exit code of 0 indicates success, and non-zero exit code indicates failure. + required: False + type: list + elements: str + interval: + description: + - The time period in seconds between each health check execution. + - You may specify between 5 and 300 seconds. The default value is 30 seconds. + required: False + type: int + default: 30 + retries: + description: + - The number of times to retry a failed health check before the container is considered unhealthy. + - You may specify between 1 and 10 retries. The default value is 3. + required: False + type: int + default: 3 + startPeriod: + description: + - > + The optional grace period to provide containers time to bootstrap + before failed health checks count towards the maximum number of retries. + - You can specify between 0 and 300 seconds. By default, the startPeriod is disabled. + - > + Note: If a health check succeeds within the startPeriod, + then the container is considered healthy and any subsequent failures count toward the maximum number of retries. + required: False + type: int + timeout: + description: + - The time period in seconds to wait for a health check to succeed before it is considered a failure. + - You may specify between 2 and 60 seconds. The default value is 5. + required: False + type: int + default: 5 systemControls: description: A list of namespaced kernel parameters to set in the container. required: False @@ -674,6 +717,29 @@ memory: 1GB state: present network_mode: awsvpc + +# Create Task Definition with health check +- name: Create task definition + community.aws.ecs_taskdefinition: + family: nginx + containers: + - name: nginx + essential: true + image: "nginx" + portMappings: + - containerPort: 8080 + hostPort: 8080 + cpu: 512 + memory: 1024 + healthCheck: + command: + - CMD-SHELL + - /app/healthcheck.py + interval: 60 + retries: 3 + startPeriod: 15 + timeout: 15 + state: present ''' RETURN = r''' taskdefinition:
ecs_taskdefinition healthCheck ### Summary `ecs_taskdefinition.containers` accepts `healthCheck`, which is a dictionary. However, it's not explained what the dictionary looks like, and there's no examples at all. ### Issue Type Documentation Report ### Component Name ecs_taskdefinition ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.13.2] config file = None configured module search path = ['/home/vagrant/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible ansible collection location = /home/vagrant/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.8.10 (default, Jun 22 2022, 20:18:18) [GCC 9.4.0] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /usr/local/lib/python3.8/dist-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 3.3.1 ansible.netcommon 3.0.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.10.0 arista.eos 5.0.1 awx.awx 21.4.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.0 cisco.aci 2.2.0 cisco.asa 3.1.0 cisco.dnac 6.5.2 cisco.intersight 1.0.19 cisco.ios 3.2.0 cisco.iosxr 3.2.0 cisco.ise 2.5.0 cisco.meraki 2.10.1 cisco.mso 2.0.0 cisco.nso 1.0.3 cisco.nxos 3.1.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.4.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.4.0 community.digitalocean 1.21.0 community.dns 2.3.0 community.docker 2.7.0 community.fortios 1.0.0 community.general 5.4.0 community.google 1.0.0 community.grafana 1.5.1 community.hashi_vault 3.1.0 community.hrobot 1.5.0 community.libvirt 1.1.0 community.mongodb 1.4.2 community.mysql 3.3.0 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.2.0 community.proxysql 1.4.0 community.rabbitmq 1.2.2 community.routeros 2.2.0 community.sap 1.0.0 community.sap_libs 1.2.0 community.skydive 1.0.0 community.sops 1.2.3 community.vmware 2.7.0 community.windows 1.10.0 community.zabbix 1.7.0 containers.podman 1.9.4 cyberark.conjur 1.1.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.1 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.18.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.6 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.1 hpe.nimble 1.1.4 ibm.qradar 2.0.0 ibm.spectrum_virtualize 1.9.0 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.3.0 inspur.sm 2.0.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.18.0 netapp.elementsw 21.7.0 netapp.ontap 21.21.0 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.2.0 purestorage.flasharray 1.13.0 purestorage.flashblade 1.9.0 purestorage.fusion 1.0.2 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 2.0.0 t_systems_mms.icinga_director 1.30.1 theforeman.foreman 3.4.0 vmware.vmware_rest 2.2.0 vyos.vyos 3.0.1 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Additional Information Clarifies what kind of dictionary the healthCheck 
parameter expects. Provides an example to help developers implement healthCheck more smoothly. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
For example, I tried ```yml healthCheck: command: [ "CMD-SHELL", "wget -q -O /dev/null -T 5 http://localhost:3000/ping || exit 1" ] startPeriod: 60 ``` but this did not make it into the JSON submitted to ECS. cc @Java1Guy @alinabuzachis @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @aschereT Thank you for raising this. Would you be willing to open a PR to update the documentation and add an example? @aschereT It must look like this ```yml - name: update_taskdefinition ecs_taskdefinition: family: dplctrl state: present network_mode: awsvpc launch_type: FARGATE execution_role_arn: "arn:aws:iam::1234567890:role/ecsTaskExecutionRole" task_role_arn: "arn:aws:iam::1234567890:role/dplctrl" force_create: true region: eu-central-1 cpu: "256" memory: "512" containers: - name: dplctrl memoryReservation: 256 essential: true image: "{{ IMAGE }}" secrets: - valueFrom: arn:aws:ssm:eu-central-1:1234567890:parameter/test.website.redis_password name: REDIS_PASSWORD portMappings: - containerPort: 8000 hostPort: 8000 healthCheck: command: - CMD-SHELL - /app/healthcheck.py interval: 60 retries: 3 startPeriod: 15 timeout: 15 logConfiguration: logDriver: awslogs options: awslogs-group: /deploy.test awslogs-region: eu-central-1 awslogs-stream-prefix: ecs register: output ``` > @aschereT Thank you for raising this. Would you be willing to open a PR to update the documentation and add an example? I made the issue because I didn't know what the format is in the first place... @markuman Thanks for an example! This is what I'm looking for! I also made a PR now that I know what the format looks like. https://github.com/ansible-collections/community.aws/pull/1610
2022-12-06T10:48:26
ansible-collections/community.aws
1612
ansible-collections__community.aws-1612
[ "1565" ]
1246c4ca4591c49519de9a6eed2d176a63ca2438
diff --git a/plugins/modules/ecs_taskdefinition.py b/plugins/modules/ecs_taskdefinition.py --- a/plugins/modules/ecs_taskdefinition.py +++ b/plugins/modules/ecs_taskdefinition.py @@ -466,6 +466,49 @@ description: The health check command and associated configuration parameters for the container. required: False type: dict + suboptions: + command: + description: + - A string array representing the command that the container runs to determine if it is healthy. + - > + The string array must start with CMD to run the command arguments directly, + or CMD-SHELL to run the command with the container's default shell. + - An exit code of 0 indicates success, and non-zero exit code indicates failure. + required: False + type: list + elements: str + interval: + description: + - The time period in seconds between each health check execution. + - You may specify between 5 and 300 seconds. The default value is 30 seconds. + required: False + type: int + default: 30 + retries: + description: + - The number of times to retry a failed health check before the container is considered unhealthy. + - You may specify between 1 and 10 retries. The default value is 3. + required: False + type: int + default: 3 + startPeriod: + description: + - > + The optional grace period to provide containers time to bootstrap + before failed health checks count towards the maximum number of retries. + - You can specify between 0 and 300 seconds. By default, the startPeriod is disabled. + - > + Note: If a health check succeeds within the startPeriod, + then the container is considered healthy and any subsequent failures count toward the maximum number of retries. + required: False + type: int + timeout: + description: + - The time period in seconds to wait for a health check to succeed before it is considered a failure. + - You may specify between 2 and 60 seconds. The default value is 5. + required: False + type: int + default: 5 systemControls: description: A list of namespaced kernel parameters to set in the container. required: False @@ -677,6 +720,29 @@ memory: 1GB state: present network_mode: awsvpc + +# Create Task Definition with health check +- name: Create task definition + community.aws.ecs_taskdefinition: + family: nginx + containers: + - name: nginx + essential: true + image: "nginx" + portMappings: + - containerPort: 8080 + hostPort: 8080 + cpu: 512 + memory: 1024 + healthCheck: + command: + - CMD-SHELL + - /app/healthcheck.py + interval: 60 + retries: 3 + startPeriod: 15 + timeout: 15 + state: present ''' RETURN = r''' taskdefinition:
ecs_taskdefinition healthCheck ### Summary `ecs_taskdefinition.containers` accepts `healthCheck`, which is a dictionary. However, it's not explained what the dictionary looks like, and there's no examples at all. ### Issue Type Documentation Report ### Component Name ecs_taskdefinition ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.13.2] config file = None configured module search path = ['/home/vagrant/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible ansible collection location = /home/vagrant/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.8.10 (default, Jun 22 2022, 20:18:18) [GCC 9.4.0] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /usr/local/lib/python3.8/dist-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 3.3.1 ansible.netcommon 3.0.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.10.0 arista.eos 5.0.1 awx.awx 21.4.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.0 cisco.aci 2.2.0 cisco.asa 3.1.0 cisco.dnac 6.5.2 cisco.intersight 1.0.19 cisco.ios 3.2.0 cisco.iosxr 3.2.0 cisco.ise 2.5.0 cisco.meraki 2.10.1 cisco.mso 2.0.0 cisco.nso 1.0.3 cisco.nxos 3.1.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.4.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.4.0 community.digitalocean 1.21.0 community.dns 2.3.0 community.docker 2.7.0 community.fortios 1.0.0 community.general 5.4.0 community.google 1.0.0 community.grafana 1.5.1 community.hashi_vault 3.1.0 community.hrobot 1.5.0 community.libvirt 1.1.0 community.mongodb 1.4.2 community.mysql 3.3.0 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.2.0 community.proxysql 1.4.0 community.rabbitmq 1.2.2 community.routeros 2.2.0 community.sap 1.0.0 community.sap_libs 1.2.0 community.skydive 1.0.0 community.sops 1.2.3 community.vmware 2.7.0 community.windows 1.10.0 community.zabbix 1.7.0 containers.podman 1.9.4 cyberark.conjur 1.1.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.1 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.18.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.6 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.1 hpe.nimble 1.1.4 ibm.qradar 2.0.0 ibm.spectrum_virtualize 1.9.0 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.3.0 inspur.sm 2.0.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.18.0 netapp.elementsw 21.7.0 netapp.ontap 21.21.0 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.2.0 purestorage.flasharray 1.13.0 purestorage.flashblade 1.9.0 purestorage.fusion 1.0.2 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 2.0.0 t_systems_mms.icinga_director 1.30.1 theforeman.foreman 3.4.0 vmware.vmware_rest 2.2.0 vyos.vyos 3.0.1 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Additional Information Clarifies what kind of dictionary the healthCheck 
parameter expects. Provides an example to help developers implement healthCheck more smoothly. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
For example, I tried ```yml healthCheck: command: [ "CMD-SHELL", "wget -q -O /dev/null -T 5 http://localhost:3000/ping || exit 1" ] startPeriod: 60 ``` but this did not make it into the JSON submitted to ECS. cc @Java1Guy @alinabuzachis @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @aschereT Thank you for raising this. Would you be willing to open a PR to update the documentation and add an example? @aschereT It must look like this ```yml - name: update_taskdefinition ecs_taskdefinition: family: dplctrl state: present network_mode: awsvpc launch_type: FARGATE execution_role_arn: "arn:aws:iam::1234567890:role/ecsTaskExecutionRole" task_role_arn: "arn:aws:iam::1234567890:role/dplctrl" force_create: true region: eu-central-1 cpu: "256" memory: "512" containers: - name: dplctrl memoryReservation: 256 essential: true image: "{{ IMAGE }}" secrets: - valueFrom: arn:aws:ssm:eu-central-1:1234567890:parameter/test.website.redis_password name: REDIS_PASSWORD portMappings: - containerPort: 8000 hostPort: 8000 healthCheck: command: - CMD-SHELL - /app/healthcheck.py interval: 60 retries: 3 startPeriod: 15 timeout: 15 logConfiguration: logDriver: awslogs options: awslogs-group: /deploy.test awslogs-region: eu-central-1 awslogs-stream-prefix: ecs register: output ``` > @aschereT Thank you for raising this. Would you be willing to open a PR to update the documentation and add an example? I made the issue because I didn't know what the format is in the first place... @markuman Thanks for an example! This is what I'm looking for! I also made a PR now that I know what the format looks like. https://github.com/ansible-collections/community.aws/pull/1610
2022-12-06T10:48:37
ansible-collections/community.aws
1,613
ansible-collections__community.aws-1613
[ "1560" ]
57a9e6f847ed1aa49e56e21def2a2d383c2e7228
diff --git a/plugins/modules/opensearch.py b/plugins/modules/opensearch.py --- a/plugins/modules/opensearch.py +++ b/plugins/modules/opensearch.py @@ -948,6 +948,7 @@ def set_advanced_security_options( ] = advanced_security_opts.get("internal_user_database_enabled") master_user_opts = advanced_security_opts.get("master_user_options") if master_user_opts is not None: + advanced_security_config.setdefault("MasterUserOptions", {}) if master_user_opts.get("master_user_arn") is not None: advanced_security_config["MasterUserOptions"][ "MasterUserARN"
diff --git a/tests/integration/targets/opensearch/tasks/test_opensearch.yml b/tests/integration/targets/opensearch/tasks/test_opensearch.yml --- a/tests/integration/targets/opensearch/tasks/test_opensearch.yml +++ b/tests/integration/targets/opensearch/tasks/test_opensearch.yml @@ -3,7 +3,7 @@ - name: test without specifying required module options opensearch: engine_version: "Elasticsearch_7.1" - ignore_errors: yes + ignore_errors: true register: result - name: assert domain_name is a required module option @@ -959,7 +959,7 @@ wait_timeout: "{{ 60 * 60 }}" register: opensearch_domain until: opensearch_domain is not failed - ignore_errors: yes + ignore_errors: true retries: 10 # After enabling at rest encryption, there is a period during which the API fails, so retry. delay: 30 @@ -996,6 +996,11 @@ - "opensearch_domain.domain_endpoint_options.tls_security_policy == 'Policy-Min-TLS-1-2-2019-07'" - opensearch_domain is changed +- name: Set common facts for advanced security tests + set_fact: + test_master_user_name: my_custom_admin_username + test_master_user_password: "{{ lookup('ansible.builtin.password', '/dev/null chars=ascii_lowercase,digits length=16') }}" + - name: Configure advanced security block: - name: Enable advanced security, check mode @@ -1003,6 +1008,10 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true check_mode: true register: opensearch_domain @@ -1019,12 +1028,20 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true wait_timeout: "{{ 60 * 60 }}" register: opensearch_domain - assert: that: - "opensearch_domain.advanced_security_options.enabled == True" + - "opensearch_domain.advanced_security_options.internal_user_database_enabled == False" + - "opensearch_domain.advanced_security_options.master_user_options is defined" + - "opensearch_domain.advanced_security_options.master_user_options.master_user_name is test_master_user_name" + - "opensearch_domain.advanced_security_options.master_user_options.master_user_password is test_master_user_password" - opensearch_domain is changed - name: Enable advanced security, check mode again @@ -1032,6 +1049,10 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true check_mode: true register: opensearch_domain @@ -1044,11 +1065,19 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true register: opensearch_domain - assert: that: - "opensearch_domain.advanced_security_options.enabled == True" + - "opensearch_domain.advanced_security_options.internal_user_database_enabled == False" + - "opensearch_domain.advanced_security_options.master_user_options is defined" + - "opensearch_domain.advanced_security_options.master_user_options.master_user_name is test_master_user_name" + - 
"opensearch_domain.advanced_security_options.master_user_options.master_user_password is test_master_user_password" - opensearch_domain is not changed - name: Configure warm and cold storage
Cannot create Elastic Search cluster using advanced security options ### Summary This is ansible fragment from the code to create cluster: ``` advanced_security_options: enabled: true internal_user_database_enabled: true master_user_options: master_user_name: "{{ opensearch_user }}" master_user_password: "{{ opensearch_password }}" ``` This is the error I get: File "/usr/local/Cellar/[email protected]/3.8.7/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 87, in _run_code exec(code, run_globals) File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 1500, in <module> File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 1494, in main File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 1232, in ensure_domain_present File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 956, in set_advanced_security_options KeyError: 'MasterUserOptions' I think the code is trying to access MasterUserOptions key w/o setting it empty dictionary first. ### Issue Type Bug Report ### Component Name community.aws.opensearch ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.4] config file = /Users/dima/GIT/devops/ansible/ansible.cfg configured module search path = ['/Users/dima/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/site-packages/ansible ansible collection location = /Users/dima/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.8.7 (default, Dec 30 2020, 10:14:55) [Clang 12.0.0 (clang-1200.0.32.28)] jinja version = 2.11.3 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` # /Users/dima/.ansible/collections/ansible_collections Collection Version ------------- ------- amazon.aws 5.0.2 community.aws 5.0.0 # /usr/local/lib/python3.8/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 2.2.0 ansible.netcommon 2.6.1 ansible.posix 1.3.0 ansible.utils 2.5.2 ansible.windows 1.9.0 arista.eos 3.1.0 awx.awx 19.4.0 azure.azcollection 1.12.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.2.0 cisco.aci 2.2.0 cisco.asa 2.1.0 cisco.intersight 1.0.18 cisco.ios 2.8.1 cisco.iosxr 2.9.0 cisco.ise 1.2.1 cisco.meraki 2.6.1 cisco.mso 1.4.0 cisco.nso 1.0.3 cisco.nxos 2.9.1 cisco.ucs 1.8.0 cloud.common 2.1.0 cloudscale_ch.cloud 2.2.1 community.aws 2.4.0 community.azure 1.1.0 community.ciscosmb 1.0.4 community.crypto 2.2.4 community.digitalocean 1.16.0 community.dns 2.0.9 community.docker 2.3.0 community.fortios 1.0.0 community.general 4.7.0 community.google 1.0.0 community.grafana 1.3.3 community.hashi_vault 2.4.0 community.hrobot 1.2.3 community.kubernetes 2.0.1 community.kubevirt 1.0.0 community.libvirt 1.0.2 community.mongodb 1.3.3 community.mysql 2.3.5 community.network 3.1.0 
community.okd 2.1.0 community.postgresql 1.7.1 community.proxysql 1.3.1 community.rabbitmq 1.1.0 community.routeros 2.0.0 community.sap 1.0.0 community.skydive 1.0.0 community.sops 1.2.1 community.vmware 1.18.0 community.windows 1.9.0 community.zabbix 1.5.1 containers.podman 1.9.3 cyberark.conjur 1.1.0 cyberark.pas 1.0.13 dellemc.enterprise_sonic 1.1.0 dellemc.openmanage 4.4.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.15.0 fortinet.fortimanager 2.1.4 fortinet.fortios 2.1.4 frr.frr 1.0.3 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.6.0 hpe.nimble 1.1.4 ibm.qradar 1.0.3 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.2.1 inspur.sm 1.3.0 junipernetworks.junos 2.10.0 kubernetes.core 2.3.0 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.15.0 netapp.elementsw 21.7.0 netapp.ontap 21.17.3 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.6.0 ngine_io.cloudstack 2.2.3 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.1 openstack.cloud 1.7.2 openvswitch.openvswitch 2.1.0 ovirt.ovirt 1.6.6 purestorage.flasharray 1.12.1 purestorage.flashblade 1.9.0 sensu.sensu_go 1.13.0 servicenow.servicenow 1.0.6 splunk.es 1.0.2 t_systems_mms.icinga_director 1.28.0 theforeman.foreman 2.2.0 vyos.vyos 2.8.0 wti.remote 1.0.3 office:ansible dima$ ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore ``` office:ansible dima$ pip3 show boto boto3 botocore Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /usr/local/lib/python3.8/site-packages Requires: Required-by: --- Name: boto3 Version: 1.24.89 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.89 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: python-dateutil, urllib3, jmespath Required-by: s3transfer, boto3 office:ansible dima$ ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Steps to Reproduce Just create the cluster with teh advanced security: advanced_security_options: enabled: true internal_user_database_enabled: true master_user_options: master_user_name: "{{ opensearch_user }}" master_user_password: "{{ opensearch_password }}" ### Expected Results Ansible should not crash ### Actual Results ```console (paste below) ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Hello, same issue here: ``` - name: Create OpenSearch domain for dev environment, no zone awareness, no dedicated masters community.aws.opensearch: domain_name: "{{ domain_name }}" engine_version: Elasticsearch_7.10 cluster_config: instance_type: "t2.small.search" instance_count: 2 zone_awareness: false dedicated_master: false ebs_options: ebs_enabled: true volume_type: "gp2" volume_size: 10 advanced_security_options: enabled: true internal_user_database_enabled: false master_user_options: master_user_name: myusername master_user_password: asecurepassword ``` Fails with: ``` KeyError: 'MasterUserOptions' fatal: [localhost]: FAILED! => {"changed": false, "module_stderr": "Traceback (most recent call last):\n File \"<stdin>\", line 107, in <module>\n File \"<stdin>\", line 99, in _ansiballz_main\n File \"<stdin>\", line 47, in invoke_module\n File \"/Users/Giovanni.Toraldo/.pyenv/versions/3.9.13/lib/python3.9/runpy.py\", line 225, in run_module\n return _run_module_code(code, init_globals, run_name, mod_spec)\n File \"/Users/Giovanni.Toraldo/.pyenv/versions/3.9.13/lib/python3.9/runpy.py\", line 97, in _run_module_code\n _run_code(code, mod_globals, init_globals,\n File \"/Users/Giovanni.Toraldo/.pyenv/versions/3.9.13/lib/python3.9/runpy.py\", line 87, in _run_code\n exec(code, run_globals)\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 1500, in <module>\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 1494, in main\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 1232, in ensure_domain_present\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 952, in set_advanced_security_options\nKeyError: 'MasterUserOptions'\n", "module_stdout": "", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1} ``` @gionn @rogozind Thank you for reporting this. Would anyone be willing to to open a PR to fix this bug? I am not sure if this is the right fix but this patch solved it for me: ./community/aws/plugins/modules/opensearch.py: ``` master_user_opts = advanced_security_opts.get("master_user_options") if master_user_opts is not None: + advanced_security_config["MasterUserOptions"] = {} if master_user_opts.get("master_user_arn") is not None: advanced_security_config["MasterUserOptions"][ ```
2022-12-06T15:59:25
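Editor's note on the opensearch fix captured in the record above (the same one-line change appears again in the two records that follow): the hints show a workaround assigning an empty dict, while the merged patch uses setdefault; both avoid the KeyError the same way, by creating the nested dict before writing into it. A minimal runnable sketch of that pattern, with hypothetical option values standing in for the module's parsed parameters:

```python
# Sketch of the defensive-initialization pattern used by the patch above.
# The option values below are hypothetical stand-ins for module parameters.
advanced_security_config = {}
master_user_opts = {
    "master_user_name": "my_custom_admin_username",
    "master_user_password": "example-password",
}

if master_user_opts is not None:
    # Ensure the nested dict exists before indexing into it, so a missing
    # "MasterUserOptions" key can no longer raise KeyError.
    advanced_security_config.setdefault("MasterUserOptions", {})
    if master_user_opts.get("master_user_name") is not None:
        advanced_security_config["MasterUserOptions"]["MasterUserName"] = master_user_opts["master_user_name"]
    if master_user_opts.get("master_user_password") is not None:
        advanced_security_config["MasterUserOptions"]["MasterUserPassword"] = master_user_opts["master_user_password"]

print(advanced_security_config)
```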
ansible-collections/community.aws
1,614
ansible-collections__community.aws-1614
[ "1560" ]
4a6c9af5d923e8a06b038bc941b3a55b1d23436f
diff --git a/plugins/modules/opensearch.py b/plugins/modules/opensearch.py --- a/plugins/modules/opensearch.py +++ b/plugins/modules/opensearch.py @@ -947,6 +947,7 @@ def set_advanced_security_options( ] = advanced_security_opts.get("internal_user_database_enabled") master_user_opts = advanced_security_opts.get("master_user_options") if master_user_opts is not None: + advanced_security_config.setdefault("MasterUserOptions", {}) if master_user_opts.get("master_user_arn") is not None: advanced_security_config["MasterUserOptions"][ "MasterUserARN"
diff --git a/tests/integration/targets/opensearch/tasks/test_opensearch.yml b/tests/integration/targets/opensearch/tasks/test_opensearch.yml --- a/tests/integration/targets/opensearch/tasks/test_opensearch.yml +++ b/tests/integration/targets/opensearch/tasks/test_opensearch.yml @@ -3,7 +3,7 @@ - name: test without specifying required module options opensearch: engine_version: "Elasticsearch_7.1" - ignore_errors: yes + ignore_errors: true register: result - name: assert domain_name is a required module option @@ -959,7 +959,7 @@ wait_timeout: "{{ 60 * 60 }}" register: opensearch_domain until: opensearch_domain is not failed - ignore_errors: yes + ignore_errors: true retries: 10 # After enabling at rest encryption, there is a period during which the API fails, so retry. delay: 30 @@ -996,6 +996,11 @@ - "opensearch_domain.domain_endpoint_options.tls_security_policy == 'Policy-Min-TLS-1-2-2019-07'" - opensearch_domain is changed +- name: Set common facts for advanced security tests + set_fact: + test_master_user_name: my_custom_admin_username + test_master_user_password: "{{ lookup('ansible.builtin.password', '/dev/null chars=ascii_lowercase,digits length=16') }}" + - name: Configure advanced security block: - name: Enable advanced security, check mode @@ -1003,6 +1008,10 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true check_mode: true register: opensearch_domain @@ -1019,12 +1028,20 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true wait_timeout: "{{ 60 * 60 }}" register: opensearch_domain - assert: that: - "opensearch_domain.advanced_security_options.enabled == True" + - "opensearch_domain.advanced_security_options.internal_user_database_enabled == False" + - "opensearch_domain.advanced_security_options.master_user_options is defined" + - "opensearch_domain.advanced_security_options.master_user_options.master_user_name is test_master_user_name" + - "opensearch_domain.advanced_security_options.master_user_options.master_user_password is test_master_user_password" - opensearch_domain is changed - name: Enable advanced security, check mode again @@ -1032,6 +1049,10 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true check_mode: true register: opensearch_domain @@ -1044,11 +1065,19 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true register: opensearch_domain - assert: that: - "opensearch_domain.advanced_security_options.enabled == True" + - "opensearch_domain.advanced_security_options.internal_user_database_enabled == False" + - "opensearch_domain.advanced_security_options.master_user_options is defined" + - "opensearch_domain.advanced_security_options.master_user_options.master_user_name is test_master_user_name" + - 
"opensearch_domain.advanced_security_options.master_user_options.master_user_password is test_master_user_password" - opensearch_domain is not changed - name: Configure warm and cold storage
Cannot create Elastic Search cluster using advanced security options ### Summary This is ansible fragment from the code to create cluster: ``` advanced_security_options: enabled: true internal_user_database_enabled: true master_user_options: master_user_name: "{{ opensearch_user }}" master_user_password: "{{ opensearch_password }}" ``` This is the error I get: File "/usr/local/Cellar/[email protected]/3.8.7/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 87, in _run_code exec(code, run_globals) File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 1500, in <module> File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 1494, in main File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 1232, in ensure_domain_present File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 956, in set_advanced_security_options KeyError: 'MasterUserOptions' I think the code is trying to access MasterUserOptions key w/o setting it empty dictionary first. ### Issue Type Bug Report ### Component Name community.aws.opensearch ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.4] config file = /Users/dima/GIT/devops/ansible/ansible.cfg configured module search path = ['/Users/dima/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/site-packages/ansible ansible collection location = /Users/dima/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.8.7 (default, Dec 30 2020, 10:14:55) [Clang 12.0.0 (clang-1200.0.32.28)] jinja version = 2.11.3 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` # /Users/dima/.ansible/collections/ansible_collections Collection Version ------------- ------- amazon.aws 5.0.2 community.aws 5.0.0 # /usr/local/lib/python3.8/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 2.2.0 ansible.netcommon 2.6.1 ansible.posix 1.3.0 ansible.utils 2.5.2 ansible.windows 1.9.0 arista.eos 3.1.0 awx.awx 19.4.0 azure.azcollection 1.12.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.2.0 cisco.aci 2.2.0 cisco.asa 2.1.0 cisco.intersight 1.0.18 cisco.ios 2.8.1 cisco.iosxr 2.9.0 cisco.ise 1.2.1 cisco.meraki 2.6.1 cisco.mso 1.4.0 cisco.nso 1.0.3 cisco.nxos 2.9.1 cisco.ucs 1.8.0 cloud.common 2.1.0 cloudscale_ch.cloud 2.2.1 community.aws 2.4.0 community.azure 1.1.0 community.ciscosmb 1.0.4 community.crypto 2.2.4 community.digitalocean 1.16.0 community.dns 2.0.9 community.docker 2.3.0 community.fortios 1.0.0 community.general 4.7.0 community.google 1.0.0 community.grafana 1.3.3 community.hashi_vault 2.4.0 community.hrobot 1.2.3 community.kubernetes 2.0.1 community.kubevirt 1.0.0 community.libvirt 1.0.2 community.mongodb 1.3.3 community.mysql 2.3.5 community.network 3.1.0 
community.okd 2.1.0 community.postgresql 1.7.1 community.proxysql 1.3.1 community.rabbitmq 1.1.0 community.routeros 2.0.0 community.sap 1.0.0 community.skydive 1.0.0 community.sops 1.2.1 community.vmware 1.18.0 community.windows 1.9.0 community.zabbix 1.5.1 containers.podman 1.9.3 cyberark.conjur 1.1.0 cyberark.pas 1.0.13 dellemc.enterprise_sonic 1.1.0 dellemc.openmanage 4.4.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.15.0 fortinet.fortimanager 2.1.4 fortinet.fortios 2.1.4 frr.frr 1.0.3 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.6.0 hpe.nimble 1.1.4 ibm.qradar 1.0.3 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.2.1 inspur.sm 1.3.0 junipernetworks.junos 2.10.0 kubernetes.core 2.3.0 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.15.0 netapp.elementsw 21.7.0 netapp.ontap 21.17.3 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.6.0 ngine_io.cloudstack 2.2.3 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.1 openstack.cloud 1.7.2 openvswitch.openvswitch 2.1.0 ovirt.ovirt 1.6.6 purestorage.flasharray 1.12.1 purestorage.flashblade 1.9.0 sensu.sensu_go 1.13.0 servicenow.servicenow 1.0.6 splunk.es 1.0.2 t_systems_mms.icinga_director 1.28.0 theforeman.foreman 2.2.0 vyos.vyos 2.8.0 wti.remote 1.0.3 office:ansible dima$ ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore ``` office:ansible dima$ pip3 show boto boto3 botocore Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /usr/local/lib/python3.8/site-packages Requires: Required-by: --- Name: boto3 Version: 1.24.89 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.89 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: python-dateutil, urllib3, jmespath Required-by: s3transfer, boto3 office:ansible dima$ ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Steps to Reproduce Just create the cluster with teh advanced security: advanced_security_options: enabled: true internal_user_database_enabled: true master_user_options: master_user_name: "{{ opensearch_user }}" master_user_password: "{{ opensearch_password }}" ### Expected Results Ansible should not crash ### Actual Results ```console (paste below) ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Hello, same issue here: ``` - name: Create OpenSearch domain for dev environment, no zone awareness, no dedicated masters community.aws.opensearch: domain_name: "{{ domain_name }}" engine_version: Elasticsearch_7.10 cluster_config: instance_type: "t2.small.search" instance_count: 2 zone_awareness: false dedicated_master: false ebs_options: ebs_enabled: true volume_type: "gp2" volume_size: 10 advanced_security_options: enabled: true internal_user_database_enabled: false master_user_options: master_user_name: myusername master_user_password: asecurepassword ``` Fails with: ``` KeyError: 'MasterUserOptions' fatal: [localhost]: FAILED! => {"changed": false, "module_stderr": "Traceback (most recent call last):\n File \"<stdin>\", line 107, in <module>\n File \"<stdin>\", line 99, in _ansiballz_main\n File \"<stdin>\", line 47, in invoke_module\n File \"/Users/Giovanni.Toraldo/.pyenv/versions/3.9.13/lib/python3.9/runpy.py\", line 225, in run_module\n return _run_module_code(code, init_globals, run_name, mod_spec)\n File \"/Users/Giovanni.Toraldo/.pyenv/versions/3.9.13/lib/python3.9/runpy.py\", line 97, in _run_module_code\n _run_code(code, mod_globals, init_globals,\n File \"/Users/Giovanni.Toraldo/.pyenv/versions/3.9.13/lib/python3.9/runpy.py\", line 87, in _run_code\n exec(code, run_globals)\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 1500, in <module>\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 1494, in main\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 1232, in ensure_domain_present\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 952, in set_advanced_security_options\nKeyError: 'MasterUserOptions'\n", "module_stdout": "", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1} ``` @gionn @rogozind Thank you for reporting this. Would anyone be willing to to open a PR to fix this bug? I am not sure if this is the right fix but this patch solved it for me: ./community/aws/plugins/modules/opensearch.py: ``` master_user_opts = advanced_security_opts.get("master_user_options") if master_user_opts is not None: + advanced_security_config["MasterUserOptions"] = {} if master_user_opts.get("master_user_arn") is not None: advanced_security_config["MasterUserOptions"][ ``` @rogozind looks good! I've opened a PR with that change if you don't mind
2022-12-07T10:42:39
ansible-collections/community.aws
1,615
ansible-collections__community.aws-1615
[ "1560" ]
f192ae7c7042ab05ae40586306216922a1e88acb
diff --git a/plugins/modules/opensearch.py b/plugins/modules/opensearch.py --- a/plugins/modules/opensearch.py +++ b/plugins/modules/opensearch.py @@ -948,6 +948,7 @@ def set_advanced_security_options( ] = advanced_security_opts.get("internal_user_database_enabled") master_user_opts = advanced_security_opts.get("master_user_options") if master_user_opts is not None: + advanced_security_config.setdefault("MasterUserOptions", {}) if master_user_opts.get("master_user_arn") is not None: advanced_security_config["MasterUserOptions"][ "MasterUserARN"
diff --git a/tests/integration/targets/opensearch/tasks/test_opensearch.yml b/tests/integration/targets/opensearch/tasks/test_opensearch.yml --- a/tests/integration/targets/opensearch/tasks/test_opensearch.yml +++ b/tests/integration/targets/opensearch/tasks/test_opensearch.yml @@ -3,7 +3,7 @@ - name: test without specifying required module options opensearch: engine_version: "Elasticsearch_7.1" - ignore_errors: yes + ignore_errors: true register: result - name: assert domain_name is a required module option @@ -959,7 +959,7 @@ wait_timeout: "{{ 60 * 60 }}" register: opensearch_domain until: opensearch_domain is not failed - ignore_errors: yes + ignore_errors: true retries: 10 # After enabling at rest encryption, there is a period during which the API fails, so retry. delay: 30 @@ -996,6 +996,11 @@ - "opensearch_domain.domain_endpoint_options.tls_security_policy == 'Policy-Min-TLS-1-2-2019-07'" - opensearch_domain is changed +- name: Set common facts for advanced security tests + set_fact: + test_master_user_name: my_custom_admin_username + test_master_user_password: "{{ lookup('ansible.builtin.password', '/dev/null chars=ascii_lowercase,digits length=16') }}" + - name: Configure advanced security block: - name: Enable advanced security, check mode @@ -1003,6 +1008,10 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true check_mode: true register: opensearch_domain @@ -1019,12 +1028,20 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true wait_timeout: "{{ 60 * 60 }}" register: opensearch_domain - assert: that: - "opensearch_domain.advanced_security_options.enabled == True" + - "opensearch_domain.advanced_security_options.internal_user_database_enabled == False" + - "opensearch_domain.advanced_security_options.master_user_options is defined" + - "opensearch_domain.advanced_security_options.master_user_options.master_user_name is test_master_user_name" + - "opensearch_domain.advanced_security_options.master_user_options.master_user_password is test_master_user_password" - opensearch_domain is changed - name: Enable advanced security, check mode again @@ -1032,6 +1049,10 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true check_mode: true register: opensearch_domain @@ -1044,11 +1065,19 @@ domain_name: "es-{{ tiny_prefix }}-vpc" advanced_security_options: enabled: true + internal_user_database_enabled: false + master_user_options: + master_user_name: "{{ test_master_user_name }}" + master_user_password: "{{ test_master_user_password }}" wait: true register: opensearch_domain - assert: that: - "opensearch_domain.advanced_security_options.enabled == True" + - "opensearch_domain.advanced_security_options.internal_user_database_enabled == False" + - "opensearch_domain.advanced_security_options.master_user_options is defined" + - "opensearch_domain.advanced_security_options.master_user_options.master_user_name is test_master_user_name" + - 
"opensearch_domain.advanced_security_options.master_user_options.master_user_password is test_master_user_password" - opensearch_domain is not changed - name: Configure warm and cold storage
Cannot create Elastic Search cluster using advanced security options ### Summary This is ansible fragment from the code to create cluster: ``` advanced_security_options: enabled: true internal_user_database_enabled: true master_user_options: master_user_name: "{{ opensearch_user }}" master_user_password: "{{ opensearch_password }}" ``` This is the error I get: File "/usr/local/Cellar/[email protected]/3.8.7/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 87, in _run_code exec(code, run_globals) File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 1500, in <module> File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 1494, in main File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 1232, in ensure_domain_present File "/var/folders/4p/p4gsm16109d0p78txhvjc2mw0000gn/T/ansible_community.aws.opensearch_payload_23ucyoth/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py", line 956, in set_advanced_security_options KeyError: 'MasterUserOptions' I think the code is trying to access MasterUserOptions key w/o setting it empty dictionary first. ### Issue Type Bug Report ### Component Name community.aws.opensearch ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.4] config file = /Users/dima/GIT/devops/ansible/ansible.cfg configured module search path = ['/Users/dima/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/site-packages/ansible ansible collection location = /Users/dima/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.8.7 (default, Dec 30 2020, 10:14:55) [Clang 12.0.0 (clang-1200.0.32.28)] jinja version = 2.11.3 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` # /Users/dima/.ansible/collections/ansible_collections Collection Version ------------- ------- amazon.aws 5.0.2 community.aws 5.0.0 # /usr/local/lib/python3.8/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 2.2.0 ansible.netcommon 2.6.1 ansible.posix 1.3.0 ansible.utils 2.5.2 ansible.windows 1.9.0 arista.eos 3.1.0 awx.awx 19.4.0 azure.azcollection 1.12.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.2.0 cisco.aci 2.2.0 cisco.asa 2.1.0 cisco.intersight 1.0.18 cisco.ios 2.8.1 cisco.iosxr 2.9.0 cisco.ise 1.2.1 cisco.meraki 2.6.1 cisco.mso 1.4.0 cisco.nso 1.0.3 cisco.nxos 2.9.1 cisco.ucs 1.8.0 cloud.common 2.1.0 cloudscale_ch.cloud 2.2.1 community.aws 2.4.0 community.azure 1.1.0 community.ciscosmb 1.0.4 community.crypto 2.2.4 community.digitalocean 1.16.0 community.dns 2.0.9 community.docker 2.3.0 community.fortios 1.0.0 community.general 4.7.0 community.google 1.0.0 community.grafana 1.3.3 community.hashi_vault 2.4.0 community.hrobot 1.2.3 community.kubernetes 2.0.1 community.kubevirt 1.0.0 community.libvirt 1.0.2 community.mongodb 1.3.3 community.mysql 2.3.5 community.network 3.1.0 
community.okd 2.1.0 community.postgresql 1.7.1 community.proxysql 1.3.1 community.rabbitmq 1.1.0 community.routeros 2.0.0 community.sap 1.0.0 community.skydive 1.0.0 community.sops 1.2.1 community.vmware 1.18.0 community.windows 1.9.0 community.zabbix 1.5.1 containers.podman 1.9.3 cyberark.conjur 1.1.0 cyberark.pas 1.0.13 dellemc.enterprise_sonic 1.1.0 dellemc.openmanage 4.4.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.15.0 fortinet.fortimanager 2.1.4 fortinet.fortios 2.1.4 frr.frr 1.0.3 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.6.0 hpe.nimble 1.1.4 ibm.qradar 1.0.3 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.2.1 inspur.sm 1.3.0 junipernetworks.junos 2.10.0 kubernetes.core 2.3.0 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.15.0 netapp.elementsw 21.7.0 netapp.ontap 21.17.3 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.6.0 ngine_io.cloudstack 2.2.3 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.1 openstack.cloud 1.7.2 openvswitch.openvswitch 2.1.0 ovirt.ovirt 1.6.6 purestorage.flasharray 1.12.1 purestorage.flashblade 1.9.0 sensu.sensu_go 1.13.0 servicenow.servicenow 1.0.6 splunk.es 1.0.2 t_systems_mms.icinga_director 1.28.0 theforeman.foreman 2.2.0 vyos.vyos 2.8.0 wti.remote 1.0.3 office:ansible dima$ ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore ``` office:ansible dima$ pip3 show boto boto3 botocore Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /usr/local/lib/python3.8/site-packages Requires: Required-by: --- Name: boto3 Version: 1.24.89 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.89 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/lib/python3.8/site-packages Requires: python-dateutil, urllib3, jmespath Required-by: s3transfer, boto3 office:ansible dima$ ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Steps to Reproduce Just create the cluster with teh advanced security: advanced_security_options: enabled: true internal_user_database_enabled: true master_user_options: master_user_name: "{{ opensearch_user }}" master_user_password: "{{ opensearch_password }}" ### Expected Results Ansible should not crash ### Actual Results ```console (paste below) ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Hello, same issue here: ``` - name: Create OpenSearch domain for dev environment, no zone awareness, no dedicated masters community.aws.opensearch: domain_name: "{{ domain_name }}" engine_version: Elasticsearch_7.10 cluster_config: instance_type: "t2.small.search" instance_count: 2 zone_awareness: false dedicated_master: false ebs_options: ebs_enabled: true volume_type: "gp2" volume_size: 10 advanced_security_options: enabled: true internal_user_database_enabled: false master_user_options: master_user_name: myusername master_user_password: asecurepassword ``` Fails with: ``` KeyError: 'MasterUserOptions' fatal: [localhost]: FAILED! => {"changed": false, "module_stderr": "Traceback (most recent call last):\n File \"<stdin>\", line 107, in <module>\n File \"<stdin>\", line 99, in _ansiballz_main\n File \"<stdin>\", line 47, in invoke_module\n File \"/Users/Giovanni.Toraldo/.pyenv/versions/3.9.13/lib/python3.9/runpy.py\", line 225, in run_module\n return _run_module_code(code, init_globals, run_name, mod_spec)\n File \"/Users/Giovanni.Toraldo/.pyenv/versions/3.9.13/lib/python3.9/runpy.py\", line 97, in _run_module_code\n _run_code(code, mod_globals, init_globals,\n File \"/Users/Giovanni.Toraldo/.pyenv/versions/3.9.13/lib/python3.9/runpy.py\", line 87, in _run_code\n exec(code, run_globals)\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 1500, in <module>\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 1494, in main\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 1232, in ensure_domain_present\n File \"/var/folders/l4/rr1s57q973ggg6ylq20bqy6h0000gq/T/ansible_community.aws.opensearch_payload_nvm9_wbt/ansible_community.aws.opensearch_payload.zip/ansible_collections/community/aws/plugins/modules/opensearch.py\", line 952, in set_advanced_security_options\nKeyError: 'MasterUserOptions'\n", "module_stdout": "", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1} ``` @gionn @rogozind Thank you for reporting this. Would anyone be willing to to open a PR to fix this bug? I am not sure if this is the right fix but this patch solved it for me: ./community/aws/plugins/modules/opensearch.py: ``` master_user_opts = advanced_security_opts.get("master_user_options") if master_user_opts is not None: + advanced_security_config["MasterUserOptions"] = {} if master_user_opts.get("master_user_arn") is not None: advanced_security_config["MasterUserOptions"][ ``` @rogozind looks good! I've opened a PR with that change if you don't mind
2022-12-07T10:42:55
ansible-collections/community.aws
1,623
ansible-collections__community.aws-1623
[ "1203" ]
bdb7c9f26f6ff39654cd90e2dd18605a6e3b026c
diff --git a/plugins/modules/ecs_ecr.py b/plugins/modules/ecs_ecr.py --- a/plugins/modules/ecs_ecr.py +++ b/plugins/modules/ecs_ecr.py @@ -85,6 +85,24 @@ default: false type: bool version_added: 1.3.0 + encryption_configuration: + description: + - The encryption configuration for the repository. + required: false + suboptions: + encryption_type: + description: + - The encryption type to use. + choices: [AES256, KMS] + default: 'AES256' + type: str + kms_key: + description: + - If I(encryption_type=KMS), specify the KMS key to use for encryption. + - The alias, key ID, or full ARN of the KMS key can be specified. + type: str + type: dict + version_added: 5.2.0 author: - David M. Lee (@leedm777) extends_documentation_fragment: @@ -161,6 +179,13 @@ community.aws.ecs_ecr: name: needs-no-lifecycle-policy purge_lifecycle_policy: true + +- name: set-encryption-configuration + community.aws.ecs_ecr: + name: uses-custom-kms-key + encryption_configuration: + encryption_type: KMS + kms_key: custom-kms-key-alias ''' RETURN = ''' @@ -201,6 +226,7 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule @@ -248,17 +274,21 @@ def get_repository_policy(self, registry_id, name): except is_boto3_error_code(['RepositoryNotFoundException', 'RepositoryPolicyNotFoundException']): return None - def create_repository(self, registry_id, name, image_tag_mutability): + def create_repository(self, registry_id, name, image_tag_mutability, encryption_configuration): if registry_id: default_registry_id = self.sts.get_caller_identity().get('Account') if registry_id != default_registry_id: raise Exception('Cannot create repository in registry {0}.' 
'Would be created in {1} instead.'.format(registry_id, default_registry_id)) + if encryption_configuration is None: + encryption_configuration = dict(encryptionType='AES256') + if not self.check_mode: repo = self.ecr.create_repository( repositoryName=name, - imageTagMutability=image_tag_mutability).get('repository') + imageTagMutability=image_tag_mutability, + encryptionConfiguration=encryption_configuration).get('repository') self.changed = True return repo else: @@ -411,6 +441,7 @@ def run(ecr, params): lifecycle_policy_text = params['lifecycle_policy'] purge_lifecycle_policy = params['purge_lifecycle_policy'] scan_on_push = params['scan_on_push'] + encryption_configuration = snake_dict_to_camel_dict(params['encryption_configuration']) # Parse policies, if they are given try: @@ -437,10 +468,16 @@ def run(ecr, params): result['created'] = False if not repo: - repo = ecr.create_repository(registry_id, name, image_tag_mutability) + repo = ecr.create_repository( + registry_id, name, image_tag_mutability, encryption_configuration) result['changed'] = True result['created'] = True else: + if encryption_configuration is not None: + if repo.get('encryptionConfiguration') != encryption_configuration: + result['msg'] = 'Cannot modify repository encryption type' + return False, result + repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability) result['repository'] = repo @@ -550,7 +587,18 @@ def main(): purge_policy=dict(required=False, type='bool'), lifecycle_policy=dict(required=False, type='json'), purge_lifecycle_policy=dict(required=False, type='bool'), - scan_on_push=(dict(required=False, type='bool', default=False)) + scan_on_push=(dict(required=False, type='bool', default=False)), + encryption_configuration=dict( + required=False, + type='dict', + options=dict( + encryption_type=dict(required=False, type='str', default='AES256', choices=['AES256', 'KMS']), + kms_key=dict(required=False, type='str', no_log=False), + ), + required_if=[ + ['encryption_type', 'KMS', ['kms_key']], + ], + ), ) mutually_exclusive = [ ['policy', 'purge_policy'],
diff --git a/tests/integration/targets/ecs_ecr/tasks/main.yml b/tests/integration/targets/ecs_ecr/tasks/main.yml --- a/tests/integration/targets/ecs_ecr/tasks/main.yml +++ b/tests/integration/targets/ecs_ecr/tasks/main.yml @@ -10,6 +10,24 @@ - set_fact: ecr_name: '{{ resource_prefix }}-ecr' + - name: get ARN of calling user + aws_caller_info: + register: aws_caller_info + + - name: create KMS key for testing + aws_kms: + alias: "{{ resource_prefix }}-ecr" + description: a key used for testing ECR + state: present + enabled: yes + key_spec: SYMMETRIC_DEFAULT + key_usage: ENCRYPT_DECRYPT + policy: "{{ lookup('template', 'kms_policy.j2') }}" + tags: + Name: "{{ resource_prefix }}-ecr" + AnsibleTest: AnsibleTestVpc + register: kms_test_key + - name: When creating with check mode ecs_ecr: name: '{{ ecr_name }}' @@ -54,6 +72,11 @@ that: - result.repository.imageTagMutability == "MUTABLE" + - name: it should use AES256 encryption by default + assert: + that: + - result.repository.encryptionConfiguration.encryptionType == "AES256" + - name: When pulling an existing repository that has no existing policy ecs_ecr: name: '{{ ecr_name }}' @@ -538,9 +561,52 @@ - result is changed - not result.repository.imageScanningConfiguration.scanOnPush + - name: When modifying the encryption setting of an existing repository + ecs_ecr: + name: '{{ ecr_name }}' + encryption_configuration: + encryption_type: KMS + kms_key: '{{ kms_test_key.key_arn }}' + register: result + ignore_errors: true + + - name: it should fail + assert: + that: + - result is failed + + - name: delete repository + ecs_ecr: + name: '{{ ecr_name }}' + state: absent + + - name: When creating a repo using KMS encryption + ecs_ecr: + name: '{{ ecr_name }}' + encryption_configuration: + encryption_type: KMS + kms_key: '{{ kms_test_key.key_arn }}' + register: result + + - name: it should create the repo and use KMS encryption + assert: + that: + - result is changed + - result.repository.encryptionConfiguration.encryptionType == "KMS" + + - name: it should use the provided KMS key + assert: + that: + - result.repository.encryptionConfiguration.kmsKey == '{{ kms_test_key.key_arn }}' + always: - name: Delete lingering ECR repository ecs_ecr: name: '{{ ecr_name }}' state: absent + + - name: Delete KMS key + aws_kms: + key_id: '{{ kms_test_key.key_arn }}' + state: absent diff --git a/tests/integration/targets/ecs_ecr/templates/kms_policy.j2 b/tests/integration/targets/ecs_ecr/templates/kms_policy.j2 new file mode 100644 --- /dev/null +++ b/tests/integration/targets/ecs_ecr/templates/kms_policy.j2 @@ -0,0 +1,72 @@ +{ + "Id": "key-ansible-test-policy-123", + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Allow access for root user", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root" + }, + "Action": "kms:*", + "Resource": "*" + }, + { + "Sid": "Allow access for calling user", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Create*", + "kms:Describe*", + "kms:Enable*", + "kms:List*", + "kms:Put*", + "kms:Update*", + "kms:Revoke*", + "kms:Disable*", + "kms:Get*", + "kms:Delete*", + "kms:TagResource", + "kms:UntagResource", + "kms:ScheduleKeyDeletion", + "kms:CancelKeyDeletion" + ], + "Resource": "*" + }, + { + "Sid": "Allow use of the key", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ], + "Resource": 
"*" + }, + { + "Sid": "Allow attachment of persistent resources", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:CreateGrant", + "kms:ListGrants", + "kms:RevokeGrant" + ], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } + } + ] +}
ecs_ecr - support for specifying KMS key ### Summary Unless I'm missing something, I don't see a way to specify KMS key usage in this module. Is it possible to integrate this? ### Issue Type Feature Idea ### Component Name ecs_ecr ### Additional Information <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: None If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> Yes. It's possible. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecr.html But only on create. It's not changeable once the ECR is created. Files identified in the description: * [`plugins/modules/ecs_ecr.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ecs_ecr.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @leedm777 @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @markuman > Yes. It's possible. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecr.html > > But only on create. It's not changeable once the ECR is created. I have this working to some extent, but is it possible to pass variables to it, in an Ansible role? Currently, it only works if I hard code the values in the client.create_repository function within the module. I would like to do something like this: ``` - name: create repo from custom module ecr_kms_module: repositoryName: "{{ repo_name }}" encryptionConfiguration.kmsKey: "{{ kms_key }}" ``` > @markuman > > > Yes. It's possible. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecr.html > > But only on create. It's not changeable once the ECR is created. > > I have this working to some extent, but is it possible to pass variables to it, in an Ansible role? Currently, it only works if I hard code the values in the client.create_repository function within the module. I would like to do something like this: > > ``` > - name: create repo from custom module > ecr_kms_module: > repositoryName: "{{ repo_name }}" > encryptionConfiguration.kmsKey: "{{ kms_key }}" > ``` Yes, it should work just out of the box. Maybe something is wrong with your key-name decision and handling. I suggest not to create a new module for this task. Instead patch the existing `ecs_ecr` module and add just the missing `kms_key` parameter. If the ecr already exists, the `kms_key` parameter can be ignored, because it's not changeable. Maybe through a warning if the requestes key differs from the existing key. Are you willing to prepare a PR @GreNIX ?
2022-12-20T18:00:18
ansible-collections/community.aws
1627
ansible-collections__community.aws-1627
[ "1471" ]
99978ef51ce1372d2f36b501b084b2bf54381073
diff --git a/plugins/modules/ssm_parameter.py b/plugins/modules/ssm_parameter.py --- a/plugins/modules/ssm_parameter.py +++ b/plugins/modules/ssm_parameter.py @@ -383,7 +383,7 @@ def create_update_parameter(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="getting description value") - if describe_existing_parameter['Description'] != args['Description']: + if describe_existing_parameter.get('Description') != args['Description']: (changed, response) = update_parameter(client, module, **args) if changed: _wait_updated(client, module, module.params.get('name'), original_version)
diff --git a/tests/integration/targets/ssm_parameter/tasks/main.yml b/tests/integration/targets/ssm_parameter/tasks/main.yml --- a/tests/integration/targets/ssm_parameter/tasks/main.yml +++ b/tests/integration/targets/ssm_parameter/tasks/main.yml @@ -415,6 +415,30 @@ that: - result is not changed + - name: Create key/value pair in aws parameter store with no description + aws_ssm_parameter_store: + name: '{{ simple_name }}' + value: '{{ simple_value }}' + register: result + + - assert: + that: + - result is changed + - '"description" not in result.parameter_metadata' + + - name: Add a description + aws_ssm_parameter_store: + name: '{{ simple_name }}' + value: '{{ simple_value }}' + description: '{{ simple_description }}' + register: result + + - assert: + that: + - result is changed + - '"description" in result.parameter_metadata' + - result.parameter_metadata.description == simple_description + always: # ============================================================ - name: Delete remaining key/value pairs in aws parameter store
aws_ssm_parameter_store update fails if no description element was defined before ### Summary `aws_ssm_parameter_store` update fails if no description element was defined before. If an SSM parameter has an empty description already in SSM, then the AWS API (I'm guessing `aws ssm describe-parameters`) doesn't include the `description` element in the response JSON at all for such item, causing issues in `community.aws. aws_ssm_parameter_store` handling of such reply (Description element no defined). ### Issue Type Bug Report ### Component Name ssm_parameter ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.6] config file = None configured module search path = ['/home/circleci/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/circleci/.local/lib/python3.10/site-packages/ansible ansible collection location = /home/circleci/.ansible/collections:/usr/share/ansible/collections executable location = /home/circleci/.local/bin/ansible python version = 3.10.4 (main, Apr 2 2022, 09:04:19) [GCC 11.2.0] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /home/circleci/.local/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 2.3.0 ansible.netcommon 2.6.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.10.0 arista.eos 3.1.0 awx.awx 19.4.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.2.0 cisco.aci 2.2.0 cisco.asa 2.1.0 cisco.dnac 6.4.0 cisco.intersight 1.0.19 cisco.ios 2.8.1 cisco.iosxr 2.9.0 cisco.ise 1.2.1 cisco.meraki 2.6.2 cisco.mso 1.4.0 cisco.nso 1.0.3 cisco.nxos 2.9.1 cisco.ucs 1.8.0 cloud.common 2.1.1 cloudscale_ch.cloud 2.2.2 community.aws 2.5.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.3.2 community.digitalocean 1.19.0 community.dns 2.2.0 community.docker 2.6.0 community.fortios 1.0.0 community.general 4.8.2 community.google 1.0.0 community.grafana 1.4.0 community.hashi_vault 2.5.0 community.hrobot 1.4.0 community.kubernetes 2.0.1 community.kubevirt 1.0.0 community.libvirt 1.1.0 community.mongodb 1.4.0 community.mysql 2.3.8 community.network 3.3.0 community.okd 2.2.0 community.postgresql 1.7.4 community.proxysql 1.4.0 community.rabbitmq 1.2.1 community.routeros 2.1.0 community.sap 1.0.0 community.sap_libs 1.1.0 community.skydive 1.0.0 community.sops 1.2.2 community.vmware 1.18.0 community.windows 1.10.0 community.zabbix 1.7.0 containers.podman 1.9.3 cyberark.conjur 1.1.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.1 dellemc.openmanage 4.4.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.17.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.6 frr.frr 1.0.4 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.6.0 hpe.nimble 1.1.4 ibm.qradar 1.0.3 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.2.2 inspur.sm 1.3.0 junipernetworks.junos 2.10.0 kubernetes.core 2.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.17.0 netapp.elementsw 21.7.0 netapp.ontap 21.19.1 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.1 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 1.6.6 purestorage.flasharray 1.13.0 purestorage.flashblade 1.9.0 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 1.0.2 
t_systems_mms.icinga_director 1.29.0 theforeman.foreman 2.2.0 vmware.vmware_rest 2.1.5 vyos.vyos 2.8.0 wti.remote 1.0.3 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.24.73 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/circleci/.local/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.73 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/circleci/.local/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment Ubuntu 22.04 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - name: "SSM" ssm_parameter: name: "mytest" string_type: "String" value: "myvalue" state: "present" overwrite_value: "changed" - name: "SSM" ssm_parameter: name: "mytest" string_type: "String" value: "myvalue" state: "present" description: "this_should_fail" overwrite_value: "changed" ``` ### Expected Results Second command successfully adding the description element. ### Actual Results Second command fails due to description being empty in SSM. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: None If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> Files identified in the description: * [`plugins/modules/ssm_parameter.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ssm_parameter.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @116davinder @jillr @markuman @mikedlr @nathanwebsterdotme @ozbillwang @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> Potential solution is probably to consider `Description` an empty string if the Description element doesn't exist in the response: https://github.com/ansible-collections/community.aws/blob/3043bca5d3544a40b78f22e0ece084dde8324530/plugins/modules/ssm_parameter.py#L325 ``` if 'Description' not in existing_parameter['Parameters'][0]: existing_parameter['Parameters'][0]['Description'] = '' ``` @cergfix, It would be really nice if you can run above reproduce steps with `-vvvv` and paste the output to pin-point error location.
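To make the failure mode discussed above concrete, here is a small stand-alone Python sketch; the dictionary is an invented stand-in for a `describe_parameters` entry returned when a parameter was stored without a description, not captured output:

```python
# Shape of a describe_parameters entry for a parameter created without a description;
# the "Description" key is simply absent in that case.
existing_parameter = {"Name": "mytest", "Type": "String", "Version": 1}

requested_description = "this_should_fail"

# Direct indexing raises KeyError: 'Description' -- the failure reported in the issue.
try:
    changed = existing_parameter["Description"] != requested_description
except KeyError:
    changed = None  # this is the code path the bug report hits

# .get() returns None for the missing key, so the comparison still works
# and correctly reports that an update is needed.
changed = existing_parameter.get("Description") != requested_description
print(changed)  # True
```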
2022-12-21T19:24:09
ansible-collections/community.aws
1628
ansible-collections__community.aws-1628
[ "1626", "1626" ]
88872b39fba1b979780d260ea5b0f9f5dcd5aeae
diff --git a/plugins/modules/secretsmanager_secret.py b/plugins/modules/secretsmanager_secret.py --- a/plugins/modules/secretsmanager_secret.py +++ b/plugins/modules/secretsmanager_secret.py @@ -29,6 +29,14 @@ default: 'present' choices: ['present', 'absent'] type: str + overwrite: + description: + - Whether to overwrite an existing secret with the same name. + - If set to C(True), an existing secret with the same I(name) will be overwritten. + - If set to C(False), a secret with the given I(name) will only be created if none exists. + type: bool + default: True + version_added: 5.3.0 recovery_window: description: - Only used if state is absent. @@ -130,6 +138,14 @@ state: absent secret_type: 'string' secret: "{{ super_secret_string }}" + +- name: Only create a new secret, but do not update if alredy exists by name + community.aws.secretsmanager_secret: + name: 'random_string' + state: present + secret_type: 'string' + secret: "{{ lookup('community.general.random_string', length=16, special=false) }}" + overwrite: false ''' RETURN = r''' @@ -524,6 +540,7 @@ def main(): argument_spec={ 'name': dict(required=True), 'state': dict(choices=['present', 'absent'], default='present'), + 'overwrite': dict(type='bool', default=True), 'description': dict(default=""), 'replica': dict(type='list', elements='dict', options=replica_args), 'kms_key_id': dict(), @@ -580,12 +597,15 @@ def main(): result = secrets_mgr.put_resource_policy(secret) changed = True else: + # current_secret exists; decide what to do with it if current_secret.get("DeletedDate"): secrets_mgr.restore_secret(secret.name) changed = True if not secrets_mgr.secrets_match(secret, current_secret): - result = secrets_mgr.update_secret(secret) - changed = True + overwrite = module.params.get('overwrite') + if overwrite: + result = secrets_mgr.update_secret(secret) + changed = True if not rotation_match(secret, current_secret): result = secrets_mgr.update_rotation(secret) changed = True
diff --git a/tests/integration/targets/secretsmanager_secret/tasks/basic.yml b/tests/integration/targets/secretsmanager_secret/tasks/basic.yml --- a/tests/integration/targets/secretsmanager_secret/tasks/basic.yml +++ b/tests/integration/targets/secretsmanager_secret/tasks/basic.yml @@ -688,6 +688,68 @@ that: - result is not changed + # ============================================================ + # Overwrite testing + # ============================================================ + + - name: Create secret with overwrite = False (Check mode) + aws_secret: + name: "{{ secret_name }}-2" + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}" + overwrite: False + register: result + check_mode: True + + - name: assert key is changed + assert: + that: + - result is changed + + - name: Create secret with overwrite = False + aws_secret: + name: "{{ secret_name }}-2" + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}" + overwrite: False + register: result + + - name: assert key is changed + assert: + that: + - result is changed + + - name: Update secret with overwrite = False (Check mode) + aws_secret: + name: "{{ secret_name }}-2" + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}-2" + overwrite: False + register: result + check_mode: True + + - name: assert key is not changed + assert: + that: + - result is not changed + + - name: Create secret with overwrite = False + aws_secret: + name: "{{ secret_name }}-2" + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}-2" + overwrite: False + register: result + + - name: assert key is not changed + assert: + that: + - result is not changed + # ============================================================ # Removal testing # ============================================================ @@ -749,3 +811,10 @@ state: absent recovery_window: 0 ignore_errors: yes + + - name: remove secret 2 + aws_secret: + name: "{{ secret_name }}-2" + state: absent + recovery_window: 0 + ignore_errors: yes
secretsmanager_secret module should not overwrite an existing Secret ### Summary The [`community.aws.secretsmanager_secret`](https://docs.ansible.com/ansible/latest/collections/community/aws/secretsmanager_secret_module.html) module currently offers no option to *not* overwrite a Secret if it exists with the same name but a different value. This forces the user to first try to check if the Secret exists and then skip the task if it does. It will simply call `secrets_mgr.update_secret(secret)` and overwrite the existing one. If the intended Secret value itself is, for example, a random password, the option to only lookup that the Secret exists (but not that its values match) would be a nice feature. Relevant code: https://github.com/ansible-collections/community.aws/blob/99978ef51ce1372d2f36b501b084b2bf54381073/plugins/modules/secretsmanager_secret.py#L479 ### Issue Type Feature Idea ### Component Name secretsmanager_secret ### Additional Information ```yaml - name: Try to retrive existing elastic secrets from AWS Secrets Manager ansible.builtin.set_fact: elastic_user_password: "{{ lookup('amazon.aws.aws_secret', clustername + '/' + elastic_namespace + '.elastic-user-password', nested=true, region=region, on_missing='error') }}" kibana_client_secret: "{{ lookup('amazon.aws.aws_secret', clustername + '/' + elastic_namespace + '.keycloak-secret', nested=true, region=region, on_missing='error') }}" register: secrets_found ignore_errors: true - name: Create elastic user password and Keycloak AWS secrets if necessary community.aws.secretsmanager_secret: name: "{{ clustername }}/{{ elastic_namespace }}" description: Elastic secrets for {{ elastic_namespace }} state: present secret_type: "string" json_secret: { "elastic-user-password": "{{ lookup('community.general.random_string', length=16, special=false) }}", "keycloak-secret": "{{ lookup('community.general.random_string', length=16, special=false) }}" } region: "{{ region }}" when: secrets_found is failed ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`lib/ansible/plugins/lookup`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/lib/ansible/plugins/lookup) * [`plugins/modules/secretsmanager_secret.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/secretsmanager_secret.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @rrey @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
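Purely as an illustration of the "create only if the secret does not already exist" behaviour requested in the issue above, a minimal boto3 sketch; the secret name and value are placeholders, and this is not how the collection module itself is written:

```python
import boto3

client = boto3.client("secretsmanager")

name = "example/cluster-secrets"        # placeholder secret name
new_value = "generated-random-string"   # placeholder value, e.g. a freshly generated password

try:
    client.describe_secret(SecretId=name)
    # Secret already exists: with overwrite disabled, leave its current value untouched.
    print("secret exists, not overwriting")
except client.exceptions.ResourceNotFoundException:
    client.create_secret(Name=name, SecretString=new_value)
    print("secret created")
```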
2022-12-21T19:31:52
ansible-collections/community.aws
1629
ansible-collections__community.aws-1629
[ "1624" ]
99978ef51ce1372d2f36b501b084b2bf54381073
diff --git a/plugins/modules/s3_lifecycle.py b/plugins/modules/s3_lifecycle.py --- a/plugins/modules/s3_lifecycle.py +++ b/plugins/modules/s3_lifecycle.py @@ -467,38 +467,40 @@ def create_lifecycle_rule(client, module): (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule) - - # Write lifecycle to bucket - try: - client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_configuration) - except is_boto3_error_message('At least one action needs to be specified in a rule'): - # Amazon interpretted this as not changing anything - changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) - - _changed = changed - _retries = 10 - _not_changed_cnt = 6 - while wait and _changed and _retries and _not_changed_cnt: - # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again and - # again couple of times. - # Thus try to read the rule few times in a row to check if it has changed. - time.sleep(5) - _retries -= 1 - new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, - new_rules, - new_rule) - if not _changed: - _not_changed_cnt -= 1 - _changed = True - else: - _not_changed_cnt = 6 + if changed: + # Write lifecycle to bucket + try: + client.put_bucket_lifecycle_configuration( + aws_retry=True, + Bucket=name, + LifecycleConfiguration=lifecycle_configuration) + except is_boto3_error_message('At least one action needs to be specified in a rule'): + # Amazon interpretted this as not changing anything + changed = False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) + + _changed = changed + _retries = 10 + _not_changed_cnt = 6 + while wait and _changed and _retries and _not_changed_cnt: + # We've seen examples where get_bucket_lifecycle_configuration returns + # the updated rules, then the old rules, then the updated rules again and + # again couple of times. + # Thus try to read the rule few times in a row to check if it has changed. 
+ time.sleep(5) + _retries -= 1 + new_rules = fetch_rules(client, module, name) + (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, + new_rules, + new_rule) + if not _changed: + _not_changed_cnt -= 1 + _changed = True + else: + _not_changed_cnt = 6 + else: + _retries = 0 new_rules = fetch_rules(client, module, name) @@ -521,36 +523,39 @@ def destroy_lifecycle_rule(client, module): current_lifecycle_rules = fetch_rules(client, module, name) changed, lifecycle_obj = compare_and_remove_rule(current_lifecycle_rules, rule_id, prefix) - # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration - try: - if lifecycle_obj['Rules']: - client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_obj) - elif current_lifecycle_rules: - changed = True - client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - _changed = changed - _retries = 10 - _not_changed_cnt = 6 - while wait and _changed and _retries and _not_changed_cnt: - # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again and - # again couple of times. - # Thus try to read the rule few times in a row to check if it has changed. - time.sleep(5) - _retries -= 1 - new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix) - if not _changed: - _not_changed_cnt -= 1 - _changed = True - else: - _not_changed_cnt = 6 + if changed: + # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration + try: + if lifecycle_obj['Rules']: + client.put_bucket_lifecycle_configuration( + aws_retry=True, + Bucket=name, + LifecycleConfiguration=lifecycle_obj) + elif current_lifecycle_rules: + changed = True + client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) + + _changed = changed + _retries = 10 + _not_changed_cnt = 6 + while wait and _changed and _retries and _not_changed_cnt: + # We've seen examples where get_bucket_lifecycle_configuration returns + # the updated rules, then the old rules, then the updated rules again and + # again couple of times. + # Thus try to read the rule few times in a row to check if it has changed. + time.sleep(5) + _retries -= 1 + new_rules = fetch_rules(client, module, name) + (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix) + if not _changed: + _not_changed_cnt -= 1 + _changed = True + else: + _not_changed_cnt = 6 + else: + _retries = 0 new_rules = fetch_rules(client, module, name)
s3_lifecycle is not idempotent - does write action for no change ### Summary When `s3_lifecycle` is run and there are no changes to make, it still calls [`put_bucket_lifecycle_configuration`](https://github.com/ansible-collections/community.aws/blob/bdb7c9f26f6ff39654cd90e2dd18605a6e3b026c/plugins/modules/s3_lifecycle.py#L473). My use case is that I am running a playbook multiple times concurrently, for a lifecycle configuration which is not changing. And I'm getting errors because of concurrency clashes. If I'm not changing the lifecycle, I expect only read-only calls to S3, which shouldn't clash. This module should get the existing lifecycle config, compare it to what we want, and only if it differs, put the new lifecycle. ### Issue Type Bug Report ### Component Name s3_lifecycle ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.13.6] config file = /home/ec2-user/.ansible.cfg configured module search path = ['/home/ec2-user/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/ec2-user/.pyenv/versions/3.8.11/lib/python3.8/site-packages/ansible ansible collection location = /home/ec2-user/.ansible/collections:/usr/share/ansible/collections executable location = /home/ec2-user/.pyenv/versions/3.8.11/bin/ansible python version = 3.8.11 (default, Sep 7 2022, 04:17:12) [GCC 7.3.1 20180712 (Red Hat 7.3.1-15)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) # /home/ec2-user/.pyenv/versions/3.8.11/lib/python3.8/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 3.5.0 ansible.netcommon 3.1.3 ansible.posix 1.4.0 ansible.utils 2.7.0 ansible.windows 1.12.0 arista.eos 5.0.1 awx.awx 21.8.0 azure.azcollection 1.14.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.1 cisco.aci 2.3.0 cisco.asa 3.1.0 cisco.dnac 6.6.0 cisco.intersight 1.0.20 cisco.ios 3.3.2 cisco.iosxr 3.3.1 cisco.ise 2.5.8 cisco.meraki 2.11.0 cisco.mso 2.1.0 cisco.nso 1.0.3 cisco.nxos 3.2.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.6.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.8.1 community.digitalocean 1.22.0 community.dns 2.4.0 community.docker 2.7.1 community.fortios 1.0.0 community.general 5.8.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 3.4.0 community.hrobot 1.6.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.3.0 community.proxysql 1.4.0 community.rabbitmq 1.2.3 community.routeros 2.3.1 community.sap 1.0.0 community.sap_libs 1.3.0 community.skydive 1.0.0 community.sops 1.4.1 community.vmware 2.10.1 community.windows 1.11.1 community.zabbix 1.8.0 containers.podman 1.9.4 cyberark.conjur 1.2.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.2 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.20.0 fortinet.fortimanager 2.1.6 fortinet.fortios 2.1.7 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.2 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.10.0 infinidat.infinibox 1.3.7 infoblox.nios_modules 1.4.0 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.0.4 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.21.0 netapp.elementsw 21.7.0 netapp.ontap 21.24.1 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 
netapp_eseries.santricity 1.3.1 netbox.netbox 3.8.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.3.1 purestorage.flasharray 1.14.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.1.1 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 2.1.0 t_systems_mms.icinga_director 1.31.4 theforeman.foreman 3.7.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.3.0 vyos.vyos 3.0.1 wti.remote 1.0.4 # /home/ec2-user/.ansible/collections/ansible_collections Collection Version ----------------- ------- amazon.aws 5.1.0 ansible.netcommon 4.1.0 ansible.utils 2.8.0 community.aws 5.0.0 community.crypto 2.9.0 community.general 6.0.1 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ANSIBLE_PIPELINING(/home/ec2-user/.ansible.cfg) = True DEFAULT_LOCAL_TMP(/home/ec2-user/.ansible.cfg) = /dev/shm/ansible/tmp_local/ansible-local-24375r2_prrsj DEFAULT_STDOUT_CALLBACK(/home/ec2-user/.ansible.cfg) = yaml INTERPRETER_PYTHON(/home/ec2-user/.ansible.cfg) = /usr/bin/python3 ``` ### OS / Environment Amazon Linux 2 ### Steps to Reproduce `playbook.yaml` ``` --- - hosts: myhosts connection: local gather_facts: no vars: bucket: mybucket region: ap-southeast-2 rule_name: "my_rule" tasks: - name: create bucket run_once: true s3_bucket: state: present region: "{{ region }}" name: "{{ bucket }}" encryption: "AES256" tags: person: matt delete_after: "21/12/2022" - name: Add lifecycle config once run_once: true community.aws.s3_lifecycle: rule_id: "{{ rule_name }}" name: "{{ bucket }}" noncurrent_version_storage_class: standard_ia noncurrent_version_transition_days: 30 # minimum state: present status: enabled region: "{{ region }}" wait: True - name: Add lifecycle config many times run_once: False community.aws.s3_lifecycle: rule_id: "{{ rule_name }}" name: "{{ bucket }}" noncurrent_version_storage_class: standard_ia noncurrent_version_transition_days: 30 # minimum state: present status: enabled region: "{{ region }}" wait: True ``` `hosts.yaml` ``` myhosts: hosts: a: {} b: {} c: {} d: {} e: {} f: {} g: {} h: {} i: {} j: {} k: {} ``` Run with: ``` ansible-playbook playbook.yaml -i hosts.yaml -e ansible_python_interpreter=$(which python3) ``` ### Expected Results By the time we get to the last task, the bucket already has the lifecycle config we want. So the last tasks should also report success (no change), without throwing any errors. boto3 should only be used for read-only calls. No put call should be made by Ansible. ### Actual Results ``` PLAY [myhosts] ***************************************************************************************** TASK [create bucket] *********************************************************************************** changed: [a] TASK [Add lifecycle config once] *********************************************************************** changed: [a] TASK [Add lifecycle config many times] ***************************************************************** An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [c]: FAILED! 
=> changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: Lf1tcMXZfFFbYqA4HnEu/Dbii3iAFeMpWzkN2GJ9RN/7H/KiqSYCqvQZWKrYVCEQ3/oiuNJtuyeW3qbWsTuPBg== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:34 GMT server: AmazonS3 x-amz-id-2: Lf1tcMXZfFFbYqA4HnEu/Dbii3iAFeMpWzkN2GJ9RN/7H/KiqSYCqvQZWKrYVCEQ3/oiuNJtuyeW3qbWsTuPBg== x-amz-request-id: X05KXWBXVB1FKJAY http_status_code: 409 request_id: X05KXWBXVB1FKJAY retry_attempts: 0 An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [e]: FAILED! => changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: 66BDcsa1gA2Sqn+HgKWnb0tst7Pp4KeRulVfOw0k41+El39THSbqbMC5qMuZaP3d8lV/2Od6ik/DBttggxai9g== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:34 GMT server: AmazonS3 x-amz-id-2: 66BDcsa1gA2Sqn+HgKWnb0tst7Pp4KeRulVfOw0k41+El39THSbqbMC5qMuZaP3d8lV/2Od6ik/DBttggxai9g== x-amz-request-id: X05ZGKE17DWTPNHA http_status_code: 409 request_id: X05ZGKE17DWTPNHA retry_attempts: 0 ok: [d] An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [a]: FAILED! => changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. 
lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: jtEllYwZAVnS4V98eCIvffmBdiQajEMM6XgKTOrTYZ9wnfBk3C3yFa/QicPRTHmW+ljgLGdKMCqI5ExhvTId1w== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:34 GMT server: AmazonS3 x-amz-id-2: jtEllYwZAVnS4V98eCIvffmBdiQajEMM6XgKTOrTYZ9wnfBk3C3yFa/QicPRTHmW+ljgLGdKMCqI5ExhvTId1w== x-amz-request-id: X05Q23R8Y5KEWD4P http_status_code: 409 request_id: X05Q23R8Y5KEWD4P retry_attempts: 0 ok: [b] An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [f]: FAILED! => changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: JzIHlPYjCIlIa+o88FYvcEvFBKCQDdo75C0Mdwcr6ZQCHdP2hkEetTKdCqVe0m+fi2RcPMpXwqNN4JBTcoactQ== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:35 GMT server: AmazonS3 x-amz-id-2: JzIHlPYjCIlIa+o88FYvcEvFBKCQDdo75C0Mdwcr6ZQCHdP2hkEetTKdCqVe0m+fi2RcPMpXwqNN4JBTcoactQ== x-amz-request-id: 3FV4FJPW4MPJZTW3 http_status_code: 409 request_id: 3FV4FJPW4MPJZTW3 retry_attempts: 0 An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. ... 
PLAY RECAP ********************************************************************************************* a : ok=2 changed=2 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 b : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 c : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 d : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 e : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 f : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 g : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 h : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 i : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 j : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 k : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 ``` i.e. some reported success, with no change. Others threw an error. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/s3_lifecycle.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/s3_lifecycle.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @mdavis-xyz Can you debug e.g. with `q` and see what the value of the `changed` variable is, two lines above? https://github.com/ansible-collections/community.aws/blob/bdb7c9f26f6ff39654cd90e2dd18605a6e3b026c/plugins/modules/s3_lifecycle.py#L467 ```py import q q(changed) ``` and `cat /tmp/q` after it is executed. I've never heard of the `q` library before. Is this how you're supposed to debug Ansible modules? I've always struggled to debug code changes I've written, because even print statements don't work. We should add this to the contribution docs for this collection, and Ansible in general. Just to be clear, regardless of what `compare_and_update_configuration` does, `put_bucket_lifecycle_configuration` will always be called. https://github.com/ansible-collections/community.aws/blob/bdb7c9f26f6ff39654cd90e2dd18605a6e3b026c/plugins/modules/s3_lifecycle.py#L467-L476 There's really two issues here. 1. the module calls put when it shouldn't 2. the module reports changed=False after calling put For the second one, we can fix that with `changed |= True` in an `else` after that `try`. For the first one, perhaps everything after `compare_and_update_configuration` is called should be inside an `if changed`? I'll try the `q` thing later today. It will take me a while because figuring out how to run a clone of a module, without polluting my already-installed modules is not something that I find obvious nor easy. Ok I couldn't figure out how to run a playbook using a local clone of the module, without messing with my real global installation. (Are there docs for that somewhere? As an Ansible user I never need to touch galaxy or anything like that, because I only use the standard pre-installed collections.) So I just created a whole new VM to test in, and modified the file in the globally installed collection. `/tmp/q` is: ``` 0.3s create_lifecycle_rule: True 1.0s create_lifecycle_rule: False 1.4s create_lifecycle_rule: False 1.1s create_lifecycle_rule: False 1.2s create_lifecycle_rule: False 1.1s create_lifecycle_rule: False 1.0s create_lifecycle_rule: False 1.2s create_lifecycle_rule: False 1.1s create_lifecycle_rule: False 1.1s create_lifecycle_rule: False 0.7s create_lifecycle_rule: False 0.4s create_lifecycle_rule: False ``` So it was True the first time, as expected, and False the remainder, as expected. I tried wrapping up the put and try inside an `if` statement. That worked as expected. Now the MWE passes. 
(Not sure how to handle `_retries`) ``` (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule) if changed: # Write lifecycle to bucket try: client.put_bucket_lifecycle_configuration( aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_configuration) except is_boto3_error_message('At least one action needs to be specified in a rule'): # Amazon interpretted this as not changing anything changed = False except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) _changed = changed _retries = 10 while wait and _changed and _retries: # We've seen examples where get_bucket_lifecycle_configuration returns # the updated rules, then the old rules, then the updated rules again, time.sleep(5) _retries -= 1 new_rules = fetch_rules(client, module, name) (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, new_rules, new_rule) else: _retries=0 new_rules = fetch_rules(client, module, name) module.exit_json(changed=changed, new_rule=new_rule, rules=new_rules, old_rules=old_lifecycle_rules, _retries=_retries, _config=lifecycle_configuration) ``` What's the best way to add a unit/integration test for this? My MWE uses multiple hosts. Is that easy to do with the existing test setup? Or is there a way to run with a `loop` concurrently on one host? > Ok I couldn't figure out how to run a playbook using a local clone of the module, without messing with my real global installation. (Are there docs for that somewhere?) Yeah, basically you can also place hacky/patched modules in your roles/playbook directory in the `library` folder. The only thing you must change than is to call `s3_lifecycle:` instead of `community.aws.s3_lifecycle:` See https://docs.ansible.com/ansible/2.8/user_guide/playbooks_best_practices.html#directory-layout > library/ # if any custom modules, put them here (optional) > What's the best way to add a unit/integration test for this? My MWE uses multiple hosts. Is that easy to do with the existing test setup? Or is there a way to run with a loop concurrently on one host? Maybe @goneri or @tremble got an idea about testing.
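A rough sketch, with a placeholder bucket and rule, of the "only write when something actually changed" approach discussed above; the equality check is intentionally naive compared to the module's compare_and_update_configuration helper:

```python
import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3")
bucket = "mybucket"  # placeholder

desired = {
    "Rules": [
        {
            "ID": "my_rule",
            "Filter": {"Prefix": ""},
            "Status": "Enabled",
            "NoncurrentVersionTransitions": [
                {"NoncurrentDays": 30, "StorageClass": "STANDARD_IA"}
            ],
        }
    ]
}

try:
    current = s3.get_bucket_lifecycle_configuration(Bucket=bucket)["Rules"]
except ClientError as e:
    if e.response["Error"]["Code"] != "NoSuchLifecycleConfiguration":
        raise
    current = []

# Only issue the write call when the configuration actually differs; concurrent runs
# that already agree on the rules then never race on PutBucketLifecycleConfiguration.
if current != desired["Rules"]:
    s3.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=desired)
```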
2022-12-22T05:26:17
ansible-collections/community.aws
1630
ansible-collections__community.aws-1630
[ "1471" ]
b24520678f18aed53fa61cc226c67fc16ed5b755
diff --git a/plugins/modules/aws_ssm_parameter_store.py b/plugins/modules/aws_ssm_parameter_store.py --- a/plugins/modules/aws_ssm_parameter_store.py +++ b/plugins/modules/aws_ssm_parameter_store.py @@ -380,7 +380,7 @@ def create_update_parameter(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="getting description value") - if describe_existing_parameter['Description'] != args['Description']: + if describe_existing_parameter.get('Description') != args['Description']: (changed, response) = update_parameter(client, module, **args) if changed: _wait_updated(client, module, module.params.get('name'), original_version)
diff --git a/tests/integration/targets/aws_ssm_parameter_store/tasks/main.yml b/tests/integration/targets/aws_ssm_parameter_store/tasks/main.yml --- a/tests/integration/targets/aws_ssm_parameter_store/tasks/main.yml +++ b/tests/integration/targets/aws_ssm_parameter_store/tasks/main.yml @@ -415,6 +415,30 @@ that: - result is not changed + - name: Create key/value pair in aws parameter store with no description + aws_ssm_parameter_store: + name: '{{ simple_name }}' + value: '{{ simple_value }}' + register: result + + - assert: + that: + - result is changed + - '"description" not in result.parameter_metadata' + + - name: Add a description + aws_ssm_parameter_store: + name: '{{ simple_name }}' + value: '{{ simple_value }}' + description: '{{ simple_description }}' + register: result + + - assert: + that: + - result is changed + - '"description" in result.parameter_metadata' + - result.parameter_metadata.description == simple_description + always: # ============================================================ - name: Delete remaining key/value pairs in aws parameter store
aws_ssm_parameter_store update fails if no description element was defined before ### Summary `aws_ssm_parameter_store` update fails if no description element was defined before. If an SSM parameter has an empty description already in SSM, then the AWS API (I'm guessing `aws ssm describe-parameters`) doesn't include the `description` element in the response JSON at all for such item, causing issues in `community.aws. aws_ssm_parameter_store` handling of such reply (Description element no defined). ### Issue Type Bug Report ### Component Name ssm_parameter ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.6] config file = None configured module search path = ['/home/circleci/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/circleci/.local/lib/python3.10/site-packages/ansible ansible collection location = /home/circleci/.ansible/collections:/usr/share/ansible/collections executable location = /home/circleci/.local/bin/ansible python version = 3.10.4 (main, Apr 2 2022, 09:04:19) [GCC 11.2.0] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /home/circleci/.local/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 2.3.0 ansible.netcommon 2.6.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.10.0 arista.eos 3.1.0 awx.awx 19.4.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.2.0 cisco.aci 2.2.0 cisco.asa 2.1.0 cisco.dnac 6.4.0 cisco.intersight 1.0.19 cisco.ios 2.8.1 cisco.iosxr 2.9.0 cisco.ise 1.2.1 cisco.meraki 2.6.2 cisco.mso 1.4.0 cisco.nso 1.0.3 cisco.nxos 2.9.1 cisco.ucs 1.8.0 cloud.common 2.1.1 cloudscale_ch.cloud 2.2.2 community.aws 2.5.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.3.2 community.digitalocean 1.19.0 community.dns 2.2.0 community.docker 2.6.0 community.fortios 1.0.0 community.general 4.8.2 community.google 1.0.0 community.grafana 1.4.0 community.hashi_vault 2.5.0 community.hrobot 1.4.0 community.kubernetes 2.0.1 community.kubevirt 1.0.0 community.libvirt 1.1.0 community.mongodb 1.4.0 community.mysql 2.3.8 community.network 3.3.0 community.okd 2.2.0 community.postgresql 1.7.4 community.proxysql 1.4.0 community.rabbitmq 1.2.1 community.routeros 2.1.0 community.sap 1.0.0 community.sap_libs 1.1.0 community.skydive 1.0.0 community.sops 1.2.2 community.vmware 1.18.0 community.windows 1.10.0 community.zabbix 1.7.0 containers.podman 1.9.3 cyberark.conjur 1.1.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.1 dellemc.openmanage 4.4.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.17.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.6 frr.frr 1.0.4 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.6.0 hpe.nimble 1.1.4 ibm.qradar 1.0.3 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.2.2 inspur.sm 1.3.0 junipernetworks.junos 2.10.0 kubernetes.core 2.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.17.0 netapp.elementsw 21.7.0 netapp.ontap 21.19.1 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.1 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 1.6.6 purestorage.flasharray 1.13.0 purestorage.flashblade 1.9.0 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 1.0.2 
t_systems_mms.icinga_director 1.29.0 theforeman.foreman 2.2.0 vmware.vmware_rest 2.1.5 vyos.vyos 2.8.0 wti.remote 1.0.3 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.24.73 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/circleci/.local/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.73 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/circleci/.local/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment Ubuntu 22.04 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - name: "SSM" ssm_parameter: name: "mytest" string_type: "String" value: "myvalue" state: "present" overwrite_value: "changed" - name: "SSM" ssm_parameter: name: "mytest" string_type: "String" value: "myvalue" state: "present" description: "this_should_fail" overwrite_value: "changed" ``` ### Expected Results Second command successfully adding the description element. ### Actual Results Second command fails due to description being empty in SSM. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: None If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> Files identified in the description: * [`plugins/modules/ssm_parameter.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ssm_parameter.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @116davinder @jillr @markuman @mikedlr @nathanwebsterdotme @ozbillwang @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> Potential solution is probably to consider `Description` an empty string if the Description element doesn't exist in the response: https://github.com/ansible-collections/community.aws/blob/3043bca5d3544a40b78f22e0ece084dde8324530/plugins/modules/ssm_parameter.py#L325 ``` if 'Description' not in existing_parameter['Parameters'][0]: existing_parameter['Parameters'][0]['Description'] = '' ``` @cergfix, It would be really nice if you can run above reproduce steps with `-vvvv` and paste the output to pin-point error location. ```paste below The full traceback is: Traceback (most recent call last): ... File "/tmp/ansible_community.aws.ssm_parameter_payload_fmtkbrh6/ansible_community.aws.ssm_parameter_payload.zip/ansible_collections/community/aws/plugins/modules/ssm_parameter.py", line 483, in <module> File "/tmp/ansible_community.aws.ssm_parameter_payload_fmtkbrh6/ansible_community.aws.ssm_parameter_payload.zip/ansible_collections/community/aws/plugins/modules/ssm_parameter.py", line 464, in main File "/tmp/ansible_community.aws.ssm_parameter_payload_fmtkbrh6/ansible_community.aws.ssm_parameter_payload.zip/ansible_collections/community/aws/plugins/modules/ssm_parameter.py", line 386, in create_update_parameter KeyError: 'Description' ``` I'm opening a PR now.
2022-12-22T08:51:24
ansible-collections/community.aws
1631
ansible-collections__community.aws-1631
[ "1471" ]
751dcb28368703693a49e8cea371fd467dd12d92
diff --git a/plugins/modules/ssm_parameter.py b/plugins/modules/ssm_parameter.py --- a/plugins/modules/ssm_parameter.py +++ b/plugins/modules/ssm_parameter.py @@ -383,7 +383,7 @@ def create_update_parameter(client, module): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="getting description value") - if describe_existing_parameter['Description'] != args['Description']: + if describe_existing_parameter.get('Description') != args['Description']: (changed, response) = update_parameter(client, module, **args) if changed: _wait_updated(client, module, module.params.get('name'), original_version)
diff --git a/tests/integration/targets/ssm_parameter/tasks/main.yml b/tests/integration/targets/ssm_parameter/tasks/main.yml --- a/tests/integration/targets/ssm_parameter/tasks/main.yml +++ b/tests/integration/targets/ssm_parameter/tasks/main.yml @@ -415,6 +415,30 @@ that: - result is not changed + - name: Create key/value pair in aws parameter store with no description + aws_ssm_parameter_store: + name: '{{ simple_name }}' + value: '{{ simple_value }}' + register: result + + - assert: + that: + - result is changed + - '"description" not in result.parameter_metadata' + + - name: Add a description + aws_ssm_parameter_store: + name: '{{ simple_name }}' + value: '{{ simple_value }}' + description: '{{ simple_description }}' + register: result + + - assert: + that: + - result is changed + - '"description" in result.parameter_metadata' + - result.parameter_metadata.description == simple_description + always: # ============================================================ - name: Delete remaining key/value pairs in aws parameter store
aws_ssm_parameter_store update fails if no description element was defined before ### Summary `aws_ssm_parameter_store` update fails if no description element was defined before. If an SSM parameter has an empty description already in SSM, then the AWS API (I'm guessing `aws ssm describe-parameters`) doesn't include the `description` element in the response JSON at all for such item, causing issues in `community.aws. aws_ssm_parameter_store` handling of such reply (Description element no defined). ### Issue Type Bug Report ### Component Name ssm_parameter ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.6] config file = None configured module search path = ['/home/circleci/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/circleci/.local/lib/python3.10/site-packages/ansible ansible collection location = /home/circleci/.ansible/collections:/usr/share/ansible/collections executable location = /home/circleci/.local/bin/ansible python version = 3.10.4 (main, Apr 2 2022, 09:04:19) [GCC 11.2.0] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /home/circleci/.local/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 2.3.0 ansible.netcommon 2.6.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.10.0 arista.eos 3.1.0 awx.awx 19.4.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.2.0 cisco.aci 2.2.0 cisco.asa 2.1.0 cisco.dnac 6.4.0 cisco.intersight 1.0.19 cisco.ios 2.8.1 cisco.iosxr 2.9.0 cisco.ise 1.2.1 cisco.meraki 2.6.2 cisco.mso 1.4.0 cisco.nso 1.0.3 cisco.nxos 2.9.1 cisco.ucs 1.8.0 cloud.common 2.1.1 cloudscale_ch.cloud 2.2.2 community.aws 2.5.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.3.2 community.digitalocean 1.19.0 community.dns 2.2.0 community.docker 2.6.0 community.fortios 1.0.0 community.general 4.8.2 community.google 1.0.0 community.grafana 1.4.0 community.hashi_vault 2.5.0 community.hrobot 1.4.0 community.kubernetes 2.0.1 community.kubevirt 1.0.0 community.libvirt 1.1.0 community.mongodb 1.4.0 community.mysql 2.3.8 community.network 3.3.0 community.okd 2.2.0 community.postgresql 1.7.4 community.proxysql 1.4.0 community.rabbitmq 1.2.1 community.routeros 2.1.0 community.sap 1.0.0 community.sap_libs 1.1.0 community.skydive 1.0.0 community.sops 1.2.2 community.vmware 1.18.0 community.windows 1.10.0 community.zabbix 1.7.0 containers.podman 1.9.3 cyberark.conjur 1.1.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.1 dellemc.openmanage 4.4.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.17.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.6 frr.frr 1.0.4 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.6.0 hpe.nimble 1.1.4 ibm.qradar 1.0.3 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.2.2 inspur.sm 1.3.0 junipernetworks.junos 2.10.0 kubernetes.core 2.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.17.0 netapp.elementsw 21.7.0 netapp.ontap 21.19.1 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.1 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 1.6.6 purestorage.flasharray 1.13.0 purestorage.flashblade 1.9.0 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 1.0.2 
t_systems_mms.icinga_director 1.29.0 theforeman.foreman 2.2.0 vmware.vmware_rest 2.1.5 vyos.vyos 2.8.0 wti.remote 1.0.3 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.24.73 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/circleci/.local/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.73 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/circleci/.local/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment Ubuntu 22.04 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - name: "SSM" ssm_parameter: name: "mytest" string_type: "String" value: "myvalue" state: "present" overwrite_value: "changed" - name: "SSM" ssm_parameter: name: "mytest" string_type: "String" value: "myvalue" state: "present" description: "this_should_fail" overwrite_value: "changed" ``` ### Expected Results Second command successfully adding the description element. ### Actual Results Second command fails due to description being empty in SSM. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: None If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> Files identified in the description: * [`plugins/modules/ssm_parameter.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ssm_parameter.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @116davinder @jillr @markuman @mikedlr @nathanwebsterdotme @ozbillwang @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> Potential solution is probably to consider `Description` an empty string if the Description element doesn't exist in the response: https://github.com/ansible-collections/community.aws/blob/3043bca5d3544a40b78f22e0ece084dde8324530/plugins/modules/ssm_parameter.py#L325 ``` if 'Description' not in existing_parameter['Parameters'][0]: existing_parameter['Parameters'][0]['Description'] = '' ``` @cergfix, It would be really nice if you can run above reproduce steps with `-vvvv` and paste the output to pin-point error location. ```paste below The full traceback is: Traceback (most recent call last): ... File "/tmp/ansible_community.aws.ssm_parameter_payload_fmtkbrh6/ansible_community.aws.ssm_parameter_payload.zip/ansible_collections/community/aws/plugins/modules/ssm_parameter.py", line 483, in <module> File "/tmp/ansible_community.aws.ssm_parameter_payload_fmtkbrh6/ansible_community.aws.ssm_parameter_payload.zip/ansible_collections/community/aws/plugins/modules/ssm_parameter.py", line 464, in main File "/tmp/ansible_community.aws.ssm_parameter_payload_fmtkbrh6/ansible_community.aws.ssm_parameter_payload.zip/ansible_collections/community/aws/plugins/modules/ssm_parameter.py", line 386, in create_update_parameter KeyError: 'Description' ``` I'm opening a PR now.
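The workaround sketched in the comment above boils down to reading `Description` defensively instead of indexing it. A minimal standalone illustration of that idea, assuming a boto3 SSM client and the `mytest` parameter from the reproduce steps (this is not the module's actual code):

```python
import boto3

ssm = boto3.client("ssm")

# describe_parameters omits the Description key entirely for parameters
# created without a description, which is what produces the
# KeyError: 'Description' in the traceback above.
response = ssm.describe_parameters(
    ParameterFilters=[{"Key": "Name", "Values": ["mytest"]}]
)
metadata = response["Parameters"][0]

# Fall back to an empty string rather than assuming the key exists.
existing_description = metadata.get("Description", "")
print(repr(existing_description))
```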
2022-12-22T08:51:35
ansible-collections/community.aws
1,633
ansible-collections__community.aws-1633
[ "637" ]
7b68f1196bf57b16c3e442343dfa4e46ffcdc868
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -87,10 +87,36 @@ vars: - name: ansible_aws_ssm_document version_added: 5.2.0 + s3_addressing_style: + description: + - The addressing style to use when using S3 URLs. + - When the S3 bucket isn't in the same region as the Instance + explicitly setting the addressing style to 'virtual' may be necessary + U(https://repost.aws/knowledge-center/s3-http-307-response) as this forces + the use of a specific endpoint. + choices: [ 'path', 'virtual', 'auto' ] + default: 'auto' + version_added: 5.2.0 + vars: + - name: ansible_aws_ssm_s3_addressing_style ''' EXAMPLES = r''' +# Wait for SSM Agent to be available on the Instance +- name: Wait for connection to be available + vars: + ansible_connection: aws_ssm + ansible_aws_ssm_bucket_name: nameofthebucket + ansible_aws_ssm_region: us-west-2 + # When the S3 bucket isn't in the same region as the Instance + # Explicitly setting the addressing style to 'virtual' may be necessary + # https://repost.aws/knowledge-center/s3-http-307-response + ansible_aws_ssm_s3_addressing_style: virtual + tasks: + - name: Wait for connection + wait_for_connection: + # Stop Spooler Process on Windows Instances - name: Stop Spooler Service on Windows Instances vars: @@ -708,7 +734,10 @@ def _get_boto_client(self, service, region_name=None, profile_name=None): client = session.client( service, - config=Config(signature_version="s3v4") + config=Config( + signature_version="s3v4", + s3={'addressing_style': self.get_option('s3_addressing_style')} + ) ) return client
diff --git a/tests/integration/targets/connection_aws_ssm_addressing/aliases b/tests/integration/targets/connection_aws_ssm_addressing/aliases new file mode 100644 --- /dev/null +++ b/tests/integration/targets/connection_aws_ssm_addressing/aliases @@ -0,0 +1,4 @@ +time=20m + +cloud/aws +connection_aws_ssm diff --git a/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml @@ -0,0 +1,9 @@ +- hosts: localhost + roles: + - role: ../setup_connection_aws_ssm + vars: + target_os: fedora + encrypted_bucket: False + s3_bucket_region: 'eu-central-1' + s3_addressing_style: virtual + test_suffix: addressing diff --git a/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_teardown.yml b/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_teardown.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_teardown.yml @@ -0,0 +1,5 @@ +- hosts: localhost + tasks: + - include_role: + name: ../setup_connection_aws_ssm + tasks_from: cleanup.yml diff --git a/tests/integration/targets/connection_aws_ssm_addressing/meta/main.yml b/tests/integration/targets/connection_aws_ssm_addressing/meta/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/connection_aws_ssm_addressing/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - connection + - setup_connection_aws_ssm diff --git a/tests/integration/targets/connection_aws_ssm_addressing/runme.sh b/tests/integration/targets/connection_aws_ssm_addressing/runme.sh new file mode 100755 --- /dev/null +++ b/tests/integration/targets/connection_aws_ssm_addressing/runme.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +PLAYBOOK_DIR=$(pwd) +set -eux + +CMD_ARGS=("$@") + +# Destroy Environment +cleanup() { + + cd "${PLAYBOOK_DIR}" + ansible-playbook -c local aws_ssm_integration_test_teardown.yml "${CMD_ARGS[@]}" + +} + +trap "cleanup" EXIT + +# Setup Environment +ansible-playbook -c local aws_ssm_integration_test_setup.yml "$@" + +# Export the AWS Keys +set +x +. 
./aws-env-vars.sh +set -x + +cd ../connection + +# Execute Integration tests +INVENTORY="${PLAYBOOK_DIR}/ssm_inventory" ./test.sh \ + -e target_hosts=aws_ssm \ + "$@" diff --git a/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml --- a/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml +++ b/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml @@ -4,3 +4,4 @@ vars: target_os: fedora encrypted_bucket: True + test_suffix: encrypteds3 diff --git a/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml --- a/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml +++ b/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml @@ -4,3 +4,4 @@ vars: target_os: fedora use_ssm_document: True + test_suffix: document diff --git a/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml b/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml --- a/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml +++ b/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml @@ -40,11 +40,6 @@ ami_details: </powershell> os_type: windows -# see: -# - https://github.com/mattclay/aws-terminator/pull/181 -# - https://github.com/ansible-collections/community.aws/pull/763 -encrypted_s3_bucket_name: ssm-encrypted-test-bucket - -s3_bucket_name: "{{ resource_prefix }}-connection-ssm" +s3_bucket_name: "{{ tiny_prefix }}-connection-ssm-{{ test_suffix | default(target_os) }}" kms_key_name: "{{ resource_prefix }}-connection-ssm" ssm_document_name: "{{ resource_prefix }}-connection-ssm" diff --git a/tests/integration/targets/setup_connection_aws_ssm/templates/inventory-combined.aws_ssm.j2 b/tests/integration/targets/setup_connection_aws_ssm/templates/inventory-combined.aws_ssm.j2 --- a/tests/integration/targets/setup_connection_aws_ssm/templates/inventory-combined.aws_ssm.j2 +++ b/tests/integration/targets/setup_connection_aws_ssm/templates/inventory-combined.aws_ssm.j2 @@ -32,6 +32,9 @@ ansible_aws_ssm_plugin=/usr/local/sessionmanagerplugin/bin/session-manager-plugi ansible_python_interpreter=/usr/bin/env python3 local_tmp=/tmp/ansible-local-{{ tiny_prefix }} ansible_aws_ssm_bucket_name={{ s3_bucket_name }} +{% if s3_addressing_style | default(False) %} +ansible_aws_ssm_s3_addressing_style={{ s3_addressing_style }} +{% endif %} {% if encrypted_bucket | default(False) %} {% if not (s3_bucket_encryption | default(False)) %} ansible_aws_ssm_bucket_sse_mode='aws:kms'
aws_ssm connection plugin: S3 Signed Url invalid for newly created S3 Bucket ### Summary When I try to execute a playbook against an Amazon Linux 2 instance in EC2 using the aws_ssm connection plugin and a recently created (less than an hour old) S3 bucket, it fails to correctly download `AnsiballZ_setup.py`, resulting in a python syntax error `" File \"/home/ssm-user/.ansible/tmp/ansible-tmp-1626190404.700778-20074-247496938615569/AnsiballZ_setup.py\", line 1\r\r\n <?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\r\n ^\r\r\nSyntaxError: invalid syntax\r\r",`. The curl is writing out the S3 XML error response to file, due to S3 returning a HTTP 307 redirect which the curl does not follow. This HTTP 307 from S3 is expected, as per [this AWS documentation](https://aws.amazon.com/premiumsupport/knowledge-center/s3-http-307-response/), because the bucket is too new for the global S3 DNS to have propagated out yet, so a regional endpoint has to be used. This overall seems similar to [this issue](https://github.com/ansible-collections/community.aws/issues/307), but is still happening for me when using the `main` branch of this repository where the fix has been applied. I believe the underlying problem is that when the signed url is generated in the function `_file_transport_command`, it is a global URL rather than a regional URL: For example, the URL below does not work and returns a 307; [https://test-bucket-garethsaxby-20210713-153159.**s3.amazonaws.com**/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880](https://test-bucket-garethsaxby-20210713-153159.s3.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880) Whilst the URL below, redirected by the 307, -does- work; [https://test-bucket-garethsaxby-20210713-153159.**s3.eu-west-2.amazonaws.com**/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880](https://test-bucket-garethsaxby-20210713-153159.s3.eu-west-2.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880) If I force the plugin to use a regional endpoint for S3, and use a region when creating the client, as per [my branch](https://github.com/ansible-collections/community.aws/compare/main...garethsaxby:fix/s3_regional_url?expand=1), it does work, albeit I'm not 
really sure -how- best to implement this to properly put a Pull Request together to fix the problem, given my branch feels like a really ugly hack. ### Issue Type Bug Report ### Component Name plugins/connection/aws_ssm ### Ansible Version ```console (paste below) ansible [core 2.11.2] config file = None configured module search path = ['/Users/gsaxby/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages/ansible ansible collection location = /Users/gsaxby/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.9.6 (default, Jun 29 2021, 06:20:32) [Clang 12.0.0 (clang-1200.0.32.29)] jinja version = 3.0.1 libyaml = True ``` ### Collection Versions ```console (paste below) # /Users/gsaxby/.ansible/collections/ansible_collections Collection Version -------------------- ------- amazon.aws 1.5.0 ansible.netcommon 1.3.0 ansible.posix 1.1.1 community.aws 1.5.0 # Actually has been taken from main; I have shared my requirements.yml later on community.general 1.3.0 community.kubernetes 1.0.0 google.cloud 1.0.1 # /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 1.5.0 ansible.netcommon 2.2.0 ansible.posix 1.2.0 ansible.utils 2.3.0 ansible.windows 1.7.0 arista.eos 2.2.0 awx.awx 19.2.2 azure.azcollection 1.7.0 check_point.mgmt 2.0.0 chocolatey.chocolatey 1.1.0 cisco.aci 2.0.0 cisco.asa 2.0.2 cisco.intersight 1.0.15 cisco.ios 2.3.0 cisco.iosxr 2.3.0 cisco.meraki 2.4.2 cisco.mso 1.2.0 cisco.nso 1.0.3 cisco.nxos 2.4.0 cisco.ucs 1.6.0 cloudscale_ch.cloud 2.2.0 community.aws 1.5.0 community.azure 1.0.0 community.crypto 1.7.1 community.digitalocean 1.7.0 community.docker 1.8.0 community.fortios 1.0.0 community.general 3.3.0 community.google 1.0.0 community.grafana 1.2.1 community.hashi_vault 1.3.0 community.hrobot 1.1.1 community.kubernetes 1.2.1 community.kubevirt 1.0.0 community.libvirt 1.0.1 community.mongodb 1.2.1 community.mysql 2.1.0 community.network 3.0.0 community.okd 1.1.2 community.postgresql 1.3.0 community.proxysql 1.0.0 community.rabbitmq 1.0.3 community.routeros 1.2.0 community.skydive 1.0.0 community.sops 1.1.0 community.vmware 1.11.0 community.windows 1.5.0 community.zabbix 1.3.0 containers.podman 1.6.1 cyberark.conjur 1.1.0 cyberark.pas 1.0.7 dellemc.enterprise_sonic 1.1.0 dellemc.openmanage 3.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.10.1 fortinet.fortimanager 2.1.2 fortinet.fortios 2.1.1 frr.frr 1.0.3 gluster.gluster 1.0.1 google.cloud 1.0.2 hetzner.hcloud 1.4.3 hpe.nimble 1.1.3 ibm.qradar 1.0.3 infinidat.infinibox 1.2.4 inspur.sm 1.2.0 junipernetworks.junos 2.3.0 kubernetes.core 1.2.1 mellanox.onyx 1.0.0 netapp.aws 21.2.0 netapp.azure 21.7.0 netapp.cloudmanager 21.7.0 netapp.elementsw 21.6.1 netapp.ontap 21.7.0 netapp.um_info 21.6.0 netapp_eseries.santricity 1.2.13 netbox.netbox 3.1.1 ngine_io.cloudstack 2.1.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.0 openstack.cloud 1.5.0 openvswitch.openvswitch 2.0.0 ovirt.ovirt 1.5.3 purestorage.flasharray 1.8.0 purestorage.flashblade 1.6.0 sensu.sensu_go 1.11.1 servicenow.servicenow 1.0.6 splunk.es 1.0.2 t_systems_mms.icinga_director 1.18.0 theforeman.foreman 2.1.1 vyos.vyos 2.3.1 wti.remote 1.0.1 ``` ### AWS SDK versions ```console (paste below) WARNING: Package(s) not found: boto Name: boto3 Version: 1.17.110 Summary: The AWS SDK for 
Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages Requires: botocore, s3transfer, jmespath Required-by: --- Name: botocore Version: 1.20.110 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages Requires: python-dateutil, urllib3, jmespath Required-by: s3transfer, boto3 ``` ### Configuration ```console (paste below) INTERPRETER_PYTHON(/Users/gsaxby/Code/DOG/ansible-testing/ansible/ansible.cfg) = auto INVENTORY_ENABLED(/Users/gsaxby/Code/DOG/ansible-testing/ansible/ansible.cfg) = ['amazon.aws.aws_ec2'] ``` ### OS / Environment Client: macOS Catalina 10.15.7, Ansible installed via Brew Remote: Amazon Linux 2, eu-west-2, ami-03ac5a9b225e99b02, amzn2-ami-hvm-2.0.20210701.0-x86_64-gp2 ### Steps to Reproduce 1. Create a new S3 bucket. I believe this is crucial, as it needs to be returning 307's when using the global endpoint, as per [this AWS knowledge centre article](https://aws.amazon.com/premiumsupport/knowledge-center/s3-http-307-response/). ``` Region: eu-west-2 ``` 2. Create the EC2 instance running the SSM agent. ``` Region: eu-west-2 AMI: ami-03ac5a9b225e99b02 (amzn2-ami-hvm-2.0.20210701.0-x86_64-gp2) IAM Policy Attached: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore Tags: Name: ansible-ssm-testing ``` 3. Execute the ansible playbook as below: **requirements.yml**: ```yaml --- collections: - name: amazon.aws version: 1.5.0 - name: https://github.com/ansible-collections/community.aws.git type: git version: main ``` **ansible.cfg**: ``` [defaults] interpreter_python = auto [inventory] enable_plugins = amazon.aws.aws_ec2 ``` **inventory.aws_ec2.yml**: ```yml # File name must end in `.aws_ec2.yml` otherwise the plugin will not read it plugin: amazon.aws.aws_ec2 regions: - eu-west-2 filters: tag:Name: ansible-ssm-testing ``` **playbook.yml** ```yml --- - hosts: all gather_facts: true vars: ansible_connection: aws_ssm ansible_aws_ssm_region: "eu-west-2" ansible_aws_ssm_instance_id: "{{ instance_id }}" ansible_aws_ssm_bucket_name: test-bucket-garethsaxby-20210713-153159 ansible_python_interpreter: /usr/bin/python3 tasks: - name: Ping Instance ansible.builtin.ping: ``` ```console $ ansible-galaxy install -r requirements.yml --force $ ansible-playbook -i inventory.aws_ec2.yml playbook.yml ``` ### Expected Results I'm expecting the curl against the S3 signed URL on the remote host to pull down `AnsiballZ_setup.py` correctly and continue running the playbook, returning the ping successfully. 
### Actual Results ```console (paste below) $ ansible-playbook -vvvv -i inventory.aws_ec2.yml playbook.yml ansible-playbook [core 2.11.2] config file = /Users/gsaxby/Code/DOG/ansible-testing/ansible/ansible.cfg configured module search path = ['/Users/gsaxby/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages/ansible ansible collection location = /Users/gsaxby/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible-playbook python version = 3.9.6 (default, Jun 29 2021, 06:20:32) [Clang 12.0.0 (clang-1200.0.32.29)] jinja version = 3.0.1 libyaml = True Using /Users/gsaxby/Code/DOG/ansible-testing/ansible/ansible.cfg as config file setting up inventory plugins Loading collection amazon.aws from /Users/gsaxby/.ansible/collections/ansible_collections/amazon/aws Parsed /Users/gsaxby/Code/DOG/ansible-testing/ansible/inventory.aws_ec2.yml inventory source with ansible_collections.amazon.aws.plugins.inventory.aws_ec2 plugin Loading callback plugin default of type stdout, v2.0 from /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages/ansible/plugins/callback/default.py Skipping callback 'default', as we already have a stdout callback. Skipping callback 'minimal', as we already have a stdout callback. Skipping callback 'oneline', as we already have a stdout callback. PLAYBOOK: playbook.yml ******************************************************************************************************************************************** Positional arguments: playbook.yml verbosity: 4 connection: smart timeout: 10 become_method: sudo tags: ('all',) inventory: ('/Users/gsaxby/Code/DOG/ansible-testing/ansible/inventory.aws_ec2.yml',) forks: 5 1 plays in playbook.yml PLAY [all] ******************************************************************************************************************************************************** TASK [Gathering Facts] ******************************************************************************************************************************************** task path: /Users/gsaxby/Code/DOG/ansible-testing/ansible/playbook.yml:2 redirecting (type: connection) ansible.builtin.aws_ssm to community.aws.aws_ssm Loading collection community.aws from /Users/gsaxby/.ansible/collections/ansible_collections/community/aws <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> ESTABLISH SSM CONNECTION TO: i-089c1ec0c85524f5d <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> SSM COMMAND: ['/usr/local/bin/session-manager-plugin', '{"SessionId": "gareth-saxby-temp-072af2adf96185184", "TokenValue": "AAEAAQl1UTpN1tP3cQsnNCTUvKP/y0eAIq8BgKoOVgNzAN4aAAAAAGDtrs9TgYC1XyDzkw5Y6le3Wt9fzFIXrw2thaxAz8Gvts868wSMlpFm+M7syYnedzJfgOMUIxN9/PDA/ph9qL8qZocUy9IdVmBC9oO6Z/yQr94sVYVvWvVHGFY3k9O/9oO8Eklc4SN6r2pl2Mmj3bFKDxH1mbTv15Fks3ieMIiZyxahkg2rwCxFplua+nFlja3w9bQVl+LUXogw19V9MNjy2UrrUiXSMWhwKPPE6Y/VWOgZrNu72mg2mbvAvRKjCV+hZ2vBSt7WZ+gKfGV/U3yUAEEUtnNIsCJz3fAUPlZXUKnIVnviMNL0HnBZzE1YA3BwtbF8R0390a0dNuQuMqrTzrOHI4hJVL0oDBnMXAISZaJ1UUFJ4L5jYMuwVHa8dJA2d4w=", "StreamUrl": "wss://ssmmessages.eu-west-2.amazonaws.com/v1/data-channel/gareth-saxby-temp-072af2adf96185184?role=publish_subscribe", "ResponseMetadata": {"RequestId": "4c322e4e-e0c8-4384-92e3-323414253881", "HTTPStatusCode": 200, "HTTPHeaders": {"server": "Server", "date": "Tue, 13 Jul 2021 15:18:39 GMT", "content-type": "application/x-amz-json-1.1", "content-length": "642", "connection": 
"keep-alive", "x-amzn-requestid": "4c322e4e-e0c8-4384-92e3-323414253881"}, "RetryAttempts": 0}}', 'eu-west-2', 'StartSession', '', '{"Target": "i-089c1ec0c85524f5d"}', 'https://ssm.eu-west-2.amazonaws.com'] <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> SSM CONNECTION ID: gareth-saxby-temp-072af2adf96185184 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC echo ~ <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo IlyexxlDYKICMeyOVUZQHcnTAj echo ~ echo $'\n'$? echo qIhbHBrlBzTeHjwGNvWljDHeGw ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: Starting session with SessionId: gareth-saxby-temp-072af2adf96185184 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: sh-4.2$ stty -echo <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: sh-4.2$ IlyexxlDYKICMeyOVUZQHcnTAj <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: /home/ssm-user <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: qIhbHBrlBzTeHjwGNvWljDHeGw <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: /home/ssm-user 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, '/home/ssm-user\r\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC ( umask 77 && mkdir -p "` echo /home/ssm-user/.ansible/tmp `"&& mkdir "` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 `" && echo ansible-tmp-1626189520.659307-19800-45563405192193="` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 `" ) <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo wzCNiIYognSiHXqfCJMBRaRvKS ( umask 77 && mkdir -p "` echo /home/ssm-user/.ansible/tmp `"&& mkdir "` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 `" && echo ansible-tmp-1626189520.659307-19800-45563405192193="` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 `" ) echo $'\n'$? 
echo CqzxJpqOSlFCtzcRaDElqmzLuB ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: wzCNiIYognSiHXqfCJMBRaRvKS <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: ansible-tmp-1626189520.659307-19800-45563405192193=/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: CqzxJpqOSlFCtzcRaDElqmzLuB <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: ansible-tmp-1626189520.659307-19800-45563405192193=/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, 'ansible-tmp-1626189520.659307-19800-45563405192193=/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193\r\r', '') Using module file /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages/ansible/modules/setup.py <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> PUT /Users/gsaxby/.ansible/tmp/ansible-local-19794ck3x4sje/tmpyrhzimx3 TO /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC curl 'https://test-bucket-garethsaxby-20210713-153159.s3.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880' -o '/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo YBqOPfzvxcGDqXREpbYSUvrIeG curl 'https://test-bucket-garethsaxby-20210713-153159.s3.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880' -o '/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py' echo $'\n'$? 
echo dfIOAXVAOIcrDMPKZdUkrKCwKw ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: YBqOPfzvxcGDqXREpbYSUvrIeG <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: % Total % Received % Xferd Average Speed Time Time Time Current <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: Dload Upload Total Spent Left Speed 100 509 0 509 0 0 1641 0 --:--:-- --:--:-- --:--:-- 1641 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: dfIOAXVAOIcrDMPKZdUkrKCwKw <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 509 0 509 0 0 1641 0 --:--:-- --:--:-- --:--:-- 1641 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 509 0 509 0 0 1641 0 --:--:-- --:--:-- --:--:-- 1641\r\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 509 0 509 0 0 1641 0 --:--:-- --:--:-- --:--:-- 1641\r\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC chmod u+x /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/ /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo xgTDBjuFPZtDfVuwGaZyflDWjV chmod u+x /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/ /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py echo $'\n'$? echo xOHqRiUNImldbBrdKmcVWrfEEd ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: xgTDBjuFPZtDfVuwGaZyflDWjV <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: xOHqRiUNImldbBrdKmcVWrfEEd <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, '\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC /usr/bin/python3 /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo gzDaIZxdrfBqdBvcQHhExANgdr sudo /usr/bin/python3 /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py echo $'\n'$? 
echo CtSWJSGjdVuhayekdfLdqRalyk ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: gzDaIZxdrfBqdBvcQHhExANgdr <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: File "/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py", line 1 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <?xml version="1.0" encoding="UTF-8"?> <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: ^ <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: SyntaxError: invalid syntax <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 1 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: CtSWJSGjdVuhayekdfLdqRalyk <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: File "/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py", line 1 <?xml version="1.0" encoding="UTF-8"?> ^ SyntaxError: invalid syntax 1 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (1, ' File "/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py", line 1\r\r\n <?xml version="1.0" encoding="UTF-8"?>\r\r\n ^\r\r\nSyntaxError: invalid syntax\r\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC rm -f -r /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/ > /dev/null 2>&1 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo JARmCoxpzNaNaBblIFeROdZYey rm -f -r /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/ > /dev/null 2>&1 echo $'\n'$? echo BifmuhtspkBDKrtrAJmsOZBxKg ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: JARmCoxpzNaNaBblIFeROdZYey <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: BifmuhtspkBDKrtrAJmsOZBxKg <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, '\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> CLOSING SSM CONNECTION TO: i-089c1ec0c85524f5d <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> TERMINATE SSM SESSION: gareth-saxby-temp-072af2adf96185184 fatal: [ec2-52-56-84-142.eu-west-2.compute.amazonaws.com]: FAILED! 
=> { "ansible_facts": {}, "changed": false, "failed_modules": { "ansible.legacy.setup": { "failed": true, "module_stderr": "", "module_stdout": " File \"/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py\", line 1\r\r\n <?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\r\n ^\r\r\nSyntaxError: invalid syntax\r\r", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1 } }, "msg": "The following modules failed to execute: ansible.legacy.setup\n" } PLAY RECAP ******************************************************************************************************************************************************** ec2-52-56-84-142.eu-west-2.compute.amazonaws.com : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 ``` When I curl the signed URL from the remote instance using SSM Session Manager, I get the following response, showing that a 307 is being returned and I'm being redirected to the regional endpoint: ```console $ curl -i "https://test-bucket-garethsaxby-20210713-153159.s3.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880" HTTP/1.1 307 Temporary Redirect x-amz-bucket-region: eu-west-2 x-amz-request-id: 6077E9R1H8G7Z0S1 x-amz-id-2: OU6kvbWvdu79rmzjEa8YpVK8z1X5J0y1axM9o0bsEvQyA6vvdY+xZZDJFdUwez4leqMV5UUzxUw= Location: https://test-bucket-garethsaxby-20210713-153159.s3.eu-west-2.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880 Content-Type: application/xml Transfer-Encoding: chunked Date: Tue, 13 Jul 2021 15:21:02 GMT Server: AmazonS3 <?xml version="1.0" encoding="UTF-8"?> <Error><Code>TemporaryRedirect</Code><Message>Please re-send this request to the specified temporary endpoint. Continue to use the original request endpoint for future requests.</Message><Endpoint>test-bucket-garethsaxby-20210713-153159.s3.eu-west-2.amazonaws.com</Endpoint><Bucket>test-bucket-garethsaxby-20210713-153159</Bucket><RequestId>6077E9R1H8G7Z0S1</RequestId><HostId>OU6kvbWvdu79rmzjEa8YpVK8z1X5J0y1axM9o0bsEvQyA6vvdY+xZZDJFdUwez4leqMV5UUzxUw=</HostId></Error> ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
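As the report notes, forcing the client onto the bucket's own region avoids the 307 from the global endpoint. A rough boto3 sketch of discovering that region and presigning against the regional endpoint (the bucket name is the reporter's example and the object key is hypothetical; this is not the plugin's implementation):

```python
import boto3
from botocore.config import Config

bucket = "test-bucket-garethsaxby-20210713-153159"  # example bucket from the report

# get_bucket_location returns None for us-east-1 and the region name
# (e.g. "eu-west-2") for every other region.
location = boto3.client("s3").get_bucket_location(Bucket=bucket)
region = location.get("LocationConstraint") or "us-east-1"

# A client pinned to the bucket's region signs URLs for the regional
# endpoint, so a freshly created bucket no longer answers with a 307.
s3 = boto3.client("s3", region_name=region, config=Config(signature_version="s3v4"))
url = s3.generate_presigned_url(
    "get_object",
    Params={"Bucket": bucket, "Key": "some/remote/AnsiballZ_setup.py"},  # hypothetical key
    ExpiresIn=3600,
)
print(url)
```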
Files identified in the description: * [`lib/ansible/plugins/connection`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/lib/ansible/plugins/connection) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> Closing this temporarily as I think I need to revisit some of this first, apologies! To be more specific; the errors are still happening, but my determination of the cause may not be right, and I want to dig into that more before someone else starts looking. EDIT: I've reopened the issue now, as I've realised that I was just confusing myself a bit reading the fix I'd hacked together myself. The issue is still present when using the latest `main` from this repository. Files identified in the description: * [`plugins/connection/aws_ssm.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/connection/aws_ssm.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> @garethsaxby thx for the detailed report! Sadly I'm not familiar with ssm connection plugin. > If I force the plugin to use a regional endpoint for S3, and use a region when creating the client, as per my branch, it does work, albeit I'm not really sure -how- best to implement this to properly put a Pull Request together to fix the problem, given my branch feels like a really ugly hack. There is a similar PR (not merged yet) that introduce a `bucket_region` parameter: https://github.com/ansible-collections/community.aws/pull/603/files#diff-8000d7fb9262e11565b61882567d729fe0958cd9b1a0105683d5bbe0e5b4e585R51 You can try to introduce that too ```python if bucket_region: client = session.client( service, config=Config(signature_version="s3v4"), endpoint_url=f'https://s3.{bucket_region}.amazonaws.com' ) else: client = session.client( service, config=Config(signature_version="s3v4") ) ``` I also confirm this issue. I think a issue fix would be to use '-L' option with curl. I'm definitely having issues with bucket regions and encrypted buckets too. I hit this issue yesterday when attempting to use the aws ssm connection with using a newly created bucket in us-east-2 yesterday. Can we reopen this issue? The referenced PR in the close action above (https://github.com/ansible-collections/community.aws/pull/1176) does not seem to resolve this particular issue. That PR was targeting a fix for pulling the region information for the S3 bucket used for SSM file transfers from the bucket metadata itself, but the pre-signed URLs generated for the S3 downloads are still pointing at the global S3 endpoint, and not the region specific one. Thus, attempting to use the ssm plugin with a newly created transfer bucket in a region like us-east-2 continues to return the presigned URLs targeting the global S3 endpoint, which results in the 307 redirect to the regional endpoint, which then causes the presigned URL to fail with a signature mismatch error as the url was signed for the global endpoint and not the regional endpoint. 
I believe that #1190 needs to be further addressed to resolve this issue. Hi @bodnarbm please see https://github.com/ansible-collections/community.aws/pull/743/files to use virtual addressing. There is a PR from @phene but that relies on a hardcoded region to be defined. @charles-paul-mox Thank you, but that pr looks to be closed unmerged and I would prefer to not patch the plugin separately (if I was I would probably add the s3 client endpoint url as a separate variable, that way I could also get it to work with other endpoints also (like fips endpoints)) I'm hoping that someone like @tremble could reopen this issue though. Yes, I cannot merge PRs due to company policies. The virtual addressing is the important part. @charles-paul-mox My PR doesn't rely on a hard-coded region _unless you are using a non-default partition like GovCloud_. It uses the default global region just to query information about the S3 bucket's region, then uses the bucket's region from there on. Any real resolution to this problem? I'm using [5.1.0](https://github.com/ansible-collections/community.aws/releases/tag/5.1.0) release and there is still issue with AnsiballZ file: ``` sh-4.2$ cat AnsiballZ_yum.py <?xml version="1.0" encoding="UTF-8"?> <Error><Code>TemporaryRedirect</Code><Message>Please re-send this request to the specified temporary endpoint. Continue to use the original request endpoint for future requests.</Message><Endpoint>testbucketkochan.s3.eu-west-3.amazonaws.com</Endpoint><Bucket>testbucketkochan</Bucket><RequestId>W5B7ZAZZ........</RequestId><HostId>DR//pSU97KgA8ZLVD/............................+lC/xwAuIBO/W4RuWIXqyFp+MZj0ZuI=</HostId><sh-4.2$ ``` Any solution for this ? Cause: This is mainly because of the TemporaryRedirect error from AWS s3 with the resigned URL. due to this when we transfer the setup.py file from host to remote it will download with below content on the remote. <?xml version="1.0" encoding="UTF-8"?> <Error><Code>TemporaryRedirect</Code><Message>Please re-send this request to the specified temporary endpoint. Continue to use the original request endpoint for future requests.</Message><Endpoint>enactortestssm123.s3.us-east-2.amazonaws.com</Endpoint><Bucket>enactortestssm123</Bucket><RequestId>RZYTVTWNVV6V0ET1</RequestId><HostId>jDf+7m1brAHn98LcbqJXDHfraqX5i4DadfixrNM+qqEu3abyB67zLMYK9o/+6lU+Y3jwg/KtQ30=</HostId></Error> to avoid this you can modify _get_boto_client() function client initializing as below to support addressing_style virtual for s3. Thanks! Feel free to correct me. client = session.client( service, config=Config(signature_version="s3v4", s3={'addressing_style': 'virtual'}) )
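The client change proposed in the last comment can also be tried in isolation; a self-contained sketch (bucket, region and key are taken from that comment and are only illustrative) of presigning with SigV4 plus virtual-hosted addressing, which is what makes the URL point at `<bucket>.s3.<region>.amazonaws.com` directly:

```python
import boto3
from botocore.config import Config

# signature_version matches what the connection plugin already uses;
# addressing_style "virtual" moves the bucket into the hostname so the
# presigned URL targets the bucket's regional virtual-hosted endpoint.
s3 = boto3.client(
    "s3",
    region_name="eu-west-3",
    config=Config(signature_version="s3v4", s3={"addressing_style": "virtual"}),
)

url = s3.generate_presigned_url(
    "put_object",
    Params={"Bucket": "testbucketkochan", "Key": "AnsiballZ_yum.py"},
    ExpiresIn=3600,
)
print(url)
```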
2022-12-30T13:48:29
ansible-collections/community.aws
1,640
ansible-collections__community.aws-1640
[ "770" ]
a8cbce24071bcc62fe4594c38aff1baf18bd2862
diff --git a/plugins/modules/ecs_cluster.py b/plugins/modules/ecs_cluster.py --- a/plugins/modules/ecs_cluster.py +++ b/plugins/modules/ecs_cluster.py @@ -41,6 +41,41 @@ required: false type: int default: 10 + capacity_providers: + version_added: 5.2.0 + description: + - List of capacity providers to use for the cluster. + required: false + type: list + elements: str + capacity_provider_strategy: + version_added: 5.2.0 + description: + - List of capacity provider strategies to use for the cluster. + required: false + type: list + elements: dict + suboptions: + capacity_provider: + description: + - Name of capacity provider. + type: str + weight: + description: + - The relative percentage of the total number of launched tasks that should use the specified provider. + type: int + base: + description: + - How many tasks, at a minimum, should use the specified provider. + type: int + purge_capacity_providers: + version_added: 5.2.0 + description: + - Toggle overwriting of existing capacity providers or strategy. This is needed for backwards compatibility. + - By default I(purge_capacity_providers=false). In a release after 2024-06-01 this will be changed to I(purge_capacity_providers=true). + required: false + type: bool + default: false extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -56,6 +91,21 @@ name: default state: present +- name: Cluster creation with capacity providers and strategies. + community.aws.ecs_cluster: + name: default + state: present + capacity_providers: + - FARGATE + - FARGATE_SPOT + capacity_provider_strategy: + - capacity_provider: FARGATE + base: 1 + weight: 1 + - capacity_provider: FARGATE_SPOT + weight: 100 + purge_capacity_providers: True + - name: Cluster deletion community.aws.ecs_cluster: name: default @@ -75,6 +125,16 @@ description: how many services are active in this cluster returned: 0 if a new cluster type: int +capacityProviders: + version_added: 5.2.0 + description: list of capacity providers used in this cluster + returned: always + type: list +defaultCapacityProviderStrategy: + version_added: 5.2.0 + description: list of capacity provider strategies used in this cluster + returned: always + type: list clusterArn: description: the ARN of the cluster just created type: str @@ -112,6 +172,8 @@ pass # Handled by AnsibleAWSModule from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict class EcsClusterManager: @@ -145,8 +207,26 @@ def describe_cluster(self, cluster_name): return c raise Exception("Unknown problem describing cluster %s." 
% cluster_name) - def create_cluster(self, clusterName='default'): - response = self.ecs.create_cluster(clusterName=clusterName) + def create_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): + params = dict(clusterName=cluster_name) + if capacity_providers: + params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + if capacity_provider_strategy: + params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + response = self.ecs.create_cluster(**params) + return response['cluster'] + + def update_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): + params = dict(cluster=cluster_name) + if capacity_providers: + params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + else: + params['capacityProviders'] = [] + if capacity_provider_strategy: + params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + else: + params['defaultCapacityProviderStrategy'] = [] + response = self.ecs.put_cluster_capacity_providers(**params) return response['cluster'] def delete_cluster(self, clusterName): @@ -159,7 +239,17 @@ def main(): state=dict(required=True, choices=['present', 'absent', 'has_instances']), name=dict(required=True, type='str'), delay=dict(required=False, type='int', default=10), - repeat=dict(required=False, type='int', default=10) + repeat=dict(required=False, type='int', default=10), + purge_capacity_providers=dict(required=False, type='bool', default=False), + capacity_providers=dict(required=False, type='list', elements='str'), + capacity_provider_strategy=dict(required=False, + type='list', + elements='dict', + options=dict(capacity_provider=dict(type='str'), + weight=dict(type='int'), + base=dict(type='int', default=0) + ) + ), ) required_together = [['state', 'name']] @@ -177,12 +267,53 @@ def main(): results = dict(changed=False) if module.params['state'] == 'present': + # Pull requested and existing capacity providers and strategies. + purge_capacity_providers = module.params['purge_capacity_providers'] + requested_cp = module.params['capacity_providers'] + requested_cps = module.params['capacity_provider_strategy'] if existing and 'status' in existing and existing['status'] == "ACTIVE": - results['cluster'] = existing + existing_cp = existing['capacityProviders'] + existing_cps = existing['defaultCapacityProviderStrategy'] + + if requested_cp is None: + requested_cp = [] + + # Check if capacity provider strategy needs to trigger an update. + cps_update_needed = False + if requested_cps is not None: + for strategy in requested_cps: + if snake_dict_to_camel_dict(strategy) not in existing_cps: + cps_update_needed = True + for strategy in existing_cps: + if camel_dict_to_snake_dict(strategy) not in requested_cps: + cps_update_needed = True + elif requested_cps is None and existing_cps != []: + cps_update_needed = True + + # Unless purge_capacity_providers is true, we will not be updating the providers or strategy. + if not purge_capacity_providers: + module.deprecate('After 2024-06-01 the default value of purge_capacity_providers will change from false to true.' + ' To maintain the existing behaviour explicitly set purge_capacity_providers=true', + date='2024-06-01', collection_name='community.aws') + cps_update_needed = False + requested_cp = existing_cp + requested_cps = existing_cps + + # If either the providers or strategy differ, update the cluster. 
+ if requested_cp != existing_cp or cps_update_needed: + if not module.check_mode: + results['cluster'] = cluster_mgr.update_cluster(cluster_name=module.params['name'], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps) + results['changed'] = True + else: + results['cluster'] = existing else: if not module.check_mode: # doesn't exist. create it. - results['cluster'] = cluster_mgr.create_cluster(module.params['name']) + results['cluster'] = cluster_mgr.create_cluster(cluster_name=module.params['name'], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps) results['changed'] = True # delete the cluster
diff --git a/tests/integration/targets/ecs_cluster/tasks/main.yml b/tests/integration/targets/ecs_cluster/tasks/main.yml --- a/tests/integration/targets/ecs_cluster/tasks/main.yml +++ b/tests/integration/targets/ecs_cluster/tasks/main.yml @@ -63,6 +63,30 @@ that: - not ecs_cluster_again.changed + - name: add capacity providers and strategy + ecs_cluster: + name: "{{ ecs_cluster_name }}" + state: present + purge_capacity_providers: True + capacity_providers: + - FARGATE + - FARGATE_SPOT + capacity_provider_strategy: + - capacity_provider: FARGATE + base: 1 + weight: 1 + - capacity_provider: FARGATE_SPOT + weight: 100 + register: ecs_cluster_update + + - name: check that ecs_cluster was correctly updated + assert: + that: + - ecs_cluster_update.changed + - ecs_cluster_update.cluster is defined + - ecs_cluster_update.cluster.capacityProviders is defined + - "'FARGATE' in ecs_cluster_update.cluster.capacityProviders" + - name: create a VPC to work in ec2_vpc_net: cidr_block: 10.0.0.0/16
Add AWS ECS Capacity Provider Strategy Support ### Summary Migrated from https://github.com/ansible/ansible/issues/67997 (it doesn't seem to have made it over automatically) Add support for AWS ECS Cluster Capacity Provider Strategy configuration. Additional note, I noticed this because I was creating a cluster to use with the GitLab CI Fargate driver and started getting `The platform version must be null when specifying an EC2 launch type.` when trying to launch a job. It worked with a manually created Cluster and Task Definition, so I looked closely and found the difference was the manually created cluster had two Capacity Providers and the Ansible created one had none, nor can you manually add them. It's clearly something the AWS UI takes care of, which you can do with the API (see additional info) but this module currently does not support. It means you can't really use it to set up a Fargate cluster at all. ### Issue Type Feature Idea ### Component Name ecs_cluster ### Additional Information Enable configuration of ECS cluster capacity providers and strategies thereof. ```yaml ecs_cluster: ... capacity_providers: - "FARGATE" - "FARGATE_SPOT" capacity_provider_strategy: - capacity_provider: "FARGATE" base: 1 weight: 1 - capacity_provider: "FARGATE_SPOT" weight: 100 ``` hashicorp/terraform-provider-aws#11150 https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.put_cluster_capacity_providers ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
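The requested options map directly onto the `put_cluster_capacity_providers` API linked above. A rough boto3 sketch of the call the module would ultimately have to make (cluster name and values mirror the example request; this is not a finished implementation):

```python
import boto3

ecs = boto3.client("ecs")

# Attach the Fargate capacity providers to an existing cluster and set
# the default strategy, mirroring the proposed module parameters.
ecs.put_cluster_capacity_providers(
    cluster="default",  # illustrative cluster name
    capacityProviders=["FARGATE", "FARGATE_SPOT"],
    defaultCapacityProviderStrategy=[
        {"capacityProvider": "FARGATE", "base": 1, "weight": 1},
        {"capacityProvider": "FARGATE_SPOT", "weight": 100},
    ],
)
```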
Files identified in the description: * [`plugins/modules/ecs_cluster.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ecs_cluster.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @Java1Guy @jillr @markuman @s-hertel @tremble @wimnat [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> > It means you can't really use it to set up a Fargate cluster at all. hm I'm not sure about it. At work we create some fargate ecs clusters just with ```yml - name: create ecs cluster ecs_cluster: name: serverless-housekeeping state: present ``` and we can run ecs taskdefinitions with `launch_type: FARGATE` without any problems in that cluster. ```yml - name: letsencrypt taskdefinition ecs_taskdefinition: family: letsencrypt cpu: "256" memory: "512" state: present network_mode: awsvpc launch_type: FARGATE execution_role_arn: "arn:aws:iam::{{ caller_facts.account }}:role/ecsTaskExecutionRole" task_role_arn: "arn:aws:iam::{{ caller_facts.account }}:role/letsencryptECSTask" region: eu-central-1 containers: - name: letsencrypt environment: - name: KMS value: "{{ kms.ssm }}" essential: true image: "{{ caller_facts.account }}.dkr.ecr.eu-central-1.amazonaws.com/letsencrypt:latest" logConfiguration: logDriver: awslogs options: awslogs-group: /ecs/letsencrypt awslogs-region: eu-central-1 awslogs-stream-prefix: ecs register: letsTD ``` >> It means you can't really use it to set up a Fargate cluster at all. > hm I'm not sure about it. ...which does not mean that the parameters should not be supported by `community.aws.ecs_cluster` I think its not that hard to implement. Hi @markuman, thanks for the reply. That's interesting, I wonder what I'm doing wrong then? It's OT for the issue but for some reason my Ansible-created cluster can't launch Fargate task definitions but my manually created one *can* ... and I can't see any other difference. I'll keep digging though, if it works for you then at least I know it's possible! 
:-) @gregharvey ```yml --- - hosts: localhost connection: local tags: - example vars: region: eu-central-1 subnet: subnet-d8309db2 security_group: sg-f32f0196 ecs_trusted_relationship: | { "Version": "2012-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Principal": { "Service": "ecs-tasks.amazonaws.com" }, "Action": "sts:AssumeRole" } ] } tasks: - name: Get the current caller identity facts aws_caller_info: register: caller_facts - name: create ecsTaskExecution role iam_role: name: ecsTaskExecutionRole description: ecsTaskExecutionRole with to many permissions state: present purge_policies: yes managed_policy: - arn:aws:iam::aws:policy/CloudWatchLogsFullAccess - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceEventsRole - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly assume_role_policy_document: "{{ ecs_trusted_relationship }}" - name: create ecs cluster ecs_cluster: name: sometcluster state: present region: "{{ region }}" - name: create cloudwatch log group cloudwatchlogs_log_group: log_group_name: /ecs/fargate-test retention: 1 region: "{{ region }}" - name: some fargate task definition ecs_taskdefinition: family: something cpu: "256" memory: "512" state: present network_mode: awsvpc launch_type: FARGATE execution_role_arn: ecsTaskExecutionRole task_role_arn: ecsTaskExecutionRole region: "{{ region }}" containers: - name: something command: - uptime essential: true image: "alpine:latest" logConfiguration: logDriver: awslogs options: awslogs-group: /ecs/fargate-test awslogs-region: "{{ region }}" awslogs-stream-prefix: ecs register: td_output - name: Run task community.aws.ecs_task: operation: run cluster: sometcluster task_definition: something count: 1 started_by: ansible_user launch_type: FARGATE network_configuration: subnets: - "{{ subnet }}" security_groups: - "{{ security_group }}" register: task_output - debug: var: task_output - hosts: localhost connection: local tags: - cleanup vars: region: eu-central-1 subnet: subnet-d8309db2 security_group: sg-f32f0196 ecs_trusted_relationship: | { "Version": "2012-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Principal": { "Service": "ecs-tasks.amazonaws.com" }, "Action": "sts:AssumeRole" } ] } tasks: - name: remove iam role iam_role: name: ecsTaskExecutionRole description: ecsTaskExecutionRole with to many permissions state: absent purge_policies: yes managed_policy: - arn:aws:iam::aws:policy/CloudWatchLogsFullAccess - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceEventsRole - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly assume_role_policy_document: "{{ ecs_trusted_relationship }}" - name: remove ecs cluster ecs_cluster: name: sometcluster state: absent region: "{{ region }}" - name: remove cloudwatch log group cloudwatchlogs_log_group: log_group_name: /ecs/fargate-test retention: 1 region: "{{ region }}" state: absent ``` adjust just the vars `AWS_PROFILE=yourprofile ansible-playbook 770.yml --tags example` and `AWS_PROFILE=yourprofile ansible-playbook 770.yml --tags cleanup` to remove the resources. 
just the image is failing to pull (_no idea atm_)

```
Stopped reason
CannotPullContainerError: inspect image has been retried 5 time(s): failed to resolve ref "docker.io/library/alpine:latest": failed to do request: Head https://registry-1.docker.io/v2/library/alpine/manifests/latest: dial tcp 52.204.76.244:443: i/o tim...
```

but at least it runs Fargate containers without any issue in a cluster made by the `ecs_cluster` module.

Thank you so much, I'll give it a go! :+1:

Just to follow up here, in case someone has a similar problem. The code above works perfectly, so indeed you clearly *can* create a cluster and run a task. However, my GitLab `fargate` custom executor still wasn't working. I reviewed the docs to try and understand what's different, and for reasons I don't know there are steps 7 and 8 here to add a default capacity provider strategy:

* https://docs.gitlab.com/runner/configuration/runner_autoscale_aws_fargate/#step-5-create-an-ecs-fargate-cluster

Without that step it doesn't work. I presume the driver does not set the `launch_type` when it runs a task, and that it defaults to `EC2` if you don't either specify `FARGATE` when you launch the task *or* tell your cluster to favour `FARGATE`. This is really a bug in the `fargate` driver for GitLab Runner, in fairness, but I could work around it if Ansible let me set that default capacity provider strategy. So it would be handy. :-)
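For illustration only (this sketch is not part of the original thread): the default capacity provider strategy being asked for is a single cluster-level call in the ECS API. A minimal boto3 sketch, assuming configured credentials; the cluster name reuses the placeholder from the playbook above and the region is likewise an assumption:

```python
# Sketch: make FARGATE the default capacity provider of an existing cluster,
# so tasks launched without an explicit launch_type still land on Fargate.
# "sometcluster" and the region are placeholders taken from the thread above.
import boto3

ecs = boto3.client("ecs", region_name="eu-central-1")

ecs.put_cluster_capacity_providers(
    cluster="sometcluster",
    capacityProviders=["FARGATE", "FARGATE_SPOT"],
    defaultCapacityProviderStrategy=[
        {"capacityProvider": "FARGATE", "weight": 1, "base": 0},
    ],
)
```

Exposing an equivalent option through the `ecs_cluster` module is what the reporter is asking for here.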
2023-01-05T19:05:44
ansible-collections/community.aws
1647
ansible-collections__community.aws-1647
[ "1624" ]
3fa111542fd518b0bfd763318333293ea059c3f1
diff --git a/plugins/modules/s3_lifecycle.py b/plugins/modules/s3_lifecycle.py --- a/plugins/modules/s3_lifecycle.py +++ b/plugins/modules/s3_lifecycle.py @@ -466,38 +466,40 @@ def create_lifecycle_rule(client, module): (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule) - - # Write lifecycle to bucket - try: - client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_configuration) - except is_boto3_error_message('At least one action needs to be specified in a rule'): - # Amazon interpretted this as not changing anything - changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) - - _changed = changed - _retries = 10 - _not_changed_cnt = 6 - while wait and _changed and _retries and _not_changed_cnt: - # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again and - # again couple of times. - # Thus try to read the rule few times in a row to check if it has changed. - time.sleep(5) - _retries -= 1 - new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, - new_rules, - new_rule) - if not _changed: - _not_changed_cnt -= 1 - _changed = True - else: - _not_changed_cnt = 6 + if changed: + # Write lifecycle to bucket + try: + client.put_bucket_lifecycle_configuration( + aws_retry=True, + Bucket=name, + LifecycleConfiguration=lifecycle_configuration) + except is_boto3_error_message('At least one action needs to be specified in a rule'): + # Amazon interpretted this as not changing anything + changed = False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) + + _changed = changed + _retries = 10 + _not_changed_cnt = 6 + while wait and _changed and _retries and _not_changed_cnt: + # We've seen examples where get_bucket_lifecycle_configuration returns + # the updated rules, then the old rules, then the updated rules again and + # again couple of times. + # Thus try to read the rule few times in a row to check if it has changed. 
+ time.sleep(5) + _retries -= 1 + new_rules = fetch_rules(client, module, name) + (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, + new_rules, + new_rule) + if not _changed: + _not_changed_cnt -= 1 + _changed = True + else: + _not_changed_cnt = 6 + else: + _retries = 0 new_rules = fetch_rules(client, module, name) @@ -520,36 +522,39 @@ def destroy_lifecycle_rule(client, module): current_lifecycle_rules = fetch_rules(client, module, name) changed, lifecycle_obj = compare_and_remove_rule(current_lifecycle_rules, rule_id, prefix) - # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration - try: - if lifecycle_obj['Rules']: - client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_obj) - elif current_lifecycle_rules: - changed = True - client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - _changed = changed - _retries = 10 - _not_changed_cnt = 6 - while wait and _changed and _retries and _not_changed_cnt: - # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again and - # again couple of times. - # Thus try to read the rule few times in a row to check if it has changed. - time.sleep(5) - _retries -= 1 - new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix) - if not _changed: - _not_changed_cnt -= 1 - _changed = True - else: - _not_changed_cnt = 6 + if changed: + # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration + try: + if lifecycle_obj['Rules']: + client.put_bucket_lifecycle_configuration( + aws_retry=True, + Bucket=name, + LifecycleConfiguration=lifecycle_obj) + elif current_lifecycle_rules: + changed = True + client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) + + _changed = changed + _retries = 10 + _not_changed_cnt = 6 + while wait and _changed and _retries and _not_changed_cnt: + # We've seen examples where get_bucket_lifecycle_configuration returns + # the updated rules, then the old rules, then the updated rules again and + # again couple of times. + # Thus try to read the rule few times in a row to check if it has changed. + time.sleep(5) + _retries -= 1 + new_rules = fetch_rules(client, module, name) + (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix) + if not _changed: + _not_changed_cnt -= 1 + _changed = True + else: + _not_changed_cnt = 6 + else: + _retries = 0 new_rules = fetch_rules(client, module, name)
s3_lifecycle is not idempotent - does write action for no change ### Summary When `s3_lifecycle` is run and there are no changes to make, it still calls [`put_bucket_lifecycle_configuration`](https://github.com/ansible-collections/community.aws/blob/bdb7c9f26f6ff39654cd90e2dd18605a6e3b026c/plugins/modules/s3_lifecycle.py#L473). My use case is that I am running a playbook multiple times concurrently, for a lifecycle configuration which is not changing. And I'm getting errors because of concurrency clashes. If I'm not changing the lifecycle, I expect only read-only calls to S3, which shouldn't clash. This module should get the existing lifecycle config, compare it to what we want, and only if it differs, put the new lifecycle. ### Issue Type Bug Report ### Component Name s3_lifecycle ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.13.6] config file = /home/ec2-user/.ansible.cfg configured module search path = ['/home/ec2-user/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/ec2-user/.pyenv/versions/3.8.11/lib/python3.8/site-packages/ansible ansible collection location = /home/ec2-user/.ansible/collections:/usr/share/ansible/collections executable location = /home/ec2-user/.pyenv/versions/3.8.11/bin/ansible python version = 3.8.11 (default, Sep 7 2022, 04:17:12) [GCC 7.3.1 20180712 (Red Hat 7.3.1-15)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) # /home/ec2-user/.pyenv/versions/3.8.11/lib/python3.8/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 3.5.0 ansible.netcommon 3.1.3 ansible.posix 1.4.0 ansible.utils 2.7.0 ansible.windows 1.12.0 arista.eos 5.0.1 awx.awx 21.8.0 azure.azcollection 1.14.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.1 cisco.aci 2.3.0 cisco.asa 3.1.0 cisco.dnac 6.6.0 cisco.intersight 1.0.20 cisco.ios 3.3.2 cisco.iosxr 3.3.1 cisco.ise 2.5.8 cisco.meraki 2.11.0 cisco.mso 2.1.0 cisco.nso 1.0.3 cisco.nxos 3.2.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.6.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.8.1 community.digitalocean 1.22.0 community.dns 2.4.0 community.docker 2.7.1 community.fortios 1.0.0 community.general 5.8.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 3.4.0 community.hrobot 1.6.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.3.0 community.proxysql 1.4.0 community.rabbitmq 1.2.3 community.routeros 2.3.1 community.sap 1.0.0 community.sap_libs 1.3.0 community.skydive 1.0.0 community.sops 1.4.1 community.vmware 2.10.1 community.windows 1.11.1 community.zabbix 1.8.0 containers.podman 1.9.4 cyberark.conjur 1.2.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.2 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.20.0 fortinet.fortimanager 2.1.6 fortinet.fortios 2.1.7 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.2 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.10.0 infinidat.infinibox 1.3.7 infoblox.nios_modules 1.4.0 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.0.4 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.21.0 netapp.elementsw 21.7.0 netapp.ontap 21.24.1 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 
netapp_eseries.santricity 1.3.1 netbox.netbox 3.8.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.3.1 purestorage.flasharray 1.14.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.1.1 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 2.1.0 t_systems_mms.icinga_director 1.31.4 theforeman.foreman 3.7.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.3.0 vyos.vyos 3.0.1 wti.remote 1.0.4 # /home/ec2-user/.ansible/collections/ansible_collections Collection Version ----------------- ------- amazon.aws 5.1.0 ansible.netcommon 4.1.0 ansible.utils 2.8.0 community.aws 5.0.0 community.crypto 2.9.0 community.general 6.0.1 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ANSIBLE_PIPELINING(/home/ec2-user/.ansible.cfg) = True DEFAULT_LOCAL_TMP(/home/ec2-user/.ansible.cfg) = /dev/shm/ansible/tmp_local/ansible-local-24375r2_prrsj DEFAULT_STDOUT_CALLBACK(/home/ec2-user/.ansible.cfg) = yaml INTERPRETER_PYTHON(/home/ec2-user/.ansible.cfg) = /usr/bin/python3 ``` ### OS / Environment Amazon Linux 2 ### Steps to Reproduce `playbook.yaml` ``` --- - hosts: myhosts connection: local gather_facts: no vars: bucket: mybucket region: ap-southeast-2 rule_name: "my_rule" tasks: - name: create bucket run_once: true s3_bucket: state: present region: "{{ region }}" name: "{{ bucket }}" encryption: "AES256" tags: person: matt delete_after: "21/12/2022" - name: Add lifecycle config once run_once: true community.aws.s3_lifecycle: rule_id: "{{ rule_name }}" name: "{{ bucket }}" noncurrent_version_storage_class: standard_ia noncurrent_version_transition_days: 30 # minimum state: present status: enabled region: "{{ region }}" wait: True - name: Add lifecycle config many times run_once: False community.aws.s3_lifecycle: rule_id: "{{ rule_name }}" name: "{{ bucket }}" noncurrent_version_storage_class: standard_ia noncurrent_version_transition_days: 30 # minimum state: present status: enabled region: "{{ region }}" wait: True ``` `hosts.yaml` ``` myhosts: hosts: a: {} b: {} c: {} d: {} e: {} f: {} g: {} h: {} i: {} j: {} k: {} ``` Run with: ``` ansible-playbook playbook.yaml -i hosts.yaml -e ansible_python_interpreter=$(which python3) ``` ### Expected Results By the time we get to the last task, the bucket already has the lifecycle config we want. So the last tasks should also report success (no change), without throwing any errors. boto3 should only be used for read-only calls. No put call should be made by Ansible. ### Actual Results ``` PLAY [myhosts] ***************************************************************************************** TASK [create bucket] *********************************************************************************** changed: [a] TASK [Add lifecycle config once] *********************************************************************** changed: [a] TASK [Add lifecycle config many times] ***************************************************************** An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [c]: FAILED! 
=> changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: Lf1tcMXZfFFbYqA4HnEu/Dbii3iAFeMpWzkN2GJ9RN/7H/KiqSYCqvQZWKrYVCEQ3/oiuNJtuyeW3qbWsTuPBg== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:34 GMT server: AmazonS3 x-amz-id-2: Lf1tcMXZfFFbYqA4HnEu/Dbii3iAFeMpWzkN2GJ9RN/7H/KiqSYCqvQZWKrYVCEQ3/oiuNJtuyeW3qbWsTuPBg== x-amz-request-id: X05KXWBXVB1FKJAY http_status_code: 409 request_id: X05KXWBXVB1FKJAY retry_attempts: 0 An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [e]: FAILED! => changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: 66BDcsa1gA2Sqn+HgKWnb0tst7Pp4KeRulVfOw0k41+El39THSbqbMC5qMuZaP3d8lV/2Od6ik/DBttggxai9g== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:34 GMT server: AmazonS3 x-amz-id-2: 66BDcsa1gA2Sqn+HgKWnb0tst7Pp4KeRulVfOw0k41+El39THSbqbMC5qMuZaP3d8lV/2Od6ik/DBttggxai9g== x-amz-request-id: X05ZGKE17DWTPNHA http_status_code: 409 request_id: X05ZGKE17DWTPNHA retry_attempts: 0 ok: [d] An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [a]: FAILED! => changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. 
lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: jtEllYwZAVnS4V98eCIvffmBdiQajEMM6XgKTOrTYZ9wnfBk3C3yFa/QicPRTHmW+ljgLGdKMCqI5ExhvTId1w== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:34 GMT server: AmazonS3 x-amz-id-2: jtEllYwZAVnS4V98eCIvffmBdiQajEMM6XgKTOrTYZ9wnfBk3C3yFa/QicPRTHmW+ljgLGdKMCqI5ExhvTId1w== x-amz-request-id: X05Q23R8Y5KEWD4P http_status_code: 409 request_id: X05Q23R8Y5KEWD4P retry_attempts: 0 ok: [b] An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [f]: FAILED! => changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: JzIHlPYjCIlIa+o88FYvcEvFBKCQDdo75C0Mdwcr6ZQCHdP2hkEetTKdCqVe0m+fi2RcPMpXwqNN4JBTcoactQ== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:35 GMT server: AmazonS3 x-amz-id-2: JzIHlPYjCIlIa+o88FYvcEvFBKCQDdo75C0Mdwcr6ZQCHdP2hkEetTKdCqVe0m+fi2RcPMpXwqNN4JBTcoactQ== x-amz-request-id: 3FV4FJPW4MPJZTW3 http_status_code: 409 request_id: 3FV4FJPW4MPJZTW3 retry_attempts: 0 An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. ... 
PLAY RECAP ********************************************************************************************* a : ok=2 changed=2 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 b : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 c : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 d : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 e : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 f : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 g : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 h : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 i : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 j : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 k : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 ``` i.e. some reported success, with no change. Others threw an error. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
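What the report asks for is essentially a read-compare-write guard around the lifecycle call. As a standalone illustration (not the module's actual code), the same pattern expressed directly with boto3; the bucket, region and rule mirror the reproduction playbook above, and the comparison is deliberately naive:

```python
# Illustration of the requested behaviour: read the current lifecycle rules,
# and only call the write API if the desired configuration differs.
# Bucket/region/rule mirror the reproduction playbook; assumes boto3 credentials.
import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3", region_name="ap-southeast-2")
bucket = "mybucket"

desired = {
    "Rules": [
        {
            "ID": "my_rule",
            "Filter": {"Prefix": ""},
            "Status": "Enabled",
            "NoncurrentVersionTransitions": [
                {"NoncurrentDays": 30, "StorageClass": "STANDARD_IA"},
            ],
        }
    ]
}

try:
    current = s3.get_bucket_lifecycle_configuration(Bucket=bucket).get("Rules", [])
except ClientError as e:
    if e.response["Error"]["Code"] != "NoSuchLifecycleConfiguration":
        raise
    current = []

if current != desired["Rules"]:
    # Only a genuine difference triggers a write, so concurrent no-op runs
    # stay read-only and cannot collide with each other.
    s3.put_bucket_lifecycle_configuration(
        Bucket=bucket, LifecycleConfiguration=desired
    )
```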
Files identified in the description: * [`plugins/modules/s3_lifecycle.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/s3_lifecycle.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->

@mdavis-xyz Can you debug e.g. with `q` and see what the value of the `changed` variable is, two lines above?
https://github.com/ansible-collections/community.aws/blob/bdb7c9f26f6ff39654cd90e2dd18605a6e3b026c/plugins/modules/s3_lifecycle.py#L467

```py
import q
q(changed)
```

and `cat /tmp/q` after it is executed.

I've never heard of the `q` library before. Is this how you're supposed to debug Ansible modules? I've always struggled to debug code changes I've written, because even print statements don't work. We should add this to the contribution docs for this collection, and Ansible in general.

Just to be clear, regardless of what `compare_and_update_configuration` does, `put_bucket_lifecycle_configuration` will always be called.
https://github.com/ansible-collections/community.aws/blob/bdb7c9f26f6ff39654cd90e2dd18605a6e3b026c/plugins/modules/s3_lifecycle.py#L467-L476

There are really two issues here.

1. the module calls put when it shouldn't
2. the module reports changed=False after calling put

For the second one, we can fix that with `changed |= True` in an `else` after that `try`. For the first one, perhaps everything after `compare_and_update_configuration` is called should be inside an `if changed`?

I'll try the `q` thing later today. It will take me a while because figuring out how to run a clone of a module without polluting my already-installed modules is not something I find obvious or easy.

Ok I couldn't figure out how to run a playbook using a local clone of the module, without messing with my real global installation. (Are there docs for that somewhere? As an Ansible user I never need to touch galaxy or anything like that, because I only use the standard pre-installed collections.) So I just created a whole new VM to test in, and modified the file in the globally installed collection.

`/tmp/q` is:

```
 0.3s create_lifecycle_rule: True
 1.0s create_lifecycle_rule: False
 1.4s create_lifecycle_rule: False
 1.1s create_lifecycle_rule: False
 1.2s create_lifecycle_rule: False
 1.1s create_lifecycle_rule: False
 1.0s create_lifecycle_rule: False
 1.2s create_lifecycle_rule: False
 1.1s create_lifecycle_rule: False
 1.1s create_lifecycle_rule: False
 0.7s create_lifecycle_rule: False
 0.4s create_lifecycle_rule: False
```

So it was True the first time, as expected, and False the remainder, as expected. I tried wrapping the put and try inside an `if` statement. That worked as expected. Now the MWE passes. 
(Not sure how to handle `_retries`)

```
(changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule)
if changed:
    # Write lifecycle to bucket
    try:
        client.put_bucket_lifecycle_configuration(
            aws_retry=True,
            Bucket=name,
            LifecycleConfiguration=lifecycle_configuration)
    except is_boto3_error_message('At least one action needs to be specified in a rule'):
        # Amazon interpretted this as not changing anything
        changed = False
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules)

    _changed = changed
    _retries = 10
    while wait and _changed and _retries:
        # We've seen examples where get_bucket_lifecycle_configuration returns
        # the updated rules, then the old rules, then the updated rules again,
        time.sleep(5)
        _retries -= 1
        new_rules = fetch_rules(client, module, name)
        (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module,
                                                                               new_rules,
                                                                               new_rule)
else:
    _retries = 0

new_rules = fetch_rules(client, module, name)

module.exit_json(changed=changed, new_rule=new_rule, rules=new_rules,
                 old_rules=old_lifecycle_rules, _retries=_retries, _config=lifecycle_configuration)
```

What's the best way to add a unit/integration test for this? My MWE uses multiple hosts. Is that easy to do with the existing test setup? Or is there a way to run with a `loop` concurrently on one host?

> Ok I couldn't figure out how to run a playbook using a local clone of the module, without messing with my real global installation. (Are there docs for that somewhere?)

Yeah, basically you can also place hacky/patched modules in your roles/playbook directory in the `library` folder. The only thing you must change then is to call `s3_lifecycle:` instead of `community.aws.s3_lifecycle:`

See https://docs.ansible.com/ansible/2.8/user_guide/playbooks_best_practices.html#directory-layout

> library/                  # if any custom modules, put them here (optional)

> What's the best way to add a unit/integration test for this? My MWE uses multiple hosts. Is that easy to do with the existing test setup? Or is there a way to run with a loop concurrently on one host?

Maybe @goneri or @tremble have an idea about testing.

Note for testing: my MWE only covered a non-empty list of rules. The same change applies for removing rules; in my PR I wrote the same change twice. For an integration test we may want to duplicate the last 2 tasks in the MWE to change present to absent, to test that second change.
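On the open question of reproducing the clash without a multi-host inventory: one option (an illustration only, not the collection's existing test tooling) is to drive the same write concurrently from plain Python and look for `OperationAborted` in the results. The bucket name and rule below are placeholders:

```python
# Illustration: trigger the concurrent-write clash outside Ansible by calling
# PutBucketLifecycleConfiguration from several threads at once.
# Bucket name and rule are placeholders; assumes boto3 credentials.
from concurrent.futures import ThreadPoolExecutor

import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3", region_name="ap-southeast-2")
bucket = "mybucket"
config = {
    "Rules": [
        {"ID": "my_rule", "Filter": {"Prefix": ""}, "Status": "Enabled",
         "Expiration": {"Days": 30}}
    ]
}


def put_once(_):
    try:
        s3.put_bucket_lifecycle_configuration(
            Bucket=bucket, LifecycleConfiguration=config
        )
        return "ok"
    except ClientError as e:
        # Expect some "OperationAborted" entries when the calls overlap.
        return e.response["Error"]["Code"]


with ThreadPoolExecutor(max_workers=10) as pool:
    print(list(pool.map(put_once, range(10))))
```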
2023-01-11T13:00:35
ansible-collections/community.aws
1649
ansible-collections__community.aws-1649
[ "1624" ]
6e2adbfb567b79e30cb921c4e10d7e2790fd30cc
diff --git a/plugins/modules/s3_lifecycle.py b/plugins/modules/s3_lifecycle.py --- a/plugins/modules/s3_lifecycle.py +++ b/plugins/modules/s3_lifecycle.py @@ -467,38 +467,40 @@ def create_lifecycle_rule(client, module): (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule) - - # Write lifecycle to bucket - try: - client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_configuration) - except is_boto3_error_message('At least one action needs to be specified in a rule'): - # Amazon interpretted this as not changing anything - changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) - - _changed = changed - _retries = 10 - _not_changed_cnt = 6 - while wait and _changed and _retries and _not_changed_cnt: - # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again and - # again couple of times. - # Thus try to read the rule few times in a row to check if it has changed. - time.sleep(5) - _retries -= 1 - new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, - new_rules, - new_rule) - if not _changed: - _not_changed_cnt -= 1 - _changed = True - else: - _not_changed_cnt = 6 + if changed: + # Write lifecycle to bucket + try: + client.put_bucket_lifecycle_configuration( + aws_retry=True, + Bucket=name, + LifecycleConfiguration=lifecycle_configuration) + except is_boto3_error_message('At least one action needs to be specified in a rule'): + # Amazon interpretted this as not changing anything + changed = False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) + + _changed = changed + _retries = 10 + _not_changed_cnt = 6 + while wait and _changed and _retries and _not_changed_cnt: + # We've seen examples where get_bucket_lifecycle_configuration returns + # the updated rules, then the old rules, then the updated rules again and + # again couple of times. + # Thus try to read the rule few times in a row to check if it has changed. 
+ time.sleep(5) + _retries -= 1 + new_rules = fetch_rules(client, module, name) + (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, + new_rules, + new_rule) + if not _changed: + _not_changed_cnt -= 1 + _changed = True + else: + _not_changed_cnt = 6 + else: + _retries = 0 new_rules = fetch_rules(client, module, name) @@ -521,36 +523,39 @@ def destroy_lifecycle_rule(client, module): current_lifecycle_rules = fetch_rules(client, module, name) changed, lifecycle_obj = compare_and_remove_rule(current_lifecycle_rules, rule_id, prefix) - # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration - try: - if lifecycle_obj['Rules']: - client.put_bucket_lifecycle_configuration( - aws_retry=True, - Bucket=name, - LifecycleConfiguration=lifecycle_obj) - elif current_lifecycle_rules: - changed = True - client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e) - - _changed = changed - _retries = 10 - _not_changed_cnt = 6 - while wait and _changed and _retries and _not_changed_cnt: - # We've seen examples where get_bucket_lifecycle_configuration returns - # the updated rules, then the old rules, then the updated rules again and - # again couple of times. - # Thus try to read the rule few times in a row to check if it has changed. - time.sleep(5) - _retries -= 1 - new_rules = fetch_rules(client, module, name) - (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix) - if not _changed: - _not_changed_cnt -= 1 - _changed = True - else: - _not_changed_cnt = 6 + if changed: + # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration + try: + if lifecycle_obj['Rules']: + client.put_bucket_lifecycle_configuration( + aws_retry=True, + Bucket=name, + LifecycleConfiguration=lifecycle_obj) + elif current_lifecycle_rules: + changed = True + client.delete_bucket_lifecycle(aws_retry=True, Bucket=name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) + + _changed = changed + _retries = 10 + _not_changed_cnt = 6 + while wait and _changed and _retries and _not_changed_cnt: + # We've seen examples where get_bucket_lifecycle_configuration returns + # the updated rules, then the old rules, then the updated rules again and + # again couple of times. + # Thus try to read the rule few times in a row to check if it has changed. + time.sleep(5) + _retries -= 1 + new_rules = fetch_rules(client, module, name) + (_changed, lifecycle_configuration) = compare_and_remove_rule(new_rules, rule_id, prefix) + if not _changed: + _not_changed_cnt -= 1 + _changed = True + else: + _not_changed_cnt = 6 + else: + _retries = 0 new_rules = fetch_rules(client, module, name)
s3_lifecycle is not idempotent - does write action for no change ### Summary When `s3_lifecycle` is run and there are no changes to make, it still calls [`put_bucket_lifecycle_configuration`](https://github.com/ansible-collections/community.aws/blob/bdb7c9f26f6ff39654cd90e2dd18605a6e3b026c/plugins/modules/s3_lifecycle.py#L473). My use case is that I am running a playbook multiple times concurrently, for a lifecycle configuration which is not changing. And I'm getting errors because of concurrency clashes. If I'm not changing the lifecycle, I expect only read-only calls to S3, which shouldn't clash. This module should get the existing lifecycle config, compare it to what we want, and only if it differs, put the new lifecycle. ### Issue Type Bug Report ### Component Name s3_lifecycle ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.13.6] config file = /home/ec2-user/.ansible.cfg configured module search path = ['/home/ec2-user/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/ec2-user/.pyenv/versions/3.8.11/lib/python3.8/site-packages/ansible ansible collection location = /home/ec2-user/.ansible/collections:/usr/share/ansible/collections executable location = /home/ec2-user/.pyenv/versions/3.8.11/bin/ansible python version = 3.8.11 (default, Sep 7 2022, 04:17:12) [GCC 7.3.1 20180712 (Red Hat 7.3.1-15)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) # /home/ec2-user/.pyenv/versions/3.8.11/lib/python3.8/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 3.5.0 ansible.netcommon 3.1.3 ansible.posix 1.4.0 ansible.utils 2.7.0 ansible.windows 1.12.0 arista.eos 5.0.1 awx.awx 21.8.0 azure.azcollection 1.14.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.1 cisco.aci 2.3.0 cisco.asa 3.1.0 cisco.dnac 6.6.0 cisco.intersight 1.0.20 cisco.ios 3.3.2 cisco.iosxr 3.3.1 cisco.ise 2.5.8 cisco.meraki 2.11.0 cisco.mso 2.1.0 cisco.nso 1.0.3 cisco.nxos 3.2.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.6.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.8.1 community.digitalocean 1.22.0 community.dns 2.4.0 community.docker 2.7.1 community.fortios 1.0.0 community.general 5.8.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 3.4.0 community.hrobot 1.6.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.3.0 community.proxysql 1.4.0 community.rabbitmq 1.2.3 community.routeros 2.3.1 community.sap 1.0.0 community.sap_libs 1.3.0 community.skydive 1.0.0 community.sops 1.4.1 community.vmware 2.10.1 community.windows 1.11.1 community.zabbix 1.8.0 containers.podman 1.9.4 cyberark.conjur 1.2.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.2 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.20.0 fortinet.fortimanager 2.1.6 fortinet.fortios 2.1.7 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.2 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.10.0 infinidat.infinibox 1.3.7 infoblox.nios_modules 1.4.0 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.0.4 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.21.0 netapp.elementsw 21.7.0 netapp.ontap 21.24.1 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 
netapp_eseries.santricity 1.3.1 netbox.netbox 3.8.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.3.1 purestorage.flasharray 1.14.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.1.1 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 2.1.0 t_systems_mms.icinga_director 1.31.4 theforeman.foreman 3.7.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.3.0 vyos.vyos 3.0.1 wti.remote 1.0.4 # /home/ec2-user/.ansible/collections/ansible_collections Collection Version ----------------- ------- amazon.aws 5.1.0 ansible.netcommon 4.1.0 ansible.utils 2.8.0 community.aws 5.0.0 community.crypto 2.9.0 community.general 6.0.1 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ANSIBLE_PIPELINING(/home/ec2-user/.ansible.cfg) = True DEFAULT_LOCAL_TMP(/home/ec2-user/.ansible.cfg) = /dev/shm/ansible/tmp_local/ansible-local-24375r2_prrsj DEFAULT_STDOUT_CALLBACK(/home/ec2-user/.ansible.cfg) = yaml INTERPRETER_PYTHON(/home/ec2-user/.ansible.cfg) = /usr/bin/python3 ``` ### OS / Environment Amazon Linux 2 ### Steps to Reproduce `playbook.yaml` ``` --- - hosts: myhosts connection: local gather_facts: no vars: bucket: mybucket region: ap-southeast-2 rule_name: "my_rule" tasks: - name: create bucket run_once: true s3_bucket: state: present region: "{{ region }}" name: "{{ bucket }}" encryption: "AES256" tags: person: matt delete_after: "21/12/2022" - name: Add lifecycle config once run_once: true community.aws.s3_lifecycle: rule_id: "{{ rule_name }}" name: "{{ bucket }}" noncurrent_version_storage_class: standard_ia noncurrent_version_transition_days: 30 # minimum state: present status: enabled region: "{{ region }}" wait: True - name: Add lifecycle config many times run_once: False community.aws.s3_lifecycle: rule_id: "{{ rule_name }}" name: "{{ bucket }}" noncurrent_version_storage_class: standard_ia noncurrent_version_transition_days: 30 # minimum state: present status: enabled region: "{{ region }}" wait: True ``` `hosts.yaml` ``` myhosts: hosts: a: {} b: {} c: {} d: {} e: {} f: {} g: {} h: {} i: {} j: {} k: {} ``` Run with: ``` ansible-playbook playbook.yaml -i hosts.yaml -e ansible_python_interpreter=$(which python3) ``` ### Expected Results By the time we get to the last task, the bucket already has the lifecycle config we want. So the last tasks should also report success (no change), without throwing any errors. boto3 should only be used for read-only calls. No put call should be made by Ansible. ### Actual Results ``` PLAY [myhosts] ***************************************************************************************** TASK [create bucket] *********************************************************************************** changed: [a] TASK [Add lifecycle config once] *********************************************************************** changed: [a] TASK [Add lifecycle config many times] ***************************************************************** An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [c]: FAILED! 
=> changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: Lf1tcMXZfFFbYqA4HnEu/Dbii3iAFeMpWzkN2GJ9RN/7H/KiqSYCqvQZWKrYVCEQ3/oiuNJtuyeW3qbWsTuPBg== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:34 GMT server: AmazonS3 x-amz-id-2: Lf1tcMXZfFFbYqA4HnEu/Dbii3iAFeMpWzkN2GJ9RN/7H/KiqSYCqvQZWKrYVCEQ3/oiuNJtuyeW3qbWsTuPBg== x-amz-request-id: X05KXWBXVB1FKJAY http_status_code: 409 request_id: X05KXWBXVB1FKJAY retry_attempts: 0 An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [e]: FAILED! => changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: 66BDcsa1gA2Sqn+HgKWnb0tst7Pp4KeRulVfOw0k41+El39THSbqbMC5qMuZaP3d8lV/2Od6ik/DBttggxai9g== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:34 GMT server: AmazonS3 x-amz-id-2: 66BDcsa1gA2Sqn+HgKWnb0tst7Pp4KeRulVfOw0k41+El39THSbqbMC5qMuZaP3d8lV/2Od6ik/DBttggxai9g== x-amz-request-id: X05ZGKE17DWTPNHA http_status_code: 409 request_id: X05ZGKE17DWTPNHA retry_attempts: 0 ok: [d] An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [a]: FAILED! => changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. 
lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: jtEllYwZAVnS4V98eCIvffmBdiQajEMM6XgKTOrTYZ9wnfBk3C3yFa/QicPRTHmW+ljgLGdKMCqI5ExhvTId1w== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:34 GMT server: AmazonS3 x-amz-id-2: jtEllYwZAVnS4V98eCIvffmBdiQajEMM6XgKTOrTYZ9wnfBk3C3yFa/QicPRTHmW+ljgLGdKMCqI5ExhvTId1w== x-amz-request-id: X05Q23R8Y5KEWD4P http_status_code: 409 request_id: X05Q23R8Y5KEWD4P retry_attempts: 0 ok: [b] An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. fatal: [f]: FAILED! => changed=false boto3_version: 1.24.82 botocore_version: 1.27.82 error: code: OperationAborted message: A conflicting conditional operation is currently in progress against this resource. Please try again. lifecycle_configuration: Rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled msg: 'An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again.' name: mybucket old_lifecycle_rules: - Filter: Prefix: '' ID: my_rule NoncurrentVersionTransitions: - NoncurrentDays: 30 StorageClass: STANDARD_IA Status: Enabled response_metadata: host_id: JzIHlPYjCIlIa+o88FYvcEvFBKCQDdo75C0Mdwcr6ZQCHdP2hkEetTKdCqVe0m+fi2RcPMpXwqNN4JBTcoactQ== http_headers: content-length: '308' content-type: application/xml date: Wed, 21 Dec 2022 06:33:35 GMT server: AmazonS3 x-amz-id-2: JzIHlPYjCIlIa+o88FYvcEvFBKCQDdo75C0Mdwcr6ZQCHdP2hkEetTKdCqVe0m+fi2RcPMpXwqNN4JBTcoactQ== x-amz-request-id: 3FV4FJPW4MPJZTW3 http_status_code: 409 request_id: 3FV4FJPW4MPJZTW3 retry_attempts: 0 An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (OperationAborted) when calling the PutBucketLifecycleConfiguration operation: A conflicting conditional operation is currently in progress against this resource. Please try again. ... 
PLAY RECAP ********************************************************************************************* a : ok=2 changed=2 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 b : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 c : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 d : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 e : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 f : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 g : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 h : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 i : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 j : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 k : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 ``` i.e. some reported success, with no change. Others threw an error. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/s3_lifecycle.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/s3_lifecycle.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @mdavis-xyz Can you debug e.g. with `q` and see what the value of the `changed` variable is, two lines above? https://github.com/ansible-collections/community.aws/blob/bdb7c9f26f6ff39654cd90e2dd18605a6e3b026c/plugins/modules/s3_lifecycle.py#L467 ```py import q q(changed) ``` and `cat /tmp/q` after it is executed. I've never heard of the `q` library before. Is this how you're supposed to debug Ansible modules? I've always struggled to debug code changes I've written, because even print statements don't work. We should add this to the contribution docs for this collection, and Ansible in general. Just to be clear, regardless of what `compare_and_update_configuration` does, `put_bucket_lifecycle_configuration` will always be called. https://github.com/ansible-collections/community.aws/blob/bdb7c9f26f6ff39654cd90e2dd18605a6e3b026c/plugins/modules/s3_lifecycle.py#L467-L476 There's really two issues here. 1. the module calls put when it shouldn't 2. the module reports changed=False after calling put For the second one, we can fix that with `changed |= True` in an `else` after that `try`. For the first one, perhaps everything after `compare_and_update_configuration` is called should be inside an `if changed`? I'll try the `q` thing later today. It will take me a while because figuring out how to run a clone of a module, without polluting my already-installed modules is not something that I find obvious nor easy. Ok I couldn't figure out how to run a playbook using a local clone of the module, without messing with my real global installation. (Are there docs for that somewhere? As an Ansible user I never need to touch galaxy or anything like that, because I only use the standard pre-installed collections.) So I just created a whole new VM to test in, and modified the file in the globally installed collection. `/tmp/q` is: ``` 0.3s create_lifecycle_rule: True 1.0s create_lifecycle_rule: False 1.4s create_lifecycle_rule: False 1.1s create_lifecycle_rule: False 1.2s create_lifecycle_rule: False 1.1s create_lifecycle_rule: False 1.0s create_lifecycle_rule: False 1.2s create_lifecycle_rule: False 1.1s create_lifecycle_rule: False 1.1s create_lifecycle_rule: False 0.7s create_lifecycle_rule: False 0.4s create_lifecycle_rule: False ``` So it was True the first time, as expected, and False the remainder, as expected. I tried wrapping up the put and try inside an `if` statement. That worked as expected. Now the MWE passes. 
(Not sure how to handle `_retries`) ``` (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule) if changed: # Write lifecycle to bucket try: client.put_bucket_lifecycle_configuration( aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_configuration) except is_boto3_error_message('At least one action needs to be specified in a rule'): # Amazon interpretted this as not changing anything changed = False except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules) _changed = changed _retries = 10 while wait and _changed and _retries: # We've seen examples where get_bucket_lifecycle_configuration returns # the updated rules, then the old rules, then the updated rules again, time.sleep(5) _retries -= 1 new_rules = fetch_rules(client, module, name) (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, new_rules, new_rule) else: _retries=0 new_rules = fetch_rules(client, module, name) module.exit_json(changed=changed, new_rule=new_rule, rules=new_rules, old_rules=old_lifecycle_rules, _retries=_retries, _config=lifecycle_configuration) ``` What's the best way to add a unit/integration test for this? My MWE uses multiple hosts. Is that easy to do with the existing test setup? Or is there a way to run with a `loop` concurrently on one host? > Ok I couldn't figure out how to run a playbook using a local clone of the module, without messing with my real global installation. (Are there docs for that somewhere?) Yeah, basically you can also place hacky/patched modules in your roles/playbook directory in the `library` folder. The only thing you must change than is to call `s3_lifecycle:` instead of `community.aws.s3_lifecycle:` See https://docs.ansible.com/ansible/2.8/user_guide/playbooks_best_practices.html#directory-layout > library/ # if any custom modules, put them here (optional) > What's the best way to add a unit/integration test for this? My MWE uses multiple hosts. Is that easy to do with the existing test setup? Or is there a way to run with a loop concurrently on one host? Maybe @goneri or @tremble got an idea about testing. Note for testing: my MWE only was for a non-empty list of rules. The same change applies for removing rules. In my PR I wrote the same change twice. For an integration test we may want to duplicate the last 2 tasks in the MWE to change present to absent, to test that second change.
2023-01-11T13:00:48
ansible-collections/community.aws
1,653
ansible-collections__community.aws-1653
[ "1163" ]
d0c5238416f59cc08aeca7dc05e25e2a516d2b51
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -447,11 +447,94 @@ def exec_command(self, cmd, in_data=None, sudoable=True): def _prepare_terminal(self): ''' perform any one-time terminal settings ''' + # No windows setup for now + if self.is_windows: + return + + # *_complete variables are 3 valued: + # - None: not started + # - False: started + # - True: complete + + startup_complete = False + disable_echo_complete = None + disable_echo_cmd = to_bytes("stty -echo\n", errors="surrogate_or_strict") + + disable_prompt_complete = None + end_mark = "".join( + [random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)] + ) + disable_prompt_cmd = to_bytes( + "PS1='' ; printf '\\n%s\\n' '" + end_mark + "'\n", + errors="surrogate_or_strict", + ) + disable_prompt_reply = re.compile( + r"\r\r\n" + re.escape(end_mark) + r"\r\r\n", re.MULTILINE + ) - if not self.is_windows: - cmd = "stty -echo\n" + "PS1=''\n" - cmd = to_bytes(cmd, errors='surrogate_or_strict') - self._session.stdin.write(cmd) + stdout = "" + # Custom command execution for when we're waiting for startup + stop_time = int(round(time.time())) + self.get_option("ssm_timeout") + while (not disable_prompt_complete) and (self._session.poll() is None): + remaining = stop_time - int(round(time.time())) + if remaining < 1: + self._timeout = True + display.vvvv( + "PRE timeout stdout: {0}".format(to_bytes(stdout)), host=self.host + ) + raise AnsibleConnectionFailure( + "SSM start_session timeout on host: %s" % self.instance_id + ) + if self._poll_stdout.poll(1000): + stdout += to_text(self._stdout.read(1024)) + display.vvvv( + "PRE stdout line: {0}".format(to_bytes(stdout)), host=self.host + ) + else: + display.vvvv("PRE remaining: {0}".format(remaining), host=self.host) + + # wait til prompt is ready + if startup_complete is False: + match = str(stdout).find("Starting session with SessionId") + if match != -1: + display.vvvv("PRE startup output received", host=self.host) + startup_complete = True + + # disable echo + if startup_complete and (disable_echo_complete is None): + display.vvvv( + "PRE Disabling Echo: {0}".format(disable_echo_cmd), host=self.host + ) + self._session.stdin.write(disable_echo_cmd) + disable_echo_complete = False + + if disable_echo_complete is False: + match = str(stdout).find("stty -echo") + if match != -1: + disable_echo_complete = True + + # disable prompt + if disable_echo_complete and disable_prompt_complete is None: + display.vvvv( + "PRE Disabling Prompt: {0}".format(disable_prompt_cmd), + host=self.host, + ) + self._session.stdin.write(disable_prompt_cmd) + disable_prompt_complete = False + + if disable_prompt_complete is False: + match = disable_prompt_reply.search(stdout) + if match: + stdout = stdout[match.end():] + disable_prompt_complete = True + + if not disable_prompt_complete: + raise AnsibleConnectionFailure( + "SSM process closed during _prepare_terminal on host: %s" + % self.instance_id + ) + else: + display.vvv("PRE Terminal configured", host=self.host) def _wrap_command(self, cmd, sudoable, mark_start, mark_end): ''' wrap command so stdout and status can be extracted ''' @@ -463,7 +546,11 @@ def _wrap_command(self, cmd, sudoable, mark_start, mark_end): else: if sudoable: cmd = "sudo " + cmd - cmd = "echo " + mark_start + "\n" + cmd + "\necho $'\\n'$?\n" + "echo " + mark_end + "\n" + cmd = ( + f"printf '%s\\n' '{mark_start}';\n" + f"echo | {cmd};\n" + f"printf '\\n%s\\n%s\\n' \"$?\" 
'{mark_end}';\n" + ) display.vvvv(u"_wrap_command: '{0}'".format(to_text(cmd)), host=self.host) return cmd
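The reworked `_wrap_command` above brackets the remote command between unique markers so that stdout and the exit status can later be recovered from the raw interactive stream. A stripped-down, self-contained sketch of that idea follows; it is illustrative only, and the `extract` helper is an assumption, not the plugin's actual parser:

```python
# Stripped-down illustration of the marker-wrapping technique used above:
# surround a shell command with unique markers, then recover its output and
# exit status from a raw session transcript. Not the plugin's real parser.
import random
import re
import string


def wrap(cmd):
    mark_start = "".join(random.choices(string.ascii_letters, k=26))
    mark_end = "".join(random.choices(string.ascii_letters, k=26))
    wrapped = (
        f"printf '%s\\n' '{mark_start}';\n"
        f"echo | {cmd};\n"
        f"printf '\\n%s\\n%s\\n' \"$?\" '{mark_end}';\n"
    )
    return wrapped, mark_start, mark_end


def extract(transcript, mark_start, mark_end):
    # Everything between the markers is the command output, followed by a
    # line holding the exit status written by the trailing printf.
    match = re.search(
        re.escape(mark_start) + r"\r?\n(.*)\r?\n(\d+)\r?\n" + re.escape(mark_end),
        transcript,
        re.DOTALL,
    )
    if not match:
        raise ValueError("markers not found in transcript")
    return int(match.group(2)), match.group(1)


# Example with a fake transcript such as an interactive session might emit:
wrapped, m1, m2 = wrap("uname")
fake = f"prompt noise\n{m1}\nLinux\n\n0\n{m2}\nmore noise"
rc, out = extract(fake, m1, m2)
print(rc, repr(out))  # 0 'Linux\n'
```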
Unable to use ansible_connection aws_ssm to configure Ubuntu 22.04 Jammy targets ### Summary Using the aws_ssm connection to provision a Ubuntu 22.04 Jammy target machine fails presumably due to unexpected characters in the output that it receives (see examples below). ### Issue Type Bug Report ### Component Name aws_ssm ### Ansible Version ```console ❱ ~ ansible --version /usr/local/Cellar/ansible/5.7.1/libexec/lib/python3.10/site-packages/paramiko/transport.py:236: CryptographyDeprecationWarning: Blowfish has been deprecated "class": algorithms.Blowfish, ansible [core 2.12.5] config file = None configured module search path = ['/Users/abent/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/Cellar/ansible/5.7.1/libexec/lib/python3.10/site-packages/ansible ansible collection location = /Users/abent/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.10.4 (main, Apr 26 2022, 19:42:59) [Clang 13.1.6 (clang-1316.0.21.2)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console ❱ ~ ansible-galaxy collection list # /Users/abent/.ansible/collections/ansible_collections Collection Version ------------- ------- amazon.aws 3.2.0 community.aws 3.2.1 # /usr/local/Cellar/ansible/5.7.1/libexec/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 2.2.0 ansible.netcommon 2.6.1 ansible.posix 1.3.0 ansible.utils 2.6.0 ansible.windows 1.9.0 arista.eos 3.1.0 awx.awx 19.4.0 azure.azcollection 1.12.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.2.0 cisco.aci 2.2.0 cisco.asa 2.1.0 cisco.intersight 1.0.18 cisco.ios 2.8.1 cisco.iosxr 2.9.0 cisco.ise 1.2.1 cisco.meraki 2.6.1 cisco.mso 1.4.0 cisco.nso 1.0.3 cisco.nxos 2.9.1 cisco.ucs 1.8.0 cloud.common 2.1.1 cloudscale_ch.cloud 2.2.1 community.aws 2.4.0 community.azure 1.1.0 community.ciscosmb 1.0.4 community.crypto 2.2.4 community.digitalocean 1.16.0 community.dns 2.1.0 community.docker 2.4.0 community.fortios 1.0.0 community.general 4.8.0 community.google 1.0.0 community.grafana 1.4.0 community.hashi_vault 2.4.0 community.hrobot 1.3.0 community.kubernetes 2.0.1 community.kubevirt 1.0.0 community.libvirt 1.0.2 community.mongodb 1.3.3 community.mysql 2.3.5 community.network 3.1.0 community.okd 2.1.0 community.postgresql 1.7.2 community.proxysql 1.3.2 community.rabbitmq 1.1.0 community.routeros 2.0.0 community.sap 1.0.0 community.skydive 1.0.0 community.sops 1.2.1 community.vmware 1.18.0 community.windows 1.9.0 community.zabbix 1.6.0 containers.podman 1.9.3 cyberark.conjur 1.1.0 cyberark.pas 1.0.13 dellemc.enterprise_sonic 1.1.0 dellemc.openmanage 4.4.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.16.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.4 frr.frr 1.0.3 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.6.0 hpe.nimble 1.1.4 ibm.qradar 1.0.3 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.2.1 inspur.sm 1.3.0 junipernetworks.junos 2.10.0 kubernetes.core 2.3.0 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.16.0 netapp.elementsw 21.7.0 netapp.ontap 21.18.1 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.0 ngine_io.cloudstack 2.2.3 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.1 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 1.6.6 purestorage.flasharray 1.12.1 purestorage.flashblade 1.9.0 sensu.sensu_go 1.13.1 
servicenow.servicenow 1.0.6 splunk.es 1.0.2 t_systems_mms.icinga_director 1.29.0 theforeman.foreman 2.2.0 vyos.vyos 2.8.0 wti.remote 1.0.3 ``` ### AWS SDK versions ```console ❱ ~ pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.23.0 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.26.0 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) # Inventory (aws_ec2.yaml) plugin: aws_ec2 aws_profile: qa strict: False regions: - us-east-2 # Example Automation (site.yaml) - hosts: all become: true vars: ansible_connection: aws_ssm ansible_python_interpreter: /usr/bin/python3 #Added to overcome python detection failure aws_s3_region: us-east-2 ansible_aws_ssm_profile: qa ansible_aws_ssm_region: us-east-2 ansible_aws_ssm_bucket_name: <redacted bucket name> ansible_aws_ssm_instance_id: <redacted instance id> tasks: - name: Test command ansible.builtin.shell: mkdir /tmp/foo ``` ### OS / Environment Ubuntu 22.04 LTS ### Steps to Reproduce 1. Set SSM Linux shell profile to `/bin/bash` in order to work around incompatibility with `/bin/dash`, which exists for Ubuntu 20.04 LTS. 2. Run the configuration provided above ```yaml ansible-playbook -i aws_ec2.yml site.yml ``` ### Expected Results 1. Python interpreter is detected 2. Empty folder `/tmp/foo` is created ### Actual Results 1. 
Python version fails to detect - verbose output from Gathering Facts, some IDs redacted with XXXX ``` <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> ESTABLISH SSM CONNECTION TO: i-03bd947822XXXXXXX <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> SSM CONNECTION ID: [email protected] <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC echo ~ <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r/home/ssm-user\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC ( umask 77 && mkdir -p "` echo /home/ssm-user/.ansible/tmp `"&& mkdir "` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323 `" && echo ansible-tmp-1653405370.894557-13636-81324261820323="` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323 `" ) <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\ransible-tmp-1653405370.894557-13636-81324261820323=/home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> Attempting python interpreter discovery <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC echo PLATFORM; uname; echo FOUND; command -v 'python3.10'; command -v 'python3.9'; command -v 'python3.8'; command -v 'python3.7'; command -v 'python3.6'; command -v 'python3.5'; command -v '/usr/bin/python3'; command -v '/usr/libexec/platform-python'; command -v 'python2.7'; command -v 'python2.6'; command -v '/usr/bin/python'; command -v 'python'; echo ENDFOUND <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\rPLATFORM\r\r\nLinux\r\r\nFOUND\r\r\n/usr/bin/python3.10\r\r\n/usr/bin/python3\r\r\nENDFOUND\r\r', '') [WARNING]: Unhandled error in Python interpreter discovery for host ec2-3-16-81-57.us-east-2.compute.amazonaws.com: unexpected output from Python interpreter discovery Using module file /usr/local/Cellar/ansible/5.7.1/libexec/lib/python3.10/site-packages/ansible/modules/setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> PUT /Users/abent/.ansible/tmp/ansible-local-13610fw9y0i6k/tmpnfywx2vw TO /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/AnsiballZ_setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC curl 
'https://XXXX.s3.amazonaws.com/i-03bd94782282fe77c//home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=ASIASE6ULJM5CWCSSYVQ%2F20220524%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Date=20220524T151612Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEJD%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLWVhc3QtMSJHMEUCIQDH2GZyL9nZYpBxHBFH3%2FH%2BHEpHGIQhgtbktk%2FGcY0LPAIgJZw9%2BYrZ62Dok15Agfy40vFcfN8zLa%2B9xxhzoj8caMEqiAMIeBADGgwxNDgwODQ4MzcxNzgiDE2enjGjMDfRwEomgirlArjBOtuUi0t0haBKlfdyi0KK3NOaivLGdT9Q8fomo29gHNdR3dCXePMHXPb29D3mjpUYyfbgQg7PIUeyW2OexxFgDWzLm3Td%2BuHFDm%2B6WKM7x8OpRrjnGpdcIXYRBMCRSYlWZS%2Fjcd2AD5DYSo6V4nd9jlRG5th24bIJ48rUtpKX2Jn2HOwrwvJg9CC64sxHgKS%2BZoFcEo1fqUutY3P2aBoJzRGGl%2BrYDjUYew3PQLiqa15LXixtnS66Ppltg5G6Zrm9RFNxxPEccw5r2FdNZxR5CLqZFC7%2BXFLoCSMEQy5J2%2FAlRq2z7k0IL9f7YnSGZgcwCprrHvz3GQVv%2Bp439siS5cez2RMVfTOJTnNCQIDOi0YSFcRqYvuWdeIycNIsZGBEJTOckvXNUuI3ZLxBpjxyI6xTFF3%2FFvc%2BncDMjRUv8YhRaakyD%2FsycwggSjGyfNBTSalnJPGTDKc2kVVTrlbQR%2BIEmDC87bOUBjqmARSckHcf%2BQ27lMTsgWO%2BuxO%2BVjbzx3ZX8VO4RLkQmWvmaEROCOhSWw2vVxKh1IiVAbM7i9Fb%2Br1WF27YK1mfHiMIcZu1fXWYcGmI4nRPjCNSpse7vTi%2FeXEPqKCWyWkjTlpGHCPPrD%2BMWMdZptWECslkk%2BSyu%2BEoaDUVYy%2FCSzlXZi1uxMBGvKaMwldLsvYNYKCnWpWX2Q1M7oN5vbB8NzmYPYGoFyo%3D&X-Amz-Signature=4c0b039a9c2c1a87dbc44bee09bdb18e5a99da85b41593b18f0d2d9bc31ca13b' -o '/home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/AnsiballZ_setup.py' <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 509 0 509 0 0 4129 0 --:--:-- --:--:-- --:--:-- 4138\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 509 0 509 0 0 4129 0 --:--:-- --:--:-- --:--:-- 4138\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC chmod u+x /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/ /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/AnsiballZ_setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r\x1b[?2004h\x1b[?2004l\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC sudo -H -S -n -u root /bin/sh -c 'echo BECOME-SUCCESS-wmigxosgjbdymebxzjuthvcxpronrokr ; /usr/bin/python /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/AnsiballZ_setup.py' ``` 2. If the python interpreter is manually specified, the `AnsiballZ_setup.py` script hangs waiting for output. However, investigating on the target machine, the script did indeed complete running, and is able to run successfully. 
``` <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> ESTABLISH SSM CONNECTION TO: i-03bd947822XXXXXXX <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> SSM CONNECTION ID: [email protected] <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC echo ~ <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r/home/ssm-user\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC ( umask 77 && mkdir -p "` echo /home/ssm-user/.ansible/tmp `"&& mkdir "` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497 `" && echo ansible-tmp-1653484883.342571-21201-234108973790497="` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497 `" ) <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\ransible-tmp-1653484883.342571-21201-234108973790497=/home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497\r\r', '') Using module file /usr/local/Cellar/ansible/5.7.1/libexec/lib/python3.10/site-packages/ansible/modules/setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> PUT /Users/abent/.ansible/tmp/ansible-local-2119271p8m00m/tmp1r3pqqr0 TO /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/AnsiballZ_setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC curl 'https://imt-ansible-aws-ssm-bucket-qa-us-east-2.s3.amazonaws.com/i-03bd947822XXXXXXX//home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=ASIASE6ULJM5NTXR7R5U%2F20220525%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Date=20220525T132124Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEKb%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLWVhc3QtMSJHMEUCIQDI6v71%2FadgKOOSpRmoqr8D4YBeGNTeJJYOQNFOZ%2B5O1AIgN%2F4EhrywCxPmlZYQG81P5H4%2FHvLx7KWNQwosNMLREX8qkQMIjv%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FARADGgwxNDgwODQ4MzcxNzgiDJmqMfH5Rm%2BRBoa%2BWSrlAp3NBPulNa255uMjuq2tUJJeDY60qSRvK9%2BA5Zk0MiOsc2eOfezBPVddZu93aaujhy9yQ23sNAVQ%2BrO04IO%2Br2NKqu0WCJhMLtx3OyHLDdqqZy1%2BFuyRkz%2FkgJeDimgrTPQy99B2VTHo3Ax%2F0G%2BXUi8MuZLlKzwp5I2LKoGYp91PIXjnoIjv5zHyVU5nx8c6SjZ5vDFt1avxXqc6jgZeLip8aUn6GM7z1dXpjhKUgXeW8q7oawSWNmD93ko2ceDqn7VzWvw6Va%2Fpy4YdnD2IHCcP0cdVxD5Cx1D2PtQ51PjOsiE6Ltkk4w5Ua7mVxZ%2BOxu6okTjdx56vMj7fPp1FkRgikyxp8UCElNgCmjDd7SOfoFqunrX9HaP23QLC9dTvHtMpviBDsEZqiU%2BIGuxmrNUlvp2R%2FhKKy9VKXKCjzIsMWLUrXuTOiu5wslWkNHieFOtvvcc1iObj1sYmll26vVq5Uj9E3zDU2riUBjqmAYveqZhqsCSRUWCZ3PH3MB8%2BHSzGCLNZ4HE1Foeu9QdOmL6kYCyFAOkm9uevonuVlXbcVshNinKUtWk%2Fam1L2%2BaxpU7uAgYiVOp8Fa3y97I35zcz%2BNDhzIHlf0TF%2F7I%2Bs%2Bclr4HmjuHC8nn%2BnQ8MFGW5OvVtsxdVEacIvP2qSl9uN6DosqGHKvbjFKQnCA43aJnfcMYpTVWQrIsvcd%2FeepD3isr1TdA%3D&X-Amz-Signature=e518065e2f831f3ad4f785825507c029e1423ca11044da4178714f97496ca13e' -o '/home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/AnsiballZ_setup.py' <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 285k 100 285k 0 0 1885k 0 --:--:-- --:--:-- --:--:-- 1887k\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 285k 100 285k 0 0 1885k 0 --:--:-- --:--:-- --:--:-- 1887k\r\r', '') 
<ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC chmod u+x /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/ /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/AnsiballZ_setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r\x1b[?2004h\x1b[?2004l\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC sudo -H -S -n -u root /bin/sh -c 'echo BECOME-SUCCESS-sfoqjiryaeqmfzpqvguespjphnvpenos ; /usr/bin/python3 /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/AnsiballZ_setup.py' ``` I suspect the issue has to do with the strange control characters in the output, but I am unclear as to the source of them. The machine image is a minimal Ubuntu install, and if I switch back to a 20.04 image instead of 22.04, everything works properly. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
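The "strange control characters" mentioned above are ordinary terminal escape sequences: `\x1b[?2004h` and `\x1b[?2004l` toggle bracketed-paste mode, which bash 5.1 (shipped with Ubuntu 22.04) enables by default, so they show up on Jammy but not on Focal. A throwaway sketch for inspecting or stripping them from a captured line (this is only an illustration, not what the plugin itself does):

```python
import re

# One of the captured lines from the verbose output above.
captured = "\x1b[?2004h\x1b[?2004l\r/home/ssm-user\r\r"

# CSI sequences such as the bracketed-paste toggles: ESC [ ... final-letter
ansi_csi = re.compile(r"\x1b\[[0-9;?]*[A-Za-z]")

print(repr(ansi_csi.sub("", captured)))  # -> '\r/home/ssm-user\r\r'
```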
Hello, I got the same problem, is someone can look to this problem. Thanks I'm running into the same issue. Some debugging points me to the following where it hangs waiting on the self._stdout.readline() on line 421 after receiving the command output: https://github.com/ansible-collections/community.aws/blob/cb9716e14d44357aaadd2be733bbaa0dd8a522bc/plugins/connection/aws_ssm.py#L420-L422 It should run four commands which are wrapped together in the `_wrap_command` function: ``` echo FPJvOAbODNLrUBDNwlzdtqUVqn sudo sudo -H -S -n -u root /bin/sh -c 'echo BECOME-SUCCESS-mjvhieztmhqnhbcfiprxpidwfjxyvqzj ; /usr/bin/python3 /home/ssm-user/.ansible/tmp/ansible-tmp-1661540567.1878705-101559-269564787724829/AnsiballZ_ping.py' echo $'\n'$? echo iXsDFJCFrsWXYtdqfofbGhMbzm ``` Then it provides to output: ``` EXEC stdout line: FPJvOAbODNLrUBDNwlzdtqUVqn EXEC stdout line: EXEC stdout line: {"ping": "pong", "invocation": {"module_args": {"data": "pong"}}} ``` What it should be: ``` EXEC stdout line: FPJvOAbODNLrUBDNwlzdtqUVqn EXEC stdout line: EXEC stdout line: {"ping": "pong", "invocation": {"module_args": {"data": "pong"}}} EXEC stdout line: EXEC stdout line: 0 EXEC stdout line: iXsDFJCFrsWXYtdqfofbGhMbzm POST_PROCESS: {"ping": "pong", "invocation": {"module_args": {"data": "pong"}}} 0 ``` I also have the same issue that I need to provide the Python interpreter, this might be linked to each other because the query result is working: ``` echo PLATFORM; uname; echo FOUND; command -v 'python3.10'; command -v 'python3.9'; command -v 'python3.8'; command -v 'python3.7'; command -v 'python3.6'; command -v 'python3.5'; command -v '/usr/bin/python3'; command -v '/usr/libexec/platform-python'; command -v 'python2.7'; command -v 'python2.6'; command -v '/usr/bin/python'; command -v 'python'; echo ENDFOUND POST_PROCESS: PLATFORM Linux FOUND /usr/bin/python3.10 /usr/bin/python3 ENDFOUND ``` Unfortunately my Python knowledge isn't enough to further debug and find a solution. Experiencing the exact same issue. Is there any workaround available? 
Experiencing the same issue on amazonlinux ```<i-0f7a67efa9c5edae5> ESTABLISH SSM CONNECTION TO: i-0f7a67efa9c5edae5 <i-0f7a67efa9c5edae5> SSM CONNECTION ID: gl_gitlab_traefik-06221e159cef6ebda <i-0f7a67efa9c5edae5> EXEC echo ~ <i-0f7a67efa9c5edae5> (0, '/home/ssm-user\r\r', '') <i-0f7a67efa9c5edae5> EXEC ( umask 77 && mkdir -p "` echo /home/ssm-user/.ansible/tmp `"&& mkdir "` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205 `" && echo ansible-tmp-1 666267435.5919795-7743-117523018265205="` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205 `" ) <i-0f7a67efa9c5edae5> (0, 'ansible-tmp-1666267435.5919795-7743-117523018265205=/home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205\r\r', '') Using module file /usr/lib/python3/dist-packages/ansible/modules/setup.py <i-0f7a67efa9c5edae5> PUT /root/.ansible/tmp/ansible-local-774056h3q49o/tmpj0326k_f TO /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py <i-0f7a67efa9c5edae5> EXEC curl 'https://ansible-ssm-gitlab.s3.amazonaws.com/i-0f7a67efa9c5edae5//home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py?X-Amz-Algorit hm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIATJO4GSHEUW7EIKGG%2F20221020%2Fap-southeast-1%2Fs3%2Faws4_request&X-Amz-Date=20221020T120356Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=bca081bf63d6234 339fd966289a572af4a3eb059068f13174d83718674b68ec8' -o '/home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py' <i-0f7a67efa9c5edae5> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 472 0 472 0 0 550 0 --:--:-- --:--: -- --:--:-- 550\r\r', '') <i-0f7a67efa9c5edae5> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 472 0 472 0 0 550 0 --:--:-- --:--: -- --:--:-- 550\r\r', '') <i-0f7a67efa9c5edae5> EXEC chmod u+x /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/ /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_s etup.py <i-0f7a67efa9c5edae5> (0, '\r', '') <i-0f7a67efa9c5edae5> EXEC /bin/python3 /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py <i-0f7a67efa9c5edae5> (1, ' File "/home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py", line 1\r\r\n <?xml version="1.0" encoding="UTF-8"?>\r\r\n ^\r\r\nSy ntaxError: invalid syntax\r\r', '') <i-0f7a67efa9c5edae5> EXEC rm -f -r /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/ > /dev/null 2>&1 <i-0f7a67efa9c5edae5> (0, '\r', '') <i-0f7a67efa9c5edae5> CLOSING SSM CONNECTION TO: i-0f7a67efa9c5edae5 fatal: [my-instance]: FAILED! 
=> { "ansible_facts": {}, "changed": false, "failed_modules": { "ansible.legacy.setup": { "failed": true, "module_stderr": "", "module_stdout": " File \"/home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py\", line 1\r\r\n <?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\r\n ^\r\r\nSyntaxError: invalid syntax\r\r", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1 } }, "msg": "The following modules failed to execute: ansible.legacy.setup\n" } ``` I am now on the following versions, and I no longer experience the problem: ``` Collection Version ----------------------------- ------- amazon.aws 3.5.0 community.aws 3.6.0 The issues seems to be on line 466 of aws_ssm.py. https://github.com/ansible-collections/community.aws/blob/cb9716e14d44357aaadd2be733bbaa0dd8a522bc/plugins/connection/aws_ssm.py#L465-L467 In Dash (which is symlinked as /bin/sh), an `echo $'\n'$?`, returns ``` $ 0 ``` While Bash returns ``` 0 ``` Same problem here, did some debugging between an instance running Ubuntu 22.94 minimal (default shell is dash) and another one that is working and use Amazon Linux 2. What I see is that on Ubuntu the command itself is parsed instead of it results, comparing the outputs Amazon Linux 2 ``` <i-0846ef408bafd44b1> POST_PROCESS: > ansible-tmp-1668587955.2343445-102-113985281649023=/tmp/.ansible-ssm/ansible-tmp-1668587955.2343445-102-113985281649023 0 <i-0846ef408bafd44b1> (0, '> ansible-tmp-1668587955.2343445-102-113985281649023=/tmp/.ansible-ssm/ansible-tmp-1668587955.2343445-102-113985281649023\r\r', '') ``` Ubuntu 22.04 ``` <i-0845d1adbe853d3e0> POST_PROCESS: ( umask 77 && mkdir -p "` echo /tmp/.ansible-ssm `"&& mkdir -p "` echo /tmp/.ansible-ssm/ansible-tmp-1668533193.4742794-568-272265113538326 `" && echo ansible-tmp-1668533193.4742794-568-272265113538326="` echo /tmp/.ansible-ssm/ansible-tmp-1668533193.4742794-568-272265113538326 `" ) echo $'\n'$? <i-0845d1adbe853d3e0> ssm_retry: attempt: 0, caught exception(invalid literal for int() with base 10: "echo $'\\n'$?") from cmd (( umask 77 && mkdir -p "` echo /tmp/.ansible-ssm `"&& mkdir -p "` echo /tmp/.ansible-ssm/ansible-tmp-1668533193.4742794-568-272265113538326 `" && echo ansible-tmp-1668533193.4742794-568-272265113538326="` echo /tmp/.ansible-ssm/ansible-tmp-1668533193.4742794-568-272265113538326 `" )...), pausing for 0 seconds ``` I still do not know why, but writing the command all on a single line works, so if you want a work around apply this patch, I am going to open a PR too ``` index 3734d64c..a558f47c 100644 --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -463,7 +463,7 @@ class Connection(ConnectionBase): else: if sudoable: cmd = "sudo " + cmd - cmd = "echo " + mark_start + "\n" + cmd + "\necho $'\\n'$?\n" + "echo " + mark_end + "\n" + cmd = " echo " + mark_start + "; " + cmd + "; echo $'\\n'$?; " + " echo " + mark_end + ";\n" display.vvvv(u"_wrap_command: '{0}'".format(to_text(cmd)), host=self.host) return cmd ```
2023-01-12T16:33:11
ansible-collections/community.aws
1,654
ansible-collections__community.aws-1654
[ "1163" ]
8dfe3649f68f2a24b66051bed83636ba8ca11dd1
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -447,11 +447,94 @@ def exec_command(self, cmd, in_data=None, sudoable=True): def _prepare_terminal(self): ''' perform any one-time terminal settings ''' + # No windows setup for now + if self.is_windows: + return + + # *_complete variables are 3 valued: + # - None: not started + # - False: started + # - True: complete + + startup_complete = False + disable_echo_complete = None + disable_echo_cmd = to_bytes("stty -echo\n", errors="surrogate_or_strict") + + disable_prompt_complete = None + end_mark = "".join( + [random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)] + ) + disable_prompt_cmd = to_bytes( + "PS1='' ; printf '\\n%s\\n' '" + end_mark + "'\n", + errors="surrogate_or_strict", + ) + disable_prompt_reply = re.compile( + r"\r\r\n" + re.escape(end_mark) + r"\r\r\n", re.MULTILINE + ) - if not self.is_windows: - cmd = "stty -echo\n" + "PS1=''\n" - cmd = to_bytes(cmd, errors='surrogate_or_strict') - self._session.stdin.write(cmd) + stdout = "" + # Custom command execution for when we're waiting for startup + stop_time = int(round(time.time())) + self.get_option("ssm_timeout") + while (not disable_prompt_complete) and (self._session.poll() is None): + remaining = stop_time - int(round(time.time())) + if remaining < 1: + self._timeout = True + display.vvvv( + "PRE timeout stdout: {0}".format(to_bytes(stdout)), host=self.host + ) + raise AnsibleConnectionFailure( + "SSM start_session timeout on host: %s" % self.instance_id + ) + if self._poll_stdout.poll(1000): + stdout += to_text(self._stdout.read(1024)) + display.vvvv( + "PRE stdout line: {0}".format(to_bytes(stdout)), host=self.host + ) + else: + display.vvvv("PRE remaining: {0}".format(remaining), host=self.host) + + # wait til prompt is ready + if startup_complete is False: + match = str(stdout).find("Starting session with SessionId") + if match != -1: + display.vvvv("PRE startup output received", host=self.host) + startup_complete = True + + # disable echo + if startup_complete and (disable_echo_complete is None): + display.vvvv( + "PRE Disabling Echo: {0}".format(disable_echo_cmd), host=self.host + ) + self._session.stdin.write(disable_echo_cmd) + disable_echo_complete = False + + if disable_echo_complete is False: + match = str(stdout).find("stty -echo") + if match != -1: + disable_echo_complete = True + + # disable prompt + if disable_echo_complete and disable_prompt_complete is None: + display.vvvv( + "PRE Disabling Prompt: {0}".format(disable_prompt_cmd), + host=self.host, + ) + self._session.stdin.write(disable_prompt_cmd) + disable_prompt_complete = False + + if disable_prompt_complete is False: + match = disable_prompt_reply.search(stdout) + if match: + stdout = stdout[match.end():] + disable_prompt_complete = True + + if not disable_prompt_complete: + raise AnsibleConnectionFailure( + "SSM process closed during _prepare_terminal on host: %s" + % self.instance_id + ) + else: + display.vvv("PRE Terminal configured", host=self.host) def _wrap_command(self, cmd, sudoable, mark_start, mark_end): ''' wrap command so stdout and status can be extracted ''' @@ -463,7 +546,11 @@ def _wrap_command(self, cmd, sudoable, mark_start, mark_end): else: if sudoable: cmd = "sudo " + cmd - cmd = "echo " + mark_start + "\n" + cmd + "\necho $'\\n'$?\n" + "echo " + mark_end + "\n" + cmd = ( + f"printf '%s\\n' '{mark_start}';\n" + f"echo | {cmd};\n" + f"printf '\\n%s\\n%s\\n' \"$?\" 
'{mark_end}';\n" + ) display.vvvv(u"_wrap_command: '{0}'".format(to_text(cmd)), host=self.host) return cmd
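For reference, the non-Windows branch of the patched `_wrap_command` now emits a small three-line shell script around each command. A standalone rendition of just that string building (sudo handling omitted, marker strings made up) is shown below.

```python
def wrap_command(cmd: str, mark_start: str, mark_end: str) -> str:
    # Print the start marker, run the command with an empty stdin, then print
    # the exit status and the end marker on their own lines so the plugin can
    # locate the output and the return code when it reads the session back.
    return (
        f"printf '%s\\n' '{mark_start}';\n"
        f"echo | {cmd};\n"
        f"printf '\\n%s\\n%s\\n' \"$?\" '{mark_end}';\n"
    )


# Hypothetical markers; the plugin generates random ones per command.
print(wrap_command("uname -a", "MARKSTART", "MARKEND"))
```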
Unable to use ansible_connection aws_ssm to configure Ubuntu 22.04 Jammy targets ### Summary Using the aws_ssm connection to provision a Ubuntu 22.04 Jammy target machine fails presumably due to unexpected characters in the output that it receives (see examples below). ### Issue Type Bug Report ### Component Name aws_ssm ### Ansible Version ```console ❱ ~ ansible --version /usr/local/Cellar/ansible/5.7.1/libexec/lib/python3.10/site-packages/paramiko/transport.py:236: CryptographyDeprecationWarning: Blowfish has been deprecated "class": algorithms.Blowfish, ansible [core 2.12.5] config file = None configured module search path = ['/Users/abent/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/Cellar/ansible/5.7.1/libexec/lib/python3.10/site-packages/ansible ansible collection location = /Users/abent/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.10.4 (main, Apr 26 2022, 19:42:59) [Clang 13.1.6 (clang-1316.0.21.2)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console ❱ ~ ansible-galaxy collection list # /Users/abent/.ansible/collections/ansible_collections Collection Version ------------- ------- amazon.aws 3.2.0 community.aws 3.2.1 # /usr/local/Cellar/ansible/5.7.1/libexec/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 2.2.0 ansible.netcommon 2.6.1 ansible.posix 1.3.0 ansible.utils 2.6.0 ansible.windows 1.9.0 arista.eos 3.1.0 awx.awx 19.4.0 azure.azcollection 1.12.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.2.0 cisco.aci 2.2.0 cisco.asa 2.1.0 cisco.intersight 1.0.18 cisco.ios 2.8.1 cisco.iosxr 2.9.0 cisco.ise 1.2.1 cisco.meraki 2.6.1 cisco.mso 1.4.0 cisco.nso 1.0.3 cisco.nxos 2.9.1 cisco.ucs 1.8.0 cloud.common 2.1.1 cloudscale_ch.cloud 2.2.1 community.aws 2.4.0 community.azure 1.1.0 community.ciscosmb 1.0.4 community.crypto 2.2.4 community.digitalocean 1.16.0 community.dns 2.1.0 community.docker 2.4.0 community.fortios 1.0.0 community.general 4.8.0 community.google 1.0.0 community.grafana 1.4.0 community.hashi_vault 2.4.0 community.hrobot 1.3.0 community.kubernetes 2.0.1 community.kubevirt 1.0.0 community.libvirt 1.0.2 community.mongodb 1.3.3 community.mysql 2.3.5 community.network 3.1.0 community.okd 2.1.0 community.postgresql 1.7.2 community.proxysql 1.3.2 community.rabbitmq 1.1.0 community.routeros 2.0.0 community.sap 1.0.0 community.skydive 1.0.0 community.sops 1.2.1 community.vmware 1.18.0 community.windows 1.9.0 community.zabbix 1.6.0 containers.podman 1.9.3 cyberark.conjur 1.1.0 cyberark.pas 1.0.13 dellemc.enterprise_sonic 1.1.0 dellemc.openmanage 4.4.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.16.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.4 frr.frr 1.0.3 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.6.0 hpe.nimble 1.1.4 ibm.qradar 1.0.3 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.2.1 inspur.sm 1.3.0 junipernetworks.junos 2.10.0 kubernetes.core 2.3.0 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.16.0 netapp.elementsw 21.7.0 netapp.ontap 21.18.1 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.0 ngine_io.cloudstack 2.2.3 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.1 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 1.6.6 purestorage.flasharray 1.12.1 purestorage.flashblade 1.9.0 sensu.sensu_go 1.13.1 
servicenow.servicenow 1.0.6 splunk.es 1.0.2 t_systems_mms.icinga_director 1.29.0 theforeman.foreman 2.2.0 vyos.vyos 2.8.0 wti.remote 1.0.3 ``` ### AWS SDK versions ```console ❱ ~ pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.23.0 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.26.0 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) # Inventory (aws_ec2.yaml) plugin: aws_ec2 aws_profile: qa strict: False regions: - us-east-2 # Example Automation (site.yaml) - hosts: all become: true vars: ansible_connection: aws_ssm ansible_python_interpreter: /usr/bin/python3 #Added to overcome python detection failure aws_s3_region: us-east-2 ansible_aws_ssm_profile: qa ansible_aws_ssm_region: us-east-2 ansible_aws_ssm_bucket_name: <redacted bucket name> ansible_aws_ssm_instance_id: <redacted instance id> tasks: - name: Test command ansible.builtin.shell: mkdir /tmp/foo ``` ### OS / Environment Ubuntu 22.04 LTS ### Steps to Reproduce 1. Set SSM Linux shell profile to `/bin/bash` in order to work around incompatibility with `/bin/dash`, which exists for Ubuntu 20.04 LTS. 2. Run the configuration provided above ```yaml ansible-playbook -i aws_ec2.yml site.yml ``` ### Expected Results 1. Python interpreter is detected 2. Empty folder `/tmp/foo` is created ### Actual Results 1. 
Python version fails to detect - verbose output from Gathering Facts, some IDs redacted with XXXX ``` <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> ESTABLISH SSM CONNECTION TO: i-03bd947822XXXXXXX <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> SSM CONNECTION ID: [email protected] <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC echo ~ <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r/home/ssm-user\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC ( umask 77 && mkdir -p "` echo /home/ssm-user/.ansible/tmp `"&& mkdir "` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323 `" && echo ansible-tmp-1653405370.894557-13636-81324261820323="` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323 `" ) <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\ransible-tmp-1653405370.894557-13636-81324261820323=/home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> Attempting python interpreter discovery <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC echo PLATFORM; uname; echo FOUND; command -v 'python3.10'; command -v 'python3.9'; command -v 'python3.8'; command -v 'python3.7'; command -v 'python3.6'; command -v 'python3.5'; command -v '/usr/bin/python3'; command -v '/usr/libexec/platform-python'; command -v 'python2.7'; command -v 'python2.6'; command -v '/usr/bin/python'; command -v 'python'; echo ENDFOUND <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\rPLATFORM\r\r\nLinux\r\r\nFOUND\r\r\n/usr/bin/python3.10\r\r\n/usr/bin/python3\r\r\nENDFOUND\r\r', '') [WARNING]: Unhandled error in Python interpreter discovery for host ec2-3-16-81-57.us-east-2.compute.amazonaws.com: unexpected output from Python interpreter discovery Using module file /usr/local/Cellar/ansible/5.7.1/libexec/lib/python3.10/site-packages/ansible/modules/setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> PUT /Users/abent/.ansible/tmp/ansible-local-13610fw9y0i6k/tmpnfywx2vw TO /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/AnsiballZ_setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC curl 
'https://XXXX.s3.amazonaws.com/i-03bd94782282fe77c//home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=ASIASE6ULJM5CWCSSYVQ%2F20220524%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Date=20220524T151612Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEJD%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLWVhc3QtMSJHMEUCIQDH2GZyL9nZYpBxHBFH3%2FH%2BHEpHGIQhgtbktk%2FGcY0LPAIgJZw9%2BYrZ62Dok15Agfy40vFcfN8zLa%2B9xxhzoj8caMEqiAMIeBADGgwxNDgwODQ4MzcxNzgiDE2enjGjMDfRwEomgirlArjBOtuUi0t0haBKlfdyi0KK3NOaivLGdT9Q8fomo29gHNdR3dCXePMHXPb29D3mjpUYyfbgQg7PIUeyW2OexxFgDWzLm3Td%2BuHFDm%2B6WKM7x8OpRrjnGpdcIXYRBMCRSYlWZS%2Fjcd2AD5DYSo6V4nd9jlRG5th24bIJ48rUtpKX2Jn2HOwrwvJg9CC64sxHgKS%2BZoFcEo1fqUutY3P2aBoJzRGGl%2BrYDjUYew3PQLiqa15LXixtnS66Ppltg5G6Zrm9RFNxxPEccw5r2FdNZxR5CLqZFC7%2BXFLoCSMEQy5J2%2FAlRq2z7k0IL9f7YnSGZgcwCprrHvz3GQVv%2Bp439siS5cez2RMVfTOJTnNCQIDOi0YSFcRqYvuWdeIycNIsZGBEJTOckvXNUuI3ZLxBpjxyI6xTFF3%2FFvc%2BncDMjRUv8YhRaakyD%2FsycwggSjGyfNBTSalnJPGTDKc2kVVTrlbQR%2BIEmDC87bOUBjqmARSckHcf%2BQ27lMTsgWO%2BuxO%2BVjbzx3ZX8VO4RLkQmWvmaEROCOhSWw2vVxKh1IiVAbM7i9Fb%2Br1WF27YK1mfHiMIcZu1fXWYcGmI4nRPjCNSpse7vTi%2FeXEPqKCWyWkjTlpGHCPPrD%2BMWMdZptWECslkk%2BSyu%2BEoaDUVYy%2FCSzlXZi1uxMBGvKaMwldLsvYNYKCnWpWX2Q1M7oN5vbB8NzmYPYGoFyo%3D&X-Amz-Signature=4c0b039a9c2c1a87dbc44bee09bdb18e5a99da85b41593b18f0d2d9bc31ca13b' -o '/home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/AnsiballZ_setup.py' <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 509 0 509 0 0 4129 0 --:--:-- --:--:-- --:--:-- 4138\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 509 0 509 0 0 4129 0 --:--:-- --:--:-- --:--:-- 4138\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC chmod u+x /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/ /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/AnsiballZ_setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r\x1b[?2004h\x1b[?2004l\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC sudo -H -S -n -u root /bin/sh -c 'echo BECOME-SUCCESS-wmigxosgjbdymebxzjuthvcxpronrokr ; /usr/bin/python /home/ssm-user/.ansible/tmp/ansible-tmp-1653405370.894557-13636-81324261820323/AnsiballZ_setup.py' ``` 2. If the python interpreter is manually specified, the `AnsiballZ_setup.py` script hangs waiting for output. However, investigating on the target machine, the script did indeed complete running, and is able to run successfully. 
``` <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> ESTABLISH SSM CONNECTION TO: i-03bd947822XXXXXXX <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> SSM CONNECTION ID: [email protected] <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC echo ~ <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r/home/ssm-user\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC ( umask 77 && mkdir -p "` echo /home/ssm-user/.ansible/tmp `"&& mkdir "` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497 `" && echo ansible-tmp-1653484883.342571-21201-234108973790497="` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497 `" ) <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\ransible-tmp-1653484883.342571-21201-234108973790497=/home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497\r\r', '') Using module file /usr/local/Cellar/ansible/5.7.1/libexec/lib/python3.10/site-packages/ansible/modules/setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> PUT /Users/abent/.ansible/tmp/ansible-local-2119271p8m00m/tmp1r3pqqr0 TO /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/AnsiballZ_setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC curl 'https://imt-ansible-aws-ssm-bucket-qa-us-east-2.s3.amazonaws.com/i-03bd947822XXXXXXX//home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=ASIASE6ULJM5NTXR7R5U%2F20220525%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Date=20220525T132124Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEKb%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLWVhc3QtMSJHMEUCIQDI6v71%2FadgKOOSpRmoqr8D4YBeGNTeJJYOQNFOZ%2B5O1AIgN%2F4EhrywCxPmlZYQG81P5H4%2FHvLx7KWNQwosNMLREX8qkQMIjv%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FARADGgwxNDgwODQ4MzcxNzgiDJmqMfH5Rm%2BRBoa%2BWSrlAp3NBPulNa255uMjuq2tUJJeDY60qSRvK9%2BA5Zk0MiOsc2eOfezBPVddZu93aaujhy9yQ23sNAVQ%2BrO04IO%2Br2NKqu0WCJhMLtx3OyHLDdqqZy1%2BFuyRkz%2FkgJeDimgrTPQy99B2VTHo3Ax%2F0G%2BXUi8MuZLlKzwp5I2LKoGYp91PIXjnoIjv5zHyVU5nx8c6SjZ5vDFt1avxXqc6jgZeLip8aUn6GM7z1dXpjhKUgXeW8q7oawSWNmD93ko2ceDqn7VzWvw6Va%2Fpy4YdnD2IHCcP0cdVxD5Cx1D2PtQ51PjOsiE6Ltkk4w5Ua7mVxZ%2BOxu6okTjdx56vMj7fPp1FkRgikyxp8UCElNgCmjDd7SOfoFqunrX9HaP23QLC9dTvHtMpviBDsEZqiU%2BIGuxmrNUlvp2R%2FhKKy9VKXKCjzIsMWLUrXuTOiu5wslWkNHieFOtvvcc1iObj1sYmll26vVq5Uj9E3zDU2riUBjqmAYveqZhqsCSRUWCZ3PH3MB8%2BHSzGCLNZ4HE1Foeu9QdOmL6kYCyFAOkm9uevonuVlXbcVshNinKUtWk%2Fam1L2%2BaxpU7uAgYiVOp8Fa3y97I35zcz%2BNDhzIHlf0TF%2F7I%2Bs%2Bclr4HmjuHC8nn%2BnQ8MFGW5OvVtsxdVEacIvP2qSl9uN6DosqGHKvbjFKQnCA43aJnfcMYpTVWQrIsvcd%2FeepD3isr1TdA%3D&X-Amz-Signature=e518065e2f831f3ad4f785825507c029e1423ca11044da4178714f97496ca13e' -o '/home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/AnsiballZ_setup.py' <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 285k 100 285k 0 0 1885k 0 --:--:-- --:--:-- --:--:-- 1887k\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 285k 100 285k 0 0 1885k 0 --:--:-- --:--:-- --:--:-- 1887k\r\r', '') 
<ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC chmod u+x /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/ /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/AnsiballZ_setup.py <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> (0, '\x1b[?2004h\x1b[?2004l\r\x1b[?2004h\x1b[?2004l\r\r', '') <ec2-3-16-81-57.us-east-2.compute.amazonaws.com> EXEC sudo -H -S -n -u root /bin/sh -c 'echo BECOME-SUCCESS-sfoqjiryaeqmfzpqvguespjphnvpenos ; /usr/bin/python3 /home/ssm-user/.ansible/tmp/ansible-tmp-1653484883.342571-21201-234108973790497/AnsiballZ_setup.py' ``` I suspect the issue has to do with the strange control characters in the output, but I am unclear as to the source of them. The machine image is a minimal Ubuntu install, and if I switch back to a 20.04 image instead of 22.04, everything works properly. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Hello, I got the same problem, is someone can look to this problem. Thanks I'm running into the same issue. Some debugging points me to the following where it hangs waiting on the self._stdout.readline() on line 421 after receiving the command output: https://github.com/ansible-collections/community.aws/blob/cb9716e14d44357aaadd2be733bbaa0dd8a522bc/plugins/connection/aws_ssm.py#L420-L422 It should run four commands which are wrapped together in the `_wrap_command` function: ``` echo FPJvOAbODNLrUBDNwlzdtqUVqn sudo sudo -H -S -n -u root /bin/sh -c 'echo BECOME-SUCCESS-mjvhieztmhqnhbcfiprxpidwfjxyvqzj ; /usr/bin/python3 /home/ssm-user/.ansible/tmp/ansible-tmp-1661540567.1878705-101559-269564787724829/AnsiballZ_ping.py' echo $'\n'$? echo iXsDFJCFrsWXYtdqfofbGhMbzm ``` Then it provides to output: ``` EXEC stdout line: FPJvOAbODNLrUBDNwlzdtqUVqn EXEC stdout line: EXEC stdout line: {"ping": "pong", "invocation": {"module_args": {"data": "pong"}}} ``` What it should be: ``` EXEC stdout line: FPJvOAbODNLrUBDNwlzdtqUVqn EXEC stdout line: EXEC stdout line: {"ping": "pong", "invocation": {"module_args": {"data": "pong"}}} EXEC stdout line: EXEC stdout line: 0 EXEC stdout line: iXsDFJCFrsWXYtdqfofbGhMbzm POST_PROCESS: {"ping": "pong", "invocation": {"module_args": {"data": "pong"}}} 0 ``` I also have the same issue that I need to provide the Python interpreter, this might be linked to each other because the query result is working: ``` echo PLATFORM; uname; echo FOUND; command -v 'python3.10'; command -v 'python3.9'; command -v 'python3.8'; command -v 'python3.7'; command -v 'python3.6'; command -v 'python3.5'; command -v '/usr/bin/python3'; command -v '/usr/libexec/platform-python'; command -v 'python2.7'; command -v 'python2.6'; command -v '/usr/bin/python'; command -v 'python'; echo ENDFOUND POST_PROCESS: PLATFORM Linux FOUND /usr/bin/python3.10 /usr/bin/python3 ENDFOUND ``` Unfortunately my Python knowledge isn't enough to further debug and find a solution. Experiencing the exact same issue. Is there any workaround available? 
Experiencing the same issue on amazonlinux ```<i-0f7a67efa9c5edae5> ESTABLISH SSM CONNECTION TO: i-0f7a67efa9c5edae5 <i-0f7a67efa9c5edae5> SSM CONNECTION ID: gl_gitlab_traefik-06221e159cef6ebda <i-0f7a67efa9c5edae5> EXEC echo ~ <i-0f7a67efa9c5edae5> (0, '/home/ssm-user\r\r', '') <i-0f7a67efa9c5edae5> EXEC ( umask 77 && mkdir -p "` echo /home/ssm-user/.ansible/tmp `"&& mkdir "` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205 `" && echo ansible-tmp-1 666267435.5919795-7743-117523018265205="` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205 `" ) <i-0f7a67efa9c5edae5> (0, 'ansible-tmp-1666267435.5919795-7743-117523018265205=/home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205\r\r', '') Using module file /usr/lib/python3/dist-packages/ansible/modules/setup.py <i-0f7a67efa9c5edae5> PUT /root/.ansible/tmp/ansible-local-774056h3q49o/tmpj0326k_f TO /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py <i-0f7a67efa9c5edae5> EXEC curl 'https://ansible-ssm-gitlab.s3.amazonaws.com/i-0f7a67efa9c5edae5//home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py?X-Amz-Algorit hm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIATJO4GSHEUW7EIKGG%2F20221020%2Fap-southeast-1%2Fs3%2Faws4_request&X-Amz-Date=20221020T120356Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=bca081bf63d6234 339fd966289a572af4a3eb059068f13174d83718674b68ec8' -o '/home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py' <i-0f7a67efa9c5edae5> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 472 0 472 0 0 550 0 --:--:-- --:--: -- --:--:-- 550\r\r', '') <i-0f7a67efa9c5edae5> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 472 0 472 0 0 550 0 --:--:-- --:--: -- --:--:-- 550\r\r', '') <i-0f7a67efa9c5edae5> EXEC chmod u+x /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/ /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_s etup.py <i-0f7a67efa9c5edae5> (0, '\r', '') <i-0f7a67efa9c5edae5> EXEC /bin/python3 /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py <i-0f7a67efa9c5edae5> (1, ' File "/home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py", line 1\r\r\n <?xml version="1.0" encoding="UTF-8"?>\r\r\n ^\r\r\nSy ntaxError: invalid syntax\r\r', '') <i-0f7a67efa9c5edae5> EXEC rm -f -r /home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/ > /dev/null 2>&1 <i-0f7a67efa9c5edae5> (0, '\r', '') <i-0f7a67efa9c5edae5> CLOSING SSM CONNECTION TO: i-0f7a67efa9c5edae5 fatal: [my-instance]: FAILED! 
=> { "ansible_facts": {}, "changed": false, "failed_modules": { "ansible.legacy.setup": { "failed": true, "module_stderr": "", "module_stdout": " File \"/home/ssm-user/.ansible/tmp/ansible-tmp-1666267435.5919795-7743-117523018265205/AnsiballZ_setup.py\", line 1\r\r\n <?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\r\n ^\r\r\nSyntaxError: invalid syntax\r\r", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1 } }, "msg": "The following modules failed to execute: ansible.legacy.setup\n" } ``` I am now on the following versions, and I no longer experience the problem: ``` Collection Version ----------------------------- ------- amazon.aws 3.5.0 community.aws 3.6.0 The issues seems to be on line 466 of aws_ssm.py. https://github.com/ansible-collections/community.aws/blob/cb9716e14d44357aaadd2be733bbaa0dd8a522bc/plugins/connection/aws_ssm.py#L465-L467 In Dash (which is symlinked as /bin/sh), an `echo $'\n'$?`, returns ``` $ 0 ``` While Bash returns ``` 0 ``` Same problem here, did some debugging between an instance running Ubuntu 22.94 minimal (default shell is dash) and another one that is working and use Amazon Linux 2. What I see is that on Ubuntu the command itself is parsed instead of it results, comparing the outputs Amazon Linux 2 ``` <i-0846ef408bafd44b1> POST_PROCESS: > ansible-tmp-1668587955.2343445-102-113985281649023=/tmp/.ansible-ssm/ansible-tmp-1668587955.2343445-102-113985281649023 0 <i-0846ef408bafd44b1> (0, '> ansible-tmp-1668587955.2343445-102-113985281649023=/tmp/.ansible-ssm/ansible-tmp-1668587955.2343445-102-113985281649023\r\r', '') ``` Ubuntu 22.04 ``` <i-0845d1adbe853d3e0> POST_PROCESS: ( umask 77 && mkdir -p "` echo /tmp/.ansible-ssm `"&& mkdir -p "` echo /tmp/.ansible-ssm/ansible-tmp-1668533193.4742794-568-272265113538326 `" && echo ansible-tmp-1668533193.4742794-568-272265113538326="` echo /tmp/.ansible-ssm/ansible-tmp-1668533193.4742794-568-272265113538326 `" ) echo $'\n'$? <i-0845d1adbe853d3e0> ssm_retry: attempt: 0, caught exception(invalid literal for int() with base 10: "echo $'\\n'$?") from cmd (( umask 77 && mkdir -p "` echo /tmp/.ansible-ssm `"&& mkdir -p "` echo /tmp/.ansible-ssm/ansible-tmp-1668533193.4742794-568-272265113538326 `" && echo ansible-tmp-1668533193.4742794-568-272265113538326="` echo /tmp/.ansible-ssm/ansible-tmp-1668533193.4742794-568-272265113538326 `" )...), pausing for 0 seconds ``` I still do not know why, but writing the command all on a single line works, so if you want a work around apply this patch, I am going to open a PR too ``` index 3734d64c..a558f47c 100644 --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -463,7 +463,7 @@ class Connection(ConnectionBase): else: if sudoable: cmd = "sudo " + cmd - cmd = "echo " + mark_start + "\n" + cmd + "\necho $'\\n'$?\n" + "echo " + mark_end + "\n" + cmd = " echo " + mark_start + "; " + cmd + "; echo $'\\n'$?; " + " echo " + mark_end + ";\n" display.vvvv(u"_wrap_command: '{0}'".format(to_text(cmd)), host=self.host) return cmd ```
2023-01-12T16:33:24
ansible-collections/community.aws
1,661
ansible-collections__community.aws-1661
[ "1203" ]
b642edfc59d543a31e3b49cfa0366dce9cdfda38
diff --git a/plugins/modules/ecs_ecr.py b/plugins/modules/ecs_ecr.py --- a/plugins/modules/ecs_ecr.py +++ b/plugins/modules/ecs_ecr.py @@ -85,6 +85,24 @@ default: false type: bool version_added: 1.3.0 + encryption_configuration: + description: + - The encryption configuration for the repository. + required: false + suboptions: + encryption_type: + description: + - The encryption type to use. + choices: [AES256, KMS] + default: 'AES256' + type: str + kms_key: + description: + - If I(encryption_type=KMS), specify the KMS key to use for encryption. + - The alias, key ID, or full ARN of the KMS key can be specified. + type: str + type: dict + version_added: 5.2.0 author: - David M. Lee (@leedm777) extends_documentation_fragment: @@ -161,6 +179,13 @@ community.aws.ecs_ecr: name: needs-no-lifecycle-policy purge_lifecycle_policy: true + +- name: set-encryption-configuration + community.aws.ecs_ecr: + name: uses-custom-kms-key + encryption_configuration: + encryption_type: KMS + kms_key: custom-kms-key-alias ''' RETURN = ''' @@ -201,6 +226,7 @@ except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule @@ -249,17 +275,21 @@ def get_repository_policy(self, registry_id, name): except is_boto3_error_code(['RepositoryNotFoundException', 'RepositoryPolicyNotFoundException']): return None - def create_repository(self, registry_id, name, image_tag_mutability): + def create_repository(self, registry_id, name, image_tag_mutability, encryption_configuration): if registry_id: default_registry_id = self.sts.get_caller_identity().get('Account') if registry_id != default_registry_id: raise Exception('Cannot create repository in registry {0}.' 
'Would be created in {1} instead.'.format(registry_id, default_registry_id)) + if encryption_configuration is None: + encryption_configuration = dict(encryptionType='AES256') + if not self.check_mode: repo = self.ecr.create_repository( repositoryName=name, - imageTagMutability=image_tag_mutability).get('repository') + imageTagMutability=image_tag_mutability, + encryptionConfiguration=encryption_configuration).get('repository') self.changed = True return repo else: @@ -412,6 +442,7 @@ def run(ecr, params): lifecycle_policy_text = params['lifecycle_policy'] purge_lifecycle_policy = params['purge_lifecycle_policy'] scan_on_push = params['scan_on_push'] + encryption_configuration = snake_dict_to_camel_dict(params['encryption_configuration']) # Parse policies, if they are given try: @@ -438,10 +469,16 @@ def run(ecr, params): result['created'] = False if not repo: - repo = ecr.create_repository(registry_id, name, image_tag_mutability) + repo = ecr.create_repository( + registry_id, name, image_tag_mutability, encryption_configuration) result['changed'] = True result['created'] = True else: + if encryption_configuration is not None: + if repo.get('encryptionConfiguration') != encryption_configuration: + result['msg'] = 'Cannot modify repository encryption type' + return False, result + repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability) result['repository'] = repo @@ -557,7 +594,18 @@ def main(): purge_policy=dict(required=False, type='bool'), lifecycle_policy=dict(required=False, type='json'), purge_lifecycle_policy=dict(required=False, type='bool'), - scan_on_push=(dict(required=False, type='bool', default=False)) + scan_on_push=(dict(required=False, type='bool', default=False)), + encryption_configuration=dict( + required=False, + type='dict', + options=dict( + encryption_type=dict(required=False, type='str', default='AES256', choices=['AES256', 'KMS']), + kms_key=dict(required=False, type='str', no_log=False), + ), + required_if=[ + ['encryption_type', 'KMS', ['kms_key']], + ], + ), ) mutually_exclusive = [ ['policy', 'purge_policy'],
diff --git a/tests/integration/targets/ecs_ecr/tasks/main.yml b/tests/integration/targets/ecs_ecr/tasks/main.yml --- a/tests/integration/targets/ecs_ecr/tasks/main.yml +++ b/tests/integration/targets/ecs_ecr/tasks/main.yml @@ -10,6 +10,24 @@ - set_fact: ecr_name: '{{ resource_prefix }}-ecr' + - name: get ARN of calling user + aws_caller_info: + register: aws_caller_info + + - name: create KMS key for testing + aws_kms: + alias: "{{ resource_prefix }}-ecr" + description: a key used for testing ECR + state: present + enabled: yes + key_spec: SYMMETRIC_DEFAULT + key_usage: ENCRYPT_DECRYPT + policy: "{{ lookup('template', 'kms_policy.j2') }}" + tags: + Name: "{{ resource_prefix }}-ecr" + AnsibleTest: AnsibleTestVpc + register: kms_test_key + - name: When creating with check mode ecs_ecr: name: '{{ ecr_name }}' @@ -54,6 +72,11 @@ that: - result.repository.imageTagMutability == "MUTABLE" + - name: it should use AES256 encryption by default + assert: + that: + - result.repository.encryptionConfiguration.encryptionType == "AES256" + - name: When pulling an existing repository that has no existing policy ecs_ecr: name: '{{ ecr_name }}' @@ -538,9 +561,52 @@ - result is changed - not result.repository.imageScanningConfiguration.scanOnPush + - name: When modifying the encryption setting of an existing repository + ecs_ecr: + name: '{{ ecr_name }}' + encryption_configuration: + encryption_type: KMS + kms_key: '{{ kms_test_key.key_arn }}' + register: result + ignore_errors: true + + - name: it should fail + assert: + that: + - result is failed + + - name: delete repository + ecs_ecr: + name: '{{ ecr_name }}' + state: absent + + - name: When creating a repo using KMS encryption + ecs_ecr: + name: '{{ ecr_name }}' + encryption_configuration: + encryption_type: KMS + kms_key: '{{ kms_test_key.key_arn }}' + register: result + + - name: it should create the repo and use KMS encryption + assert: + that: + - result is changed + - result.repository.encryptionConfiguration.encryptionType == "KMS" + + - name: it should use the provided KMS key + assert: + that: + - result.repository.encryptionConfiguration.kmsKey == '{{ kms_test_key.key_arn }}' + always: - name: Delete lingering ECR repository ecs_ecr: name: '{{ ecr_name }}' state: absent + + - name: Delete KMS key + aws_kms: + key_id: '{{ kms_test_key.key_arn }}' + state: absent diff --git a/tests/integration/targets/ecs_ecr/templates/kms_policy.j2 b/tests/integration/targets/ecs_ecr/templates/kms_policy.j2 new file mode 100644 --- /dev/null +++ b/tests/integration/targets/ecs_ecr/templates/kms_policy.j2 @@ -0,0 +1,72 @@ +{ + "Id": "key-ansible-test-policy-123", + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Allow access for root user", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root" + }, + "Action": "kms:*", + "Resource": "*" + }, + { + "Sid": "Allow access for calling user", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Create*", + "kms:Describe*", + "kms:Enable*", + "kms:List*", + "kms:Put*", + "kms:Update*", + "kms:Revoke*", + "kms:Disable*", + "kms:Get*", + "kms:Delete*", + "kms:TagResource", + "kms:UntagResource", + "kms:ScheduleKeyDeletion", + "kms:CancelKeyDeletion" + ], + "Resource": "*" + }, + { + "Sid": "Allow use of the key", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ], + "Resource": 
"*" + }, + { + "Sid": "Allow attachment of persistent resources", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:CreateGrant", + "kms:ListGrants", + "kms:RevokeGrant" + ], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } + } + ] +}
ecs_ecr - support for specifying KMS key ### Summary Unless I'm missing something, I don't see a way to specify KMS key usage in this module. Is it possible to integrate this? ### Issue Type Feature Idea ### Component Name ecs_ecr ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: None If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> Yes. It's possible. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecr.html But only on create. It's not changeable once the ECR is created. Files identified in the description: * [`plugins/modules/ecs_ecr.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ecs_ecr.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @leedm777 @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @markuman > Yes. It's possible. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecr.html > > But only on create. It's not changeable once the ECR is created. I have this working to some extent, but is it possible to pass variables to it, in an Ansible role? Currently, it only works if I hard code the values in the client.create_repository function within the module. I would like to do something like this: ``` - name: create repo from custom module ecr_kms_module: repositoryName: "{{ repo_name }}" encryptionConfiguration.kmsKey: "{{ kms_key }}" ``` > @markuman > > > Yes. It's possible. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecr.html > > But only on create. It's not changeable once the ECR is created. > > I have this working to some extent, but is it possible to pass variables to it, in an Ansible role? Currently, it only works if I hard code the values in the client.create_repository function within the module. I would like to do something like this: > > ``` > - name: create repo from custom module > ecr_kms_module: > repositoryName: "{{ repo_name }}" > encryptionConfiguration.kmsKey: "{{ kms_key }}" > ``` Yes, it should work just out of the box. Maybe something is wrong with your key-name decision and handling. I suggest not to create a new module for this task. Instead patch the existing `ecs_ecr` module and add just the missing `kms_key` parameter. If the ecr already exists, the `kms_key` parameter can be ignored, because it's not changeable. Maybe through a warning if the requestes key differs from the existing key. Are you willing to prepare a PR @GreNIX ?
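As the thread notes, the encryption settings can only be supplied when the repository is created; they cannot be changed afterwards. A minimal boto3 sketch of that call, assuming configured credentials (region, repository name and key alias are placeholders):

```python
import boto3

ecr = boto3.client("ecr", region_name="us-east-1")

# encryptionConfiguration is only honoured at creation time; an existing
# repository keeps whatever encryption it was created with.
response = ecr.create_repository(
    repositoryName="my-encrypted-repo",       # placeholder
    encryptionConfiguration={
        "encryptionType": "KMS",
        "kmsKey": "alias/my-ecr-key",         # alias, key ID or full ARN
    },
)
print(response["repository"]["encryptionConfiguration"])
```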
2023-01-18T19:49:53
ansible-collections/community.aws
1,669
ansible-collections__community.aws-1669
[ "637" ]
50d829f5c593d13ab229aa5ecae76337f2122a7e
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -87,10 +87,36 @@ vars: - name: ansible_aws_ssm_document version_added: 5.2.0 + s3_addressing_style: + description: + - The addressing style to use when using S3 URLs. + - When the S3 bucket isn't in the same region as the Instance + explicitly setting the addressing style to 'virtual' may be necessary + U(https://repost.aws/knowledge-center/s3-http-307-response) as this forces + the use of a specific endpoint. + choices: [ 'path', 'virtual', 'auto' ] + default: 'auto' + version_added: 5.2.0 + vars: + - name: ansible_aws_ssm_s3_addressing_style ''' EXAMPLES = r''' +# Wait for SSM Agent to be available on the Instance +- name: Wait for connection to be available + vars: + ansible_connection: aws_ssm + ansible_aws_ssm_bucket_name: nameofthebucket + ansible_aws_ssm_region: us-west-2 + # When the S3 bucket isn't in the same region as the Instance + # Explicitly setting the addressing style to 'virtual' may be necessary + # https://repost.aws/knowledge-center/s3-http-307-response + ansible_aws_ssm_s3_addressing_style: virtual + tasks: + - name: Wait for connection + wait_for_connection: + # Stop Spooler Process on Windows Instances - name: Stop Spooler Service on Windows Instances vars: @@ -708,7 +734,10 @@ def _get_boto_client(self, service, region_name=None, profile_name=None): client = session.client( service, - config=Config(signature_version="s3v4") + config=Config( + signature_version="s3v4", + s3={'addressing_style': self.get_option('s3_addressing_style')} + ) ) return client
diff --git a/tests/integration/targets/connection_aws_ssm_addressing/aliases b/tests/integration/targets/connection_aws_ssm_addressing/aliases new file mode 100644 --- /dev/null +++ b/tests/integration/targets/connection_aws_ssm_addressing/aliases @@ -0,0 +1,4 @@ +time=20m + +cloud/aws +connection_aws_ssm diff --git a/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml @@ -0,0 +1,9 @@ +- hosts: localhost + roles: + - role: ../setup_connection_aws_ssm + vars: + target_os: fedora + encrypted_bucket: False + s3_bucket_region: 'eu-central-1' + s3_addressing_style: virtual + test_suffix: addressing diff --git a/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_teardown.yml b/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_teardown.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_teardown.yml @@ -0,0 +1,5 @@ +- hosts: localhost + tasks: + - include_role: + name: ../setup_connection_aws_ssm + tasks_from: cleanup.yml diff --git a/tests/integration/targets/connection_aws_ssm_addressing/meta/main.yml b/tests/integration/targets/connection_aws_ssm_addressing/meta/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/connection_aws_ssm_addressing/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - connection + - setup_connection_aws_ssm diff --git a/tests/integration/targets/connection_aws_ssm_addressing/runme.sh b/tests/integration/targets/connection_aws_ssm_addressing/runme.sh new file mode 100755 --- /dev/null +++ b/tests/integration/targets/connection_aws_ssm_addressing/runme.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +PLAYBOOK_DIR=$(pwd) +set -eux + +CMD_ARGS=("$@") + +# Destroy Environment +cleanup() { + + cd "${PLAYBOOK_DIR}" + ansible-playbook -c local aws_ssm_integration_test_teardown.yml "${CMD_ARGS[@]}" + +} + +trap "cleanup" EXIT + +# Setup Environment +ansible-playbook -c local aws_ssm_integration_test_setup.yml "$@" + +# Export the AWS Keys +set +x +. 
./aws-env-vars.sh +set -x + +cd ../connection + +# Execute Integration tests +INVENTORY="${PLAYBOOK_DIR}/ssm_inventory" ./test.sh \ + -e target_hosts=aws_ssm \ + "$@" diff --git a/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml --- a/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml +++ b/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml @@ -4,3 +4,4 @@ vars: target_os: fedora encrypted_bucket: True + test_suffix: encrypteds3 diff --git a/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml --- a/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml +++ b/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml @@ -4,3 +4,4 @@ vars: target_os: fedora use_ssm_document: True + test_suffix: document diff --git a/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml b/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml --- a/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml +++ b/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml @@ -40,11 +40,6 @@ ami_details: </powershell> os_type: windows -# see: -# - https://github.com/mattclay/aws-terminator/pull/181 -# - https://github.com/ansible-collections/community.aws/pull/763 -encrypted_s3_bucket_name: ssm-encrypted-test-bucket - -s3_bucket_name: "{{ resource_prefix }}-connection-ssm" +s3_bucket_name: "{{ tiny_prefix }}-connection-ssm-{{ test_suffix | default(target_os) }}" kms_key_name: "{{ resource_prefix }}-connection-ssm" ssm_document_name: "{{ resource_prefix }}-connection-ssm" diff --git a/tests/integration/targets/setup_connection_aws_ssm/templates/inventory-combined.aws_ssm.j2 b/tests/integration/targets/setup_connection_aws_ssm/templates/inventory-combined.aws_ssm.j2 --- a/tests/integration/targets/setup_connection_aws_ssm/templates/inventory-combined.aws_ssm.j2 +++ b/tests/integration/targets/setup_connection_aws_ssm/templates/inventory-combined.aws_ssm.j2 @@ -32,6 +32,9 @@ ansible_aws_ssm_plugin=/usr/local/sessionmanagerplugin/bin/session-manager-plugi ansible_python_interpreter=/usr/bin/env python3 local_tmp=/tmp/ansible-local-{{ tiny_prefix }} ansible_aws_ssm_bucket_name={{ s3_bucket_name }} +{% if s3_addressing_style | default(False) %} +ansible_aws_ssm_s3_addressing_style={{ s3_addressing_style }} +{% endif %} {% if encrypted_bucket | default(False) %} {% if not (s3_bucket_encryption | default(False)) %} ansible_aws_ssm_bucket_sse_mode='aws:kms'
aws_ssm connection plugin: S3 Signed Url invalid for newly created S3 Bucket ### Summary When I try to execute a playbook against an Amazon Linux 2 instance in EC2 using the aws_ssm connection plugin and a recently created (less than an hour old) S3 bucket, it fails to correctly download `AnsiballZ_setup.py`, resulting in a python syntax error `" File \"/home/ssm-user/.ansible/tmp/ansible-tmp-1626190404.700778-20074-247496938615569/AnsiballZ_setup.py\", line 1\r\r\n <?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\r\n ^\r\r\nSyntaxError: invalid syntax\r\r",`. The curl is writing out the S3 XML error response to file, due to S3 returning a HTTP 307 redirect which the curl does not follow. This HTTP 307 from S3 is expected, as per [this AWS documentation](https://aws.amazon.com/premiumsupport/knowledge-center/s3-http-307-response/), because the bucket is too new for the global S3 DNS to have propagated out yet, so a regional endpoint has to be used. This overall seems similar to [this issue](https://github.com/ansible-collections/community.aws/issues/307), but is still happening for me when using the `main` branch of this repository where the fix has been applied. I believe the underlying problem is that when the signed url is generated in the function `_file_transport_command`, it is a global URL rather than a regional URL: For example, the URL below does not work and returns a 307; [https://test-bucket-garethsaxby-20210713-153159.**s3.amazonaws.com**/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880](https://test-bucket-garethsaxby-20210713-153159.s3.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880) Whilst the URL below, redirected by the 307, -does- work; [https://test-bucket-garethsaxby-20210713-153159.**s3.eu-west-2.amazonaws.com**/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880](https://test-bucket-garethsaxby-20210713-153159.s3.eu-west-2.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880) If I force the plugin to use a regional endpoint for S3, and use a region when creating the client, as per [my branch](https://github.com/ansible-collections/community.aws/compare/main...garethsaxby:fix/s3_regional_url?expand=1), it does work, albeit I'm not 
really sure -how- best to implement this to properly put a Pull Request together to fix the problem, given my branch feels like a really ugly hack. ### Issue Type Bug Report ### Component Name plugins/connection/aws_ssm ### Ansible Version ```console (paste below) ansible [core 2.11.2] config file = None configured module search path = ['/Users/gsaxby/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages/ansible ansible collection location = /Users/gsaxby/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.9.6 (default, Jun 29 2021, 06:20:32) [Clang 12.0.0 (clang-1200.0.32.29)] jinja version = 3.0.1 libyaml = True ``` ### Collection Versions ```console (paste below) # /Users/gsaxby/.ansible/collections/ansible_collections Collection Version -------------------- ------- amazon.aws 1.5.0 ansible.netcommon 1.3.0 ansible.posix 1.1.1 community.aws 1.5.0 # Actually has been taken from main; I have shared my requirements.yml later on community.general 1.3.0 community.kubernetes 1.0.0 google.cloud 1.0.1 # /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 1.5.0 ansible.netcommon 2.2.0 ansible.posix 1.2.0 ansible.utils 2.3.0 ansible.windows 1.7.0 arista.eos 2.2.0 awx.awx 19.2.2 azure.azcollection 1.7.0 check_point.mgmt 2.0.0 chocolatey.chocolatey 1.1.0 cisco.aci 2.0.0 cisco.asa 2.0.2 cisco.intersight 1.0.15 cisco.ios 2.3.0 cisco.iosxr 2.3.0 cisco.meraki 2.4.2 cisco.mso 1.2.0 cisco.nso 1.0.3 cisco.nxos 2.4.0 cisco.ucs 1.6.0 cloudscale_ch.cloud 2.2.0 community.aws 1.5.0 community.azure 1.0.0 community.crypto 1.7.1 community.digitalocean 1.7.0 community.docker 1.8.0 community.fortios 1.0.0 community.general 3.3.0 community.google 1.0.0 community.grafana 1.2.1 community.hashi_vault 1.3.0 community.hrobot 1.1.1 community.kubernetes 1.2.1 community.kubevirt 1.0.0 community.libvirt 1.0.1 community.mongodb 1.2.1 community.mysql 2.1.0 community.network 3.0.0 community.okd 1.1.2 community.postgresql 1.3.0 community.proxysql 1.0.0 community.rabbitmq 1.0.3 community.routeros 1.2.0 community.skydive 1.0.0 community.sops 1.1.0 community.vmware 1.11.0 community.windows 1.5.0 community.zabbix 1.3.0 containers.podman 1.6.1 cyberark.conjur 1.1.0 cyberark.pas 1.0.7 dellemc.enterprise_sonic 1.1.0 dellemc.openmanage 3.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.10.1 fortinet.fortimanager 2.1.2 fortinet.fortios 2.1.1 frr.frr 1.0.3 gluster.gluster 1.0.1 google.cloud 1.0.2 hetzner.hcloud 1.4.3 hpe.nimble 1.1.3 ibm.qradar 1.0.3 infinidat.infinibox 1.2.4 inspur.sm 1.2.0 junipernetworks.junos 2.3.0 kubernetes.core 1.2.1 mellanox.onyx 1.0.0 netapp.aws 21.2.0 netapp.azure 21.7.0 netapp.cloudmanager 21.7.0 netapp.elementsw 21.6.1 netapp.ontap 21.7.0 netapp.um_info 21.6.0 netapp_eseries.santricity 1.2.13 netbox.netbox 3.1.1 ngine_io.cloudstack 2.1.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.0 openstack.cloud 1.5.0 openvswitch.openvswitch 2.0.0 ovirt.ovirt 1.5.3 purestorage.flasharray 1.8.0 purestorage.flashblade 1.6.0 sensu.sensu_go 1.11.1 servicenow.servicenow 1.0.6 splunk.es 1.0.2 t_systems_mms.icinga_director 1.18.0 theforeman.foreman 2.1.1 vyos.vyos 2.3.1 wti.remote 1.0.1 ``` ### AWS SDK versions ```console (paste below) WARNING: Package(s) not found: boto Name: boto3 Version: 1.17.110 Summary: The AWS SDK for 
Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages Requires: botocore, s3transfer, jmespath Required-by: --- Name: botocore Version: 1.20.110 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages Requires: python-dateutil, urllib3, jmespath Required-by: s3transfer, boto3 ``` ### Configuration ```console (paste below) INTERPRETER_PYTHON(/Users/gsaxby/Code/DOG/ansible-testing/ansible/ansible.cfg) = auto INVENTORY_ENABLED(/Users/gsaxby/Code/DOG/ansible-testing/ansible/ansible.cfg) = ['amazon.aws.aws_ec2'] ``` ### OS / Environment Client: macOS Catalina 10.15.7, Ansible installed via Brew Remote: Amazon Linux 2, eu-west-2, ami-03ac5a9b225e99b02, amzn2-ami-hvm-2.0.20210701.0-x86_64-gp2 ### Steps to Reproduce 1. Create a new S3 bucket. I believe this is crucial, as it needs to be returning 307's when using the global endpoint, as per [this AWS knowledge centre article](https://aws.amazon.com/premiumsupport/knowledge-center/s3-http-307-response/). ``` Region: eu-west-2 ``` 2. Create the EC2 instance running the SSM agent. ``` Region: eu-west-2 AMI: ami-03ac5a9b225e99b02 (amzn2-ami-hvm-2.0.20210701.0-x86_64-gp2) IAM Policy Attached: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore Tags: Name: ansible-ssm-testing ``` 3. Execute the ansible playbook as below: **requirements.yml**: ```yaml --- collections: - name: amazon.aws version: 1.5.0 - name: https://github.com/ansible-collections/community.aws.git type: git version: main ``` **ansible.cfg**: ``` [defaults] interpreter_python = auto [inventory] enable_plugins = amazon.aws.aws_ec2 ``` **inventory.aws_ec2.yml**: ```yml # File name must end in `.aws_ec2.yml` otherwise the plugin will not read it plugin: amazon.aws.aws_ec2 regions: - eu-west-2 filters: tag:Name: ansible-ssm-testing ``` **playbook.yml** ```yml --- - hosts: all gather_facts: true vars: ansible_connection: aws_ssm ansible_aws_ssm_region: "eu-west-2" ansible_aws_ssm_instance_id: "{{ instance_id }}" ansible_aws_ssm_bucket_name: test-bucket-garethsaxby-20210713-153159 ansible_python_interpreter: /usr/bin/python3 tasks: - name: Ping Instance ansible.builtin.ping: ``` ```console $ ansible-galaxy install -r requirements.yml --force $ ansible-playbook -i inventory.aws_ec2.yml playbook.yml ``` ### Expected Results I'm expecting the curl against the S3 signed URL on the remote host to pull down `AnsiballZ_setup.py` correctly and continue running the playbook, returning the ping successfully. 
### Actual Results ```console (paste below) $ ansible-playbook -vvvv -i inventory.aws_ec2.yml playbook.yml ansible-playbook [core 2.11.2] config file = /Users/gsaxby/Code/DOG/ansible-testing/ansible/ansible.cfg configured module search path = ['/Users/gsaxby/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages/ansible ansible collection location = /Users/gsaxby/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible-playbook python version = 3.9.6 (default, Jun 29 2021, 06:20:32) [Clang 12.0.0 (clang-1200.0.32.29)] jinja version = 3.0.1 libyaml = True Using /Users/gsaxby/Code/DOG/ansible-testing/ansible/ansible.cfg as config file setting up inventory plugins Loading collection amazon.aws from /Users/gsaxby/.ansible/collections/ansible_collections/amazon/aws Parsed /Users/gsaxby/Code/DOG/ansible-testing/ansible/inventory.aws_ec2.yml inventory source with ansible_collections.amazon.aws.plugins.inventory.aws_ec2 plugin Loading callback plugin default of type stdout, v2.0 from /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages/ansible/plugins/callback/default.py Skipping callback 'default', as we already have a stdout callback. Skipping callback 'minimal', as we already have a stdout callback. Skipping callback 'oneline', as we already have a stdout callback. PLAYBOOK: playbook.yml ******************************************************************************************************************************************** Positional arguments: playbook.yml verbosity: 4 connection: smart timeout: 10 become_method: sudo tags: ('all',) inventory: ('/Users/gsaxby/Code/DOG/ansible-testing/ansible/inventory.aws_ec2.yml',) forks: 5 1 plays in playbook.yml PLAY [all] ******************************************************************************************************************************************************** TASK [Gathering Facts] ******************************************************************************************************************************************** task path: /Users/gsaxby/Code/DOG/ansible-testing/ansible/playbook.yml:2 redirecting (type: connection) ansible.builtin.aws_ssm to community.aws.aws_ssm Loading collection community.aws from /Users/gsaxby/.ansible/collections/ansible_collections/community/aws <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> ESTABLISH SSM CONNECTION TO: i-089c1ec0c85524f5d <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> SSM COMMAND: ['/usr/local/bin/session-manager-plugin', '{"SessionId": "gareth-saxby-temp-072af2adf96185184", "TokenValue": "AAEAAQl1UTpN1tP3cQsnNCTUvKP/y0eAIq8BgKoOVgNzAN4aAAAAAGDtrs9TgYC1XyDzkw5Y6le3Wt9fzFIXrw2thaxAz8Gvts868wSMlpFm+M7syYnedzJfgOMUIxN9/PDA/ph9qL8qZocUy9IdVmBC9oO6Z/yQr94sVYVvWvVHGFY3k9O/9oO8Eklc4SN6r2pl2Mmj3bFKDxH1mbTv15Fks3ieMIiZyxahkg2rwCxFplua+nFlja3w9bQVl+LUXogw19V9MNjy2UrrUiXSMWhwKPPE6Y/VWOgZrNu72mg2mbvAvRKjCV+hZ2vBSt7WZ+gKfGV/U3yUAEEUtnNIsCJz3fAUPlZXUKnIVnviMNL0HnBZzE1YA3BwtbF8R0390a0dNuQuMqrTzrOHI4hJVL0oDBnMXAISZaJ1UUFJ4L5jYMuwVHa8dJA2d4w=", "StreamUrl": "wss://ssmmessages.eu-west-2.amazonaws.com/v1/data-channel/gareth-saxby-temp-072af2adf96185184?role=publish_subscribe", "ResponseMetadata": {"RequestId": "4c322e4e-e0c8-4384-92e3-323414253881", "HTTPStatusCode": 200, "HTTPHeaders": {"server": "Server", "date": "Tue, 13 Jul 2021 15:18:39 GMT", "content-type": "application/x-amz-json-1.1", "content-length": "642", "connection": 
"keep-alive", "x-amzn-requestid": "4c322e4e-e0c8-4384-92e3-323414253881"}, "RetryAttempts": 0}}', 'eu-west-2', 'StartSession', '', '{"Target": "i-089c1ec0c85524f5d"}', 'https://ssm.eu-west-2.amazonaws.com'] <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> SSM CONNECTION ID: gareth-saxby-temp-072af2adf96185184 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC echo ~ <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo IlyexxlDYKICMeyOVUZQHcnTAj echo ~ echo $'\n'$? echo qIhbHBrlBzTeHjwGNvWljDHeGw ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: Starting session with SessionId: gareth-saxby-temp-072af2adf96185184 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: sh-4.2$ stty -echo <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: sh-4.2$ IlyexxlDYKICMeyOVUZQHcnTAj <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: /home/ssm-user <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: qIhbHBrlBzTeHjwGNvWljDHeGw <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: /home/ssm-user 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, '/home/ssm-user\r\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC ( umask 77 && mkdir -p "` echo /home/ssm-user/.ansible/tmp `"&& mkdir "` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 `" && echo ansible-tmp-1626189520.659307-19800-45563405192193="` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 `" ) <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo wzCNiIYognSiHXqfCJMBRaRvKS ( umask 77 && mkdir -p "` echo /home/ssm-user/.ansible/tmp `"&& mkdir "` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 `" && echo ansible-tmp-1626189520.659307-19800-45563405192193="` echo /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 `" ) echo $'\n'$? 
echo CqzxJpqOSlFCtzcRaDElqmzLuB ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: wzCNiIYognSiHXqfCJMBRaRvKS <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: ansible-tmp-1626189520.659307-19800-45563405192193=/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: CqzxJpqOSlFCtzcRaDElqmzLuB <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: ansible-tmp-1626189520.659307-19800-45563405192193=/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, 'ansible-tmp-1626189520.659307-19800-45563405192193=/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193\r\r', '') Using module file /usr/local/Cellar/ansible/4.2.0/libexec/lib/python3.9/site-packages/ansible/modules/setup.py <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> PUT /Users/gsaxby/.ansible/tmp/ansible-local-19794ck3x4sje/tmpyrhzimx3 TO /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC curl 'https://test-bucket-garethsaxby-20210713-153159.s3.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880' -o '/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo YBqOPfzvxcGDqXREpbYSUvrIeG curl 'https://test-bucket-garethsaxby-20210713-153159.s3.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880' -o '/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py' echo $'\n'$? 
echo dfIOAXVAOIcrDMPKZdUkrKCwKw ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: YBqOPfzvxcGDqXREpbYSUvrIeG <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: % Total % Received % Xferd Average Speed Time Time Time Current <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: Dload Upload Total Spent Left Speed 100 509 0 509 0 0 1641 0 --:--:-- --:--:-- --:--:-- 1641 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: dfIOAXVAOIcrDMPKZdUkrKCwKw <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 509 0 509 0 0 1641 0 --:--:-- --:--:-- --:--:-- 1641 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 509 0 509 0 0 1641 0 --:--:-- --:--:-- --:--:-- 1641\r\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 509 0 509 0 0 1641 0 --:--:-- --:--:-- --:--:-- 1641\r\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC chmod u+x /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/ /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo xgTDBjuFPZtDfVuwGaZyflDWjV chmod u+x /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/ /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py echo $'\n'$? echo xOHqRiUNImldbBrdKmcVWrfEEd ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: xgTDBjuFPZtDfVuwGaZyflDWjV <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: xOHqRiUNImldbBrdKmcVWrfEEd <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, '\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC /usr/bin/python3 /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo gzDaIZxdrfBqdBvcQHhExANgdr sudo /usr/bin/python3 /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py echo $'\n'$? 
echo CtSWJSGjdVuhayekdfLdqRalyk ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: gzDaIZxdrfBqdBvcQHhExANgdr <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: File "/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py", line 1 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <?xml version="1.0" encoding="UTF-8"?> <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: ^ <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: SyntaxError: invalid syntax <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 1 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: CtSWJSGjdVuhayekdfLdqRalyk <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: File "/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py", line 1 <?xml version="1.0" encoding="UTF-8"?> ^ SyntaxError: invalid syntax 1 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (1, ' File "/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py", line 1\r\r\n <?xml version="1.0" encoding="UTF-8"?>\r\r\n ^\r\r\nSyntaxError: invalid syntax\r\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC rm -f -r /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/ > /dev/null 2>&1 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> _wrap_command: 'echo JARmCoxpzNaNaBblIFeROdZYey rm -f -r /home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/ > /dev/null 2>&1 echo $'\n'$? echo BifmuhtspkBDKrtrAJmsOZBxKg ' <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: JARmCoxpzNaNaBblIFeROdZYey <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> EXEC stdout line: BifmuhtspkBDKrtrAJmsOZBxKg <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> POST_PROCESS: 0 <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> (0, '\r', '') <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> CLOSING SSM CONNECTION TO: i-089c1ec0c85524f5d <ec2-52-56-84-142.eu-west-2.compute.amazonaws.com> TERMINATE SSM SESSION: gareth-saxby-temp-072af2adf96185184 fatal: [ec2-52-56-84-142.eu-west-2.compute.amazonaws.com]: FAILED! 
=> { "ansible_facts": {}, "changed": false, "failed_modules": { "ansible.legacy.setup": { "failed": true, "module_stderr": "", "module_stdout": " File \"/home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py\", line 1\r\r\n <?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\r\n ^\r\r\nSyntaxError: invalid syntax\r\r", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1 } }, "msg": "The following modules failed to execute: ansible.legacy.setup\n" } PLAY RECAP ******************************************************************************************************************************************************** ec2-52-56-84-142.eu-west-2.compute.amazonaws.com : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 ``` When I curl the signed URL from the remote instance using SSM Session Manager, I get the following response, showing that a 307 is being returned and I'm being redirected to the regional endpoint: ```console $ curl -i "https://test-bucket-garethsaxby-20210713-153159.s3.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880" HTTP/1.1 307 Temporary Redirect x-amz-bucket-region: eu-west-2 x-amz-request-id: 6077E9R1H8G7Z0S1 x-amz-id-2: OU6kvbWvdu79rmzjEa8YpVK8z1X5J0y1axM9o0bsEvQyA6vvdY+xZZDJFdUwez4leqMV5UUzxUw= Location: https://test-bucket-garethsaxby-20210713-153159.s3.eu-west-2.amazonaws.com/i-089c1ec0c85524f5d//home/ssm-user/.ansible/tmp/ansible-tmp-1626189520.659307-19800-45563405192193/AnsiballZ_setup.py?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA5QGXVMSCMPOQVZH3%2F20210713%2Feu-west-2%2Fs3%2Faws4_request&X-Amz-Date=20210713T151841Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=18d520a539227540bef2ba06a6000dd6569c868aeb4cc6ae042fb895e5e2f880 Content-Type: application/xml Transfer-Encoding: chunked Date: Tue, 13 Jul 2021 15:21:02 GMT Server: AmazonS3 <?xml version="1.0" encoding="UTF-8"?> <Error><Code>TemporaryRedirect</Code><Message>Please re-send this request to the specified temporary endpoint. Continue to use the original request endpoint for future requests.</Message><Endpoint>test-bucket-garethsaxby-20210713-153159.s3.eu-west-2.amazonaws.com</Endpoint><Bucket>test-bucket-garethsaxby-20210713-153159</Bucket><RequestId>6077E9R1H8G7Z0S1</RequestId><HostId>OU6kvbWvdu79rmzjEa8YpVK8z1X5J0y1axM9o0bsEvQyA6vvdY+xZZDJFdUwez4leqMV5UUzxUw=</HostId></Error> ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`lib/ansible/plugins/connection`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/lib/ansible/plugins/connection) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> Closing this temporarily as I think I need to revisit some of this first, apologies! To be more specific; the errors are still happening, but my determination of the cause may not be right, and I want to dig into that more before someone else starts looking. EDIT: I've reopened the issue now, as I've realised that I was just confusing myself a bit reading the fix I'd hacked together myself. The issue is still present when using the latest `main` from this repository. Files identified in the description: * [`plugins/connection/aws_ssm.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/connection/aws_ssm.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> @garethsaxby thx for the detailed report! Sadly I'm not familiar with ssm connection plugin. > If I force the plugin to use a regional endpoint for S3, and use a region when creating the client, as per my branch, it does work, albeit I'm not really sure -how- best to implement this to properly put a Pull Request together to fix the problem, given my branch feels like a really ugly hack. There is a similar PR (not merged yet) that introduce a `bucket_region` parameter: https://github.com/ansible-collections/community.aws/pull/603/files#diff-8000d7fb9262e11565b61882567d729fe0958cd9b1a0105683d5bbe0e5b4e585R51 You can try to introduce that too ```python if bucket_region: client = session.client( service, config=Config(signature_version="s3v4"), endpoint_url=f'https://s3.{bucket_region}.amazonaws.com' ) else: client = session.client( service, config=Config(signature_version="s3v4") ) ``` I also confirm this issue. I think a issue fix would be to use '-L' option with curl. I'm definitely having issues with bucket regions and encrypted buckets too. I hit this issue yesterday when attempting to use the aws ssm connection with using a newly created bucket in us-east-2 yesterday. Can we reopen this issue? The referenced PR in the close action above (https://github.com/ansible-collections/community.aws/pull/1176) does not seem to resolve this particular issue. That PR was targeting a fix for pulling the region information for the S3 bucket used for SSM file transfers from the bucket metadata itself, but the pre-signed URLs generated for the S3 downloads are still pointing at the global S3 endpoint, and not the region specific one. Thus, attempting to use the ssm plugin with a newly created transfer bucket in a region like us-east-2 continues to return the presigned URLs targeting the global S3 endpoint, which results in the 307 redirect to the regional endpoint, which then causes the presigned URL to fail with a signature mismatch error as the url was signed for the global endpoint and not the regional endpoint. 
I believe that #1190 needs to be further addressed to resolve this issue.

Hi @bodnarbm, please see https://github.com/ansible-collections/community.aws/pull/743/files to use virtual addressing. There is a PR from @phene but that relies on a hardcoded region to be defined.

@charles-paul-mox Thank you, but that PR looks to be closed unmerged and I would prefer not to patch the plugin separately (if I was, I would probably add the S3 client endpoint URL as a separate variable; that way I could also get it to work with other endpoints as well, like FIPS endpoints). I'm hoping that someone like @tremble could reopen this issue though.

Yes, I cannot merge PRs due to company policies. The virtual addressing is the important part.

@charles-paul-mox My PR doesn't rely on a hard-coded region _unless you are using a non-default partition like GovCloud_. It uses the default global region just to query information about the S3 bucket's region, then uses the bucket's region from there on.

Any real resolution to this problem? I'm using the [5.1.0](https://github.com/ansible-collections/community.aws/releases/tag/5.1.0) release and there is still an issue with the AnsiballZ file:

```
sh-4.2$ cat AnsiballZ_yum.py
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>TemporaryRedirect</Code><Message>Please re-send this request to the specified temporary endpoint. Continue to use the original request endpoint for future requests.</Message><Endpoint>testbucketkochan.s3.eu-west-3.amazonaws.com</Endpoint><Bucket>testbucketkochan</Bucket><RequestId>W5B7ZAZZ........</RequestId><HostId>DR//pSU97KgA8ZLVD/............................+lC/xwAuIBO/W4RuWIXqyFp+MZj0ZuI=</HostId><sh-4.2$
```

Any solution for this?

Cause: This is mainly because of the TemporaryRedirect error from AWS S3 with the presigned URL. Due to this, when we transfer the setup.py file from host to remote, it is downloaded with the below content on the remote.

<?xml version="1.0" encoding="UTF-8"?> <Error><Code>TemporaryRedirect</Code><Message>Please re-send this request to the specified temporary endpoint. Continue to use the original request endpoint for future requests.</Message><Endpoint>enactortestssm123.s3.us-east-2.amazonaws.com</Endpoint><Bucket>enactortestssm123</Bucket><RequestId>RZYTVTWNVV6V0ET1</RequestId><HostId>jDf+7m1brAHn98LcbqJXDHfraqX5i4DadfixrNM+qqEu3abyB67zLMYK9o/+6lU+Y3jwg/KtQ30=</HostId></Error>

To avoid this, you can modify the `_get_boto_client()` function's client initialisation as below to support the virtual addressing_style for S3. Thanks! Feel free to correct me.

client = session.client( service, config=Config(signature_version="s3v4", s3={'addressing_style': 'virtual'}) )
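For reference, the patch in this record turns that suggestion into a connection option, so the addressing style can be forced from the inventory or play without modifying the plugin. A minimal sketch based on the example added to the plugin documentation (the bucket name and region are placeholders):

```yaml
# Sketch: force virtual-host-style S3 URLs so the presigned URLs used for
# file transfers point at the bucket's regional endpoint, avoiding the 307
# redirect from the global endpoint.
- hosts: all
  gather_facts: false
  vars:
    ansible_connection: aws_ssm
    ansible_aws_ssm_region: us-west-2              # placeholder region
    ansible_aws_ssm_bucket_name: nameofthebucket   # placeholder bucket
    ansible_aws_ssm_s3_addressing_style: virtual
  tasks:
    - name: Wait for connection
      wait_for_connection:
```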
2023-01-20T16:55:57
ansible-collections/community.aws
1,678
ansible-collections__community.aws-1678
[ "770" ]
026534a13a23001343767d35cc9f51d0d6c4ed48
diff --git a/plugins/modules/ecs_cluster.py b/plugins/modules/ecs_cluster.py --- a/plugins/modules/ecs_cluster.py +++ b/plugins/modules/ecs_cluster.py @@ -41,6 +41,42 @@ required: false type: int default: 10 + capacity_providers: + version_added: 5.2.0 + description: + - List of capacity providers to use for the cluster. + required: false + type: list + elements: str + capacity_provider_strategy: + version_added: 5.2.0 + description: + - List of capacity provider strategies to use for the cluster. + required: false + type: list + elements: dict + suboptions: + capacity_provider: + description: + - Name of capacity provider. + type: str + weight: + description: + - The relative percentage of the total number of launched tasks that should use the specified provider. + type: int + base: + description: + - How many tasks, at a minimum, should use the specified provider. + type: int + default: 0 + purge_capacity_providers: + version_added: 5.2.0 + description: + - Toggle overwriting of existing capacity providers or strategy. This is needed for backwards compatibility. + - By default I(purge_capacity_providers=false). In a release after 2024-06-01 this will be changed to I(purge_capacity_providers=true). + required: false + type: bool + default: false extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 @@ -56,6 +92,21 @@ name: default state: present +- name: Cluster creation with capacity providers and strategies. + community.aws.ecs_cluster: + name: default + state: present + capacity_providers: + - FARGATE + - FARGATE_SPOT + capacity_provider_strategy: + - capacity_provider: FARGATE + base: 1 + weight: 1 + - capacity_provider: FARGATE_SPOT + weight: 100 + purge_capacity_providers: True + - name: Cluster deletion community.aws.ecs_cluster: name: default @@ -75,6 +126,16 @@ description: how many services are active in this cluster returned: 0 if a new cluster type: int +capacityProviders: + version_added: 5.2.0 + description: list of capacity providers used in this cluster + returned: always + type: list +defaultCapacityProviderStrategy: + version_added: 5.2.0 + description: list of capacity provider strategies used in this cluster + returned: always + type: list clusterArn: description: the ARN of the cluster just created type: str @@ -112,6 +173,8 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict class EcsClusterManager: @@ -145,8 +208,26 @@ def describe_cluster(self, cluster_name): return c raise Exception("Unknown problem describing cluster %s." 
% cluster_name) - def create_cluster(self, clusterName='default'): - response = self.ecs.create_cluster(clusterName=clusterName) + def create_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): + params = dict(clusterName=cluster_name) + if capacity_providers: + params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + if capacity_provider_strategy: + params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + response = self.ecs.create_cluster(**params) + return response['cluster'] + + def update_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): + params = dict(cluster=cluster_name) + if capacity_providers: + params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers) + else: + params['capacityProviders'] = [] + if capacity_provider_strategy: + params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy) + else: + params['defaultCapacityProviderStrategy'] = [] + response = self.ecs.put_cluster_capacity_providers(**params) return response['cluster'] def delete_cluster(self, clusterName): @@ -159,7 +240,17 @@ def main(): state=dict(required=True, choices=['present', 'absent', 'has_instances']), name=dict(required=True, type='str'), delay=dict(required=False, type='int', default=10), - repeat=dict(required=False, type='int', default=10) + repeat=dict(required=False, type='int', default=10), + purge_capacity_providers=dict(required=False, type='bool', default=False), + capacity_providers=dict(required=False, type='list', elements='str'), + capacity_provider_strategy=dict(required=False, + type='list', + elements='dict', + options=dict(capacity_provider=dict(type='str'), + weight=dict(type='int'), + base=dict(type='int', default=0) + ) + ), ) required_together = [['state', 'name']] @@ -177,12 +268,53 @@ def main(): results = dict(changed=False) if module.params['state'] == 'present': + # Pull requested and existing capacity providers and strategies. + purge_capacity_providers = module.params['purge_capacity_providers'] + requested_cp = module.params['capacity_providers'] + requested_cps = module.params['capacity_provider_strategy'] if existing and 'status' in existing and existing['status'] == "ACTIVE": - results['cluster'] = existing + existing_cp = existing['capacityProviders'] + existing_cps = existing['defaultCapacityProviderStrategy'] + + if requested_cp is None: + requested_cp = [] + + # Check if capacity provider strategy needs to trigger an update. + cps_update_needed = False + if requested_cps is not None: + for strategy in requested_cps: + if snake_dict_to_camel_dict(strategy) not in existing_cps: + cps_update_needed = True + for strategy in existing_cps: + if camel_dict_to_snake_dict(strategy) not in requested_cps: + cps_update_needed = True + elif requested_cps is None and existing_cps != []: + cps_update_needed = True + + # Unless purge_capacity_providers is true, we will not be updating the providers or strategy. + if not purge_capacity_providers: + module.deprecate('After 2024-06-01 the default value of purge_capacity_providers will change from false to true.' + ' To maintain the existing behaviour explicitly set purge_capacity_providers=true', + date='2024-06-01', collection_name='community.aws') + cps_update_needed = False + requested_cp = existing_cp + requested_cps = existing_cps + + # If either the providers or strategy differ, update the cluster. 
+ if requested_cp != existing_cp or cps_update_needed: + if not module.check_mode: + results['cluster'] = cluster_mgr.update_cluster(cluster_name=module.params['name'], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps) + results['changed'] = True + else: + results['cluster'] = existing else: if not module.check_mode: # doesn't exist. create it. - results['cluster'] = cluster_mgr.create_cluster(module.params['name']) + results['cluster'] = cluster_mgr.create_cluster(cluster_name=module.params['name'], + capacity_providers=requested_cp, + capacity_provider_strategy=requested_cps) results['changed'] = True # delete the cluster
diff --git a/tests/integration/targets/ecs_cluster/tasks/main.yml b/tests/integration/targets/ecs_cluster/tasks/main.yml --- a/tests/integration/targets/ecs_cluster/tasks/main.yml +++ b/tests/integration/targets/ecs_cluster/tasks/main.yml @@ -63,6 +63,30 @@ that: - not ecs_cluster_again.changed + - name: add capacity providers and strategy + ecs_cluster: + name: "{{ ecs_cluster_name }}" + state: present + purge_capacity_providers: True + capacity_providers: + - FARGATE + - FARGATE_SPOT + capacity_provider_strategy: + - capacity_provider: FARGATE + base: 1 + weight: 1 + - capacity_provider: FARGATE_SPOT + weight: 100 + register: ecs_cluster_update + + - name: check that ecs_cluster was correctly updated + assert: + that: + - ecs_cluster_update.changed + - ecs_cluster_update.cluster is defined + - ecs_cluster_update.cluster.capacityProviders is defined + - "'FARGATE' in ecs_cluster_update.cluster.capacityProviders" + - name: create a VPC to work in ec2_vpc_net: cidr_block: 10.0.0.0/16
Add AWS ECS Capacity Provider Strategy Support

### Summary

Migrated from https://github.com/ansible/ansible/issues/67997 (it doesn't seem to have made it over automatically).

Add support for AWS ECS Cluster Capacity Provider Strategy configuration.

As an additional note, I noticed this because I was creating a cluster to use with the GitLab CI Fargate driver and started getting `The platform version must be null when specifying an EC2 launch type.` when trying to launch a job. It worked with a manually created Cluster and Task Definition, so I looked closely and found the difference was that the manually created cluster had two Capacity Providers and the Ansible-created one had none, nor can you add them through the module. It's clearly something the AWS UI takes care of, and it can be done with the API (see Additional Information), but this module currently does not support it. It means you can't really use it to set up a Fargate cluster at all.

### Issue Type

Feature Idea

### Component Name

ecs_cluster

### Additional Information

Enable configuration of ECS cluster capacity providers and strategies thereof.

```yaml
ecs_cluster:
  ...
  capacity_providers:
    - "FARGATE"
    - "FARGATE_SPOT"
  capacity_provider_strategy:
    - capacity_provider: "FARGATE"
      base: 1
      weight: 1
    - capacity_provider: "FARGATE_SPOT"
      weight: 100
```

hashicorp/terraform-provider-aws#11150
https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.put_cluster_capacity_providers

### Code of Conduct

- [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/ecs_cluster.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ecs_cluster.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @Java1Guy @jillr @markuman @s-hertel @tremble @wimnat [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> > It means you can't really use it to set up a Fargate cluster at all. hm I'm not sure about it. At work we create some fargate ecs clusters just with ```yml - name: create ecs cluster ecs_cluster: name: serverless-housekeeping state: present ``` and we can run ecs taskdefinitions with `launch_type: FARGATE` without any problems in that cluster. ```yml - name: letsencrypt taskdefinition ecs_taskdefinition: family: letsencrypt cpu: "256" memory: "512" state: present network_mode: awsvpc launch_type: FARGATE execution_role_arn: "arn:aws:iam::{{ caller_facts.account }}:role/ecsTaskExecutionRole" task_role_arn: "arn:aws:iam::{{ caller_facts.account }}:role/letsencryptECSTask" region: eu-central-1 containers: - name: letsencrypt environment: - name: KMS value: "{{ kms.ssm }}" essential: true image: "{{ caller_facts.account }}.dkr.ecr.eu-central-1.amazonaws.com/letsencrypt:latest" logConfiguration: logDriver: awslogs options: awslogs-group: /ecs/letsencrypt awslogs-region: eu-central-1 awslogs-stream-prefix: ecs register: letsTD ``` >> It means you can't really use it to set up a Fargate cluster at all. > hm I'm not sure about it. ...which does not mean that the parameters should not be supported by `community.aws.ecs_cluster` I think its not that hard to implement. Hi @markuman, thanks for the reply. That's interesting, I wonder what I'm doing wrong then? It's OT for the issue but for some reason my Ansible-created cluster can't launch Fargate task definitions but my manually created one *can* ... and I can't see any other difference. I'll keep digging though, if it works for you then at least I know it's possible! 
:-) @gregharvey ```yml --- - hosts: localhost connection: local tags: - example vars: region: eu-central-1 subnet: subnet-d8309db2 security_group: sg-f32f0196 ecs_trusted_relationship: | { "Version": "2012-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Principal": { "Service": "ecs-tasks.amazonaws.com" }, "Action": "sts:AssumeRole" } ] } tasks: - name: Get the current caller identity facts aws_caller_info: register: caller_facts - name: create ecsTaskExecution role iam_role: name: ecsTaskExecutionRole description: ecsTaskExecutionRole with to many permissions state: present purge_policies: yes managed_policy: - arn:aws:iam::aws:policy/CloudWatchLogsFullAccess - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceEventsRole - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly assume_role_policy_document: "{{ ecs_trusted_relationship }}" - name: create ecs cluster ecs_cluster: name: sometcluster state: present region: "{{ region }}" - name: create cloudwatch log group cloudwatchlogs_log_group: log_group_name: /ecs/fargate-test retention: 1 region: "{{ region }}" - name: some fargate task definition ecs_taskdefinition: family: something cpu: "256" memory: "512" state: present network_mode: awsvpc launch_type: FARGATE execution_role_arn: ecsTaskExecutionRole task_role_arn: ecsTaskExecutionRole region: "{{ region }}" containers: - name: something command: - uptime essential: true image: "alpine:latest" logConfiguration: logDriver: awslogs options: awslogs-group: /ecs/fargate-test awslogs-region: "{{ region }}" awslogs-stream-prefix: ecs register: td_output - name: Run task community.aws.ecs_task: operation: run cluster: sometcluster task_definition: something count: 1 started_by: ansible_user launch_type: FARGATE network_configuration: subnets: - "{{ subnet }}" security_groups: - "{{ security_group }}" register: task_output - debug: var: task_output - hosts: localhost connection: local tags: - cleanup vars: region: eu-central-1 subnet: subnet-d8309db2 security_group: sg-f32f0196 ecs_trusted_relationship: | { "Version": "2012-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Principal": { "Service": "ecs-tasks.amazonaws.com" }, "Action": "sts:AssumeRole" } ] } tasks: - name: remove iam role iam_role: name: ecsTaskExecutionRole description: ecsTaskExecutionRole with to many permissions state: absent purge_policies: yes managed_policy: - arn:aws:iam::aws:policy/CloudWatchLogsFullAccess - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceEventsRole - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly assume_role_policy_document: "{{ ecs_trusted_relationship }}" - name: remove ecs cluster ecs_cluster: name: sometcluster state: absent region: "{{ region }}" - name: remove cloudwatch log group cloudwatchlogs_log_group: log_group_name: /ecs/fargate-test retention: 1 region: "{{ region }}" state: absent ``` adjust just the vars `AWS_PROFILE=yourprofile ansible-playbook 770.yml --tags example` and `AWS_PROFILE=yourprofile ansible-playbook 770.yml --tags cleanup` to remove the resources. 
just the image is failing to pull (_no idea atm_)

```
Stopped reason CannotPullContainerError: inspect image has been retried 5 time(s): failed to resolve ref "docker.io/library/alpine:latest": failed to do request: Head https://registry-1.docker.io/v2/library/alpine/manifests/latest: dial tcp 52.204.76.244:443: i/o tim...
```

but at least it works without any issue to run Fargate containers in a cluster made by the `ecs_cluster` module.

Thank you so much, I'll give it a go! :+1:

Just to follow up here, in case someone has a similar problem. The code above works perfectly, so indeed you clearly *can* create a cluster and run a task. However, my GitLab `fargate` custom executor still wasn't working. I reviewed the docs to try and understand what's different, and for reasons I don't know there are steps 7 and 8 here to add a default capacity provider strategy:

* https://docs.gitlab.com/runner/configuration/runner_autoscale_aws_fargate/#step-5-create-an-ecs-fargate-cluster

Without that step it doesn't work. I presume the driver does not set the `launch_type` when it runs a task, and that it defaults to `EC2` if you don't either specify `FARGATE` when you launch the task *or* tell your cluster to favour `FARGATE`. This is really a bug in the `fargate` driver for GitLab Runner, in fairness, but I could work around it if Ansible would let me set that default capacity provider strategy. So it would be handy. :-)
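The patch in this record adds exactly that ability. A sketch of the resulting task, taken from the example added to the module documentation (the cluster name is a placeholder, and purge_capacity_providers must be set for the providers and strategy to be applied to an already existing cluster):

```yaml
# Sketch: attach FARGATE/FARGATE_SPOT capacity providers and a default
# strategy to the cluster. purge_capacity_providers is needed so an existing
# cluster's providers and strategy are overwritten rather than left as-is.
- name: Cluster creation with capacity providers and strategies
  community.aws.ecs_cluster:
    name: default
    state: present
    capacity_providers:
      - FARGATE
      - FARGATE_SPOT
    capacity_provider_strategy:
      - capacity_provider: FARGATE
        base: 1
        weight: 1
      - capacity_provider: FARGATE_SPOT
        weight: 100
    purge_capacity_providers: true
```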
2023-01-23T18:22:54
ansible-collections/community.aws
1,680
ansible-collections__community.aws-1680
[ "1106" ]
86c60b4957a2baa0bc8b00ef1ed9adf90a56ed9c
diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py --- a/plugins/modules/ecs_service.py +++ b/plugins/modules/ecs_service.py @@ -44,7 +44,7 @@ task_definition: description: - The task definition the service will run. - - This parameter is required when I(state=present). + - This parameter is required when I(state=present) unless I(force_new_deployment=True). - This parameter is ignored when updating a service with a C(CODE_DEPLOY) deployment controller in which case the task definition is managed by Code Pipeline and cannot be updated. required: false @@ -971,14 +971,15 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, - required_if=[('state', 'present', ['task_definition']), - ('launch_type', 'FARGATE', ['network_configuration'])], + required_if=[('launch_type', 'FARGATE', ['network_configuration'])], required_together=[['load_balancers', 'role']], mutually_exclusive=[['launch_type', 'capacity_provider_strategy']]) - if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA': - if module.params['desired_count'] is None: + if module.params['state'] == 'present': + if module.params['scheduling_strategy'] == 'REPLICA' and module.params['desired_count'] is None: module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count') + if module.params['task_definition'] is None and not module.params['force_new_deployment']: + module.fail_json(msg='Either task_definition or force_new_deployment is required when status is present.') if len(module.params['capacity_provider_strategy']) > 6: module.fail_json(msg='AWS allows a maximum of six capacity providers in the strategy.') @@ -1075,6 +1076,9 @@ def main(): updatedLoadBalancers = loadBalancers if existing['deploymentController']['type'] == 'ECS' else [] + if task_definition is None and module.params['force_new_deployment']: + task_definition = existing['taskDefinition'] + # update required response = service_mgr.update_service(module.params['name'], module.params['cluster'],
diff --git a/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml --- a/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml +++ b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml @@ -112,6 +112,32 @@ that: - ecs_service_again.changed +- name: force_new_deployment should work without providing a task_definition + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + state: present + force_new_deployment: yes + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + placement_constraints: + - type: distinctInstance + health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + role: "{{ ecs_service_role_name }}" + register: ecs_service_notaskdef + +- name: check that ECS service changed again due to force_new_deployment with no task definition + assert: + that: + - ecs_service_notaskdef.changed + - name: attempt to use ECS network configuration on task definition without awsvpc network_mode (expected to fail) vars: ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
ecs_service - Support force_new_deployment without having to specify a task definition ### Summary Sometimes it can be helpful to just restart all tasks from a service, without having to deal with the task definitions (since they do not need to change). For that purpose, the aws cli offers the following possibility: ``` aws ecs update-service --cluster cluster_name --service service_name --force-new-deployment ``` This is particularly interesting when we want some external files to be picked up (but the task definition itself does not need any change). We can note that boto3 already supports this option (at least since `v1.14.2`, which I'm working with): ```python import boto3 client = boto3.client('ecs') client.update_service(cluster='cluster_name', service='service_name', forceNewDeployment=True) ``` However, the `ecs_service` module cannot do that, since when we specify `state: present` we get the following error: ``` fatal: [localhost]: FAILED! => {"changed": false, "msg": "state is present but all of the following are missing: task_definition"} ``` ### Issue Type Feature Idea ### Component Name ecs_service ### Additional Information A rough idea would be to add an additional parameter (like `preserve_tasks_definitions`) which would, when true, skip the check for the presence of `task_definition`. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/ecs_service.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/ecs_service.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @Java1Guy @jillr @kaczynskid @markuman @s-hertel @tremble @zacblazic [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> I forgot to mention: it is indeed possible to work around, but it requires some more tasks to retrieve information about loadBalancers & taskdefinition. ```ansible - name: Retrieve service details community.aws.ecs_service_info: cluster: "{{ cluster_name }}" service: "{{ service_name }}" details: true register: service_details - name: Reload ECS service community.aws.ecs_service: cluster: "{{ cluster_name }}" name: "{{ service_name }}" state: present force_new_deployment: yes load_balancers: "{{ service_details.services | map(attribute='loadBalancers') | first }}" task_definition: "{{ service_details.services | map(attribute='taskDefinition') | first }}" ``` The main idea of this feature request is to improve the user experience by not having to deal with data we do not care about at the moment. > We can note that boto3 already supports this option (at least since `v1.14.2`, which I'm working with): > > ```python > import boto3 > client = boto3.client('ecs') > client.update_service(cluster='cluster_name', service='service_name', forceNewDeployment=True) > ``` > > However, the `ecs_service` module cannot do that, since when we specify `state: present` we get the following error: > > ``` > fatal: [localhost]: FAILED! => {"changed": false, "msg": "state is present but all of the following are missing: task_definition"} > ``` So a new boolean parameter `force_new_deployment` must be introduced. If it has no default value, it can be mutually exclusive with the `state` parameter. @giom-l do you have some time to implement this new feature? Hi, not this week, but I can take a look at it the next one, sure. Just to be sure about what to do: you write about a new Boolean parameter, but there is already an existing one with this name: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_service_module.html#parameter-force_new_deployment It is just the behaviour of this one that should be changed, right? (NB: I don't have the whole code in front of me right now, so I may have missed something.) Ah sorry, I thought the entire parameter was missing. So yes, IMO the internal logic must just be fixed, so that `force_new_deployment: yes` also works on an existing service without the need to specify the task definition.
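A minimal usage sketch of what the change above enables, based on the patch and its integration test (cluster and service names are placeholders): with `force_new_deployment` set, `task_definition` may be omitted and the service's current task definition is reused.

```yaml
- name: Force a new deployment without re-specifying the task definition
  community.aws.ecs_service:
    state: present
    name: service_name
    cluster: cluster_name
    force_new_deployment: true
```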
2023-01-24T16:50:36
ansible-collections/community.aws
1,683
ansible-collections__community.aws-1683
[ "343" ]
35cd1784bdbb7ca67ac06c7bcb367a3d450ed5f6
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -24,16 +24,22 @@ description: The STS access key to use when connecting via session-manager. vars: - name: ansible_aws_ssm_access_key_id + env: + - name: AWS_ACCESS_KEY_ID version_added: 1.3.0 secret_access_key: description: The STS secret key to use when connecting via session-manager. vars: - name: ansible_aws_ssm_secret_access_key + env: + - name: AWS_SECRET_ACCESS_KEY version_added: 1.3.0 session_token: description: The STS session token to use when connecting via session-manager. vars: - name: ansible_aws_ssm_session_token + env: + - name: AWS_SESSION_TOKEN version_added: 1.3.0 instance_id: description: The EC2 instance ID. @@ -43,6 +49,9 @@ description: The region the EC2 instance is located. vars: - name: ansible_aws_ssm_region + env: + - name: AWS_REGION + - name: AWS_DEFAULT_REGION default: 'us-east-1' bucket_name: description: The name of the S3 bucket used for file transfers. @@ -57,6 +66,8 @@ description: Sets AWS profile to use. vars: - name: ansible_aws_ssm_profile + env: + - name: AWS_PROFILE version_added: 1.5.0 reconnection_retries: description: Number of attempts to connect. @@ -736,15 +747,6 @@ def _get_boto_client(self, service, region_name=None, profile_name=None, endpoin aws_secret_access_key = self.get_option('secret_access_key') aws_session_token = self.get_option('session_token') - if aws_access_key_id is None: - aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID", None) - if aws_secret_access_key is None: - aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", None) - if aws_session_token is None: - aws_session_token = os.environ.get("AWS_SESSION_TOKEN", None) - if not profile_name: - profile_name = os.environ.get("AWS_PROFILE", None) - session_args = dict( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key,
Cannot connect to ec2 instance via aws_ssm if AWS_SESSION_TOKEN is missing <!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> Cannot connect to ec2 instance via aws_ssm if AWS_SESSION_TOKEN is not set. (I do not configure any AWS variables in OS via `aws configure`) ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> `ansible-collections/community.aws/blob/main/plugins/connection/aws_ssm.py ` ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.9.9 config file = None configured module search path = ['/Users/it-ops/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.7/site-packages/ansible executable location = /usr/local/bin/ansible python version = 3.7.7 (default, Mar 10 2020, 15:43:03) [Clang 11.0.0 (clang-1100.0.33.17)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> OSX 10.14.6 (18G3020) ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> I created a role to ping ec2 instance. <!--- Paste example playbooks or commands between quotes below --> ```yaml # roles/ssm/tasks/main.yml - name: ping ping: # roles/ssm/defaults/main.yml ansible_connection: aws_ssm ansible_aws_ssm_region: ap-southeast-1 ansible_aws_ssm_access_key_id: XXXXXXXXX ansible_aws_ssm_secret_access_key: XXXXXXXXX ansible_aws_ssm_instance_id: i-XXXXXXXX ansible_aws_ssm_bucket_name: bucket-name ``` <!--- HINT: You can paste gist.github.com links for larger files --> I guess this is related with this loc: ``` def _get_boto_client(self, service, region_name=None): ''' Gets a boto3 client based on the STS token ''' aws_access_key_id = self.get_option('access_key_id') aws_secret_access_key = self.get_option('secret_access_key') aws_session_token = self.get_option('session_token') if aws_access_key_id is None or aws_secret_access_key is None or aws_session_token is None: aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID", None) aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", None) aws_session_token = os.environ.get("AWS_SESSION_TOKEN", None) ``` Because I do not need `aws_session_token` (yet) to connect to ec2 instances via aws_ssm to do a ping, and I do not set any OS environment variables, all my `aws_access_key_id` and `aws_secret_access_key` are set to None again because the `aws_session_token` is empty. 
Suggested fixes: ``` def _get_boto_client(self, service, region_name=None): ''' Gets a boto3 client based on the STS token ''' aws_access_key_id = self.get_option('access_key_id') aws_secret_access_key = self.get_option('secret_access_key') aws_session_token = self.get_option('session_token') if aws_access_key_id is None: aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID", None) if aws_secret_access_key is None: aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", None) if aws_session_token is None: aws_session_token = os.environ.get("AWS_SESSION_TOKEN", None) ``` ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ``` TASK [ssm : ping] ***************************************************************************************************************************************************************************** ok: [ec2] PLAY RECAP ************************************************************************************************************************************************************************************ ec2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ``` ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below PLAY [nodes] ********************************************************************************************************************************************************************************** TASK [ssm : ping] ***************************************************************************************************************************************************************************** An exception occurred during task execution. To see the full traceback, use -vvv. The error was: botocore.exceptions.NoCredentialsError: Unable to locate credentials fatal: [ec2]: FAILED! => {"msg": "Unexpected failure during module execution.", "stdout": ""} PLAY RECAP ************************************************************************************************************************************************************************************ ec2 : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 ```
Files identified in the description: * [`plugins/modules/aws_ssm_parameter_store.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws']/blob/main/plugins/modules/aws_ssm_parameter_store.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @mikedlr @nathanwebsterdotme @ozbillwang @s-hertel @tremble @wimnat [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @ru-rocker thank you for filing the issue and the suggested fix. Would you like to open a PR for this? cc @116davinder [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> Perhaps this issue could be addressed differently. The ansible options parser already supports the ability to pick-up environment variables. Why not use that instead? Right now these fallbacks including region and remote_addr behavior could be addressed by updating configs. Say something like: ``` session_token: description: The STS session token to use when connecting via session-manager. vars: - name: ansible_aws_ssm_session_token env: - name: AWS_SESSION_TOKEN version_added: 1.5.0 version_added: 1.3.0 ``` I just had to address this in our environment because as designed, the code will override the configured vars with `None`'s because there are no such environment variables set. I'll look at attaching a pull request to address this since I just had to work-around it manually. Sorry for the late reply. The reason I opened this issue is I need dynamic access_key_id and access_secret_key_id due to security requirements. I need to store those keys inside some password manager and it is prohibited to store them as environment variables.
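For reference, a hedged sketch of how a host can be configured after the patch above; the values are placeholders. With the fix, credential options left unset here fall back to the standard `AWS_PROFILE` / `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY` / `AWS_SESSION_TOKEN` environment variables instead of being overridden with `None`, while, as I understand Ansible's option precedence, explicitly set `ansible_aws_ssm_*` variables still take priority.

```yaml
# host_vars sketch for an aws_ssm-managed host (placeholder values)
ansible_connection: community.aws.aws_ssm
ansible_aws_ssm_instance_id: i-XXXXXXXX
ansible_aws_ssm_region: ap-southeast-1
ansible_aws_ssm_bucket_name: bucket-name
# credentials intentionally omitted: resolved from the environment as a fallback
```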
2023-01-31T12:49:38
ansible-collections/community.aws
1,688
ansible-collections__community.aws-1688
[ "964" ]
5c3934323f4681d1b702c47333f2e89907b238aa
diff --git a/plugins/module_utils/sns.py b/plugins/module_utils/sns.py --- a/plugins/module_utils/sns.py +++ b/plugins/module_utils/sns.py @@ -12,6 +12,10 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags @AWSRetry.jittered_backoff() @@ -87,6 +91,16 @@ def canonicalize_endpoint(protocol, endpoint): return endpoint +def get_tags(client, module, topic_arn): + try: + return boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceArn=topic_arn)['Tags']) + except is_boto3_error_code('AuthorizationError'): + module.warn("Permission denied accessing tags") + return {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't obtain topic tags") + + def get_info(connection, module, topic_arn): name = module.params.get('name') topic_type = module.params.get('topic_type') @@ -121,5 +135,34 @@ def get_info(connection, module, topic_arn): info.update(camel_dict_to_snake_dict(connection.get_topic_attributes(TopicArn=topic_arn)['Attributes'])) info['delivery_policy'] = info.pop('effective_delivery_policy') info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in list_topic_subscriptions(connection, module, topic_arn)] - + info["tags"] = get_tags(connection, module, topic_arn) return info + + +def update_tags(client, module, topic_arn): + + if module.params.get('tags') is None: + return False + + existing_tags = get_tags(client, module, topic_arn) + to_update, to_delete = compare_aws_tags(existing_tags, module.params['tags'], module.params['purge_tags']) + + if not bool(to_delete or to_update): + return False + + if module.check_mode: + return True + + if to_update: + try: + client.tag_resource(ResourceArn=topic_arn, + Tags=ansible_dict_to_boto3_tag_list(to_update)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't add tags to topic") + if to_delete: + try: + client.untag_resource(ResourceArn=topic_arn, TagKeys=to_delete) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't remove tags from topic") + + return True diff --git a/plugins/modules/sns_topic.py b/plugins/modules/sns_topic.py --- a/plugins/modules/sns_topic.py +++ b/plugins/modules/sns_topic.py @@ -12,8 +12,7 @@ short_description: Manages AWS SNS topics and subscriptions version_added: 1.0.0 description: - - The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics. - - As of 2.6, this module can be use to subscribe and unsubscribe to topics outside of your AWS account. + - The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics. author: - "Joel Thompson (@joelthompson)" - "Fernando Jose Pando (@nand0p)" @@ -149,10 +148,13 @@ Blame Amazon." 
default: true type: bool +notes: + - Support for I(tags) and I(purge_tags) was added in release 5.3.0. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 ''' EXAMPLES = r""" @@ -328,12 +330,14 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.community.aws.plugins.module_utils.sns import list_topics from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup from ansible_collections.community.aws.plugins.module_utils.sns import compare_delivery_policies from ansible_collections.community.aws.plugins.module_utils.sns import list_topic_subscriptions from ansible_collections.community.aws.plugins.module_utils.sns import canonicalize_endpoint from ansible_collections.community.aws.plugins.module_utils.sns import get_info +from ansible_collections.community.aws.plugins.module_utils.sns import update_tags class SnsTopicManager(object): @@ -349,6 +353,8 @@ def __init__(self, delivery_policy, subscriptions, purge_subscriptions, + tags, + purge_tags, check_mode): self.connection = module.client('sns') @@ -371,6 +377,8 @@ def __init__(self, self.topic_deleted = False self.topic_arn = None self.attributes_set = [] + self.tags = tags + self.purge_tags = purge_tags def _create_topic(self): attributes = {} @@ -383,6 +391,9 @@ def _create_topic(self): if not self.name.endswith('.fifo'): self.name = self.name + '.fifo' + if self.tags: + tags = ansible_dict_to_boto3_tag_list(self.tags) + if not self.check_mode: try: response = self.connection.create_topic(Name=self.name, @@ -542,12 +553,13 @@ def ensure_ok(self): elif self.display_name or self.policy or self.delivery_policy: self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account") changed |= self._set_topic_subs() - self._init_desired_subscription_attributes() if self.topic_arn in list_topics(self.connection, self.module): changed |= self._set_topic_subs_attributes() elif any(self.desired_subscription_attributes.values()): self.module.fail_json(msg="Cannot set subscription attributes for SNS topics not owned by this account") + # Check tagging + changed |= update_tags(self.connection, self.module, self.topic_arn) return changed @@ -600,6 +612,8 @@ def main(): delivery_policy=dict(type='dict', options=delivery_args), subscriptions=dict(default=[], type='list', elements='dict'), purge_subscriptions=dict(type='bool', default=True), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -614,6 +628,8 @@ def main(): subscriptions = module.params.get('subscriptions') purge_subscriptions = module.params.get('purge_subscriptions') check_mode = module.check_mode + tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') sns_topic = SnsTopicManager(module, name, @@ -624,6 +640,8 @@ def main(): delivery_policy, subscriptions, purge_subscriptions, + tags, + purge_tags, check_mode) if state == 'present':
diff --git a/tests/integration/targets/sns_topic/tasks/main.yml b/tests/integration/targets/sns_topic/tasks/main.yml --- a/tests/integration/targets/sns_topic/tasks/main.yml +++ b/tests/integration/targets/sns_topic/tasks/main.yml @@ -14,13 +14,9 @@ create_instance_profile: false managed_policies: - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess' + wait: True register: iam_role - - name: pause if role was created - pause: - seconds: 10 - when: iam_role is changed - - name: list all the topics (check_mode) sns_topic_info: check_mode: true @@ -428,6 +424,170 @@ - third_party_deletion is failed - third_party_topic.sns_topic.subscriptions|length == third_party_deletion_facts.sns_topic.subscriptions|length + # Test tags + - name: create standard SNS topic + sns_topic: + name: '{{ sns_topic_topic_name }}' + display_name: My topic name + register: sns_topic_create + + - name: assert that creation worked + assert: + that: + - sns_topic_create.changed + + - name: set sns_arn fact + set_fact: + sns_arn: '{{ sns_topic_create.sns_arn }}' + + - name: Add tags to topic - CHECK_MODE + sns_topic: + name: '{{ sns_topic_topic_name }}' + tags: + tag_one: '{{ tiny_prefix }} One' + "Tag Two": 'two {{ tiny_prefix }}' + check_mode: true + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is changed + + - name: Add tags to topic + sns_topic: + name: '{{ sns_topic_topic_name }}' + tags: + tag_one: '{{ tiny_prefix }} One' + "Tag Two": 'two {{ tiny_prefix }}' + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is changed + + - name: Add tags to topic to verify idempotency - CHECK_MODE + sns_topic: + name: '{{ sns_topic_topic_name }}' + tags: + tag_one: '{{ tiny_prefix }} One' + "Tag Two": 'two {{ tiny_prefix }}' + check_mode: true + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is not changed + + - name: Add tags to topic to verify idempotency + sns_topic: + name: '{{ sns_topic_topic_name }}' + tags: + tag_one: '{{ tiny_prefix }} One' + "Tag Two": 'two {{ tiny_prefix }}' + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is not changed + + - name: Update (add/remove) tags - CHECK_MODE + sns_topic: + name: '{{ sns_topic_topic_name }}' + tags: + tag_three: '{{ tiny_prefix }} Three' + "Tag Two": 'two {{ tiny_prefix }}' + check_mode: true + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is changed + + - name: Update tags to verify idempotency + sns_topic: + name: '{{ sns_topic_topic_name }}' + tags: + tag_three: '{{ tiny_prefix }} Three' + "Tag Two": 'two {{ tiny_prefix }}' + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is changed + + - name: Update tags without purge - CHECK_MODE + sns_topic: + name: '{{ sns_topic_topic_name }}' + purge_tags: no + tags: + tag_one: '{{ tiny_prefix }} One' + check_mode: true + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is changed + + - name: Update tags without purge + sns_topic: + name: '{{ sns_topic_topic_name }}' + purge_tags: no + tags: + tag_one: '{{ tiny_prefix }} One' + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is changed + + - name: Remove all the tags - CHECK_MODE + sns_topic: + name: '{{ sns_topic_topic_name }}' + purge_tags: yes + tags: {} + check_mode: true + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is changed + + - name: Remove all the tags + sns_topic: + name: '{{ sns_topic_topic_name }}' + purge_tags: yes + tags: {} + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is changed 
+ + - name: Update with CamelCase tags + sns_topic: + name: '{{ sns_topic_topic_name }}' + purge_tags: no + tags: + "lowercase spaced": 'hello cruel world' + "Title Case": 'Hello Cruel World' + CamelCase: 'SimpleCamelCase' + snake_case: 'simple_snake_case' + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is changed + + - name: Do not specify any tag to ensure previous tags are not removed + sns_topic: + name: '{{ sns_topic_topic_name }}' + purge_tags: no + register: sns_topic_tags + + - assert: + that: + - sns_topic_tags is not changed + always: - name: announce teardown start
sns_topic: tagging support ### Summary sns_topic does not currently support resource tagging. It should have a `tags` parameter for AWS tags. ### Issue Type Feature Idea ### Component Name sns_topic ### Additional Information This issue was previously raised in the main ansible repo https://github.com/ansible/ansible/issues/65613 before the community migration, but it doesn't seem to have been raised here. There was also a PR for the issue https://github.com/ansible/ansible/pull/68287. Similar to the previous proposal, I would like the ability to add a tags dict as a parameter, similar to `s3_bucket` and `ec2` ```yaml - sns_topic: name: "alarms" state: present tags: Environment: Production ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/sns_topic.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/sns_topic.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @joelthompson @markuman @nand0p @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> @dtnyn Thank you for bringing this up. Would you be willing to open a PR with that feature here? For the required policies you should open a PR in this repo https://github.com/mattclay/aws-terminator. Hi @alinabuzachis, unfortunately I'm not looking to become a contributor; currently I have a workaround of just using awscli tagging as part of the playbook. I have only brought it up since it seems like it could be a good feature to have.
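A brief usage sketch of the tagging support added by the patch above (the `tags`/`purge_tags` options on `community.aws.sns_topic`); the topic name and tag values are taken from the example in the feature request:

```yaml
- community.aws.sns_topic:
    name: alarms
    state: present
    tags:
      Environment: Production
    purge_tags: false
```

Setting `purge_tags: false` keeps existing tags that are not listed, mirroring the behaviour exercised in the integration tests.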
2023-02-01T15:50:36
ansible-collections/community.aws
1,690
ansible-collections__community.aws-1690
[ "1686", "1686" ]
a67cb9edf20549981f1ced0cc02b2a9ef12c699b
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -817,33 +817,66 @@ def _generate_commands(self, bucket_name, s3_path, in_path, out_path): if self.is_windows: put_command_headers = "; ".join([f"'{h}' = '{v}'" for h, v in put_headers.items()]) - put_command = ( - "Invoke-WebRequest -Method PUT " - f"-Headers @{{{put_command_headers}}} " # @{'key' = 'value'; 'key2' = 'value2'} - f"-InFile '{in_path}' " - f"-Uri '{put_url}' " - f"-UseBasicParsing" - ) - get_command = ( - "Invoke-WebRequest " - f"'{get_url}' " - f"-OutFile '{out_path}'" - ) + put_commands = [ + ( + "Invoke-WebRequest -Method PUT " + f"-Headers @{{{put_command_headers}}} " # @{'key' = 'value'; 'key2' = 'value2'} + f"-InFile '{in_path}' " + f"-Uri '{put_url}' " + f"-UseBasicParsing" + ), + ] + get_commands = [ + ( + "Invoke-WebRequest " + f"'{get_url}' " + f"-OutFile '{out_path}'" + ), + ] else: put_command_headers = " ".join([f"-H '{h}: {v}'" for h, v in put_headers.items()]) - put_command = ( - "curl --request PUT " - f"{put_command_headers} " - f"--upload-file '{in_path}' " - f"'{put_url}'" - ) - get_command = ( - "curl " - f"-o '{out_path}' " - f"'{get_url}'" - ) + put_commands = [ + ( + "curl --request PUT " + f"{put_command_headers} " + f"--upload-file '{in_path}' " + f"'{put_url}'" + ), + ] + get_commands = [ + ( + "curl " + f"-o '{out_path}' " + f"'{get_url}'" + ), + # Due to https://github.com/curl/curl/issues/183 earlier + # versions of curl did not create the output file, when the + # response was empty. Although this issue was fixed in 2015, + # some actively maintained operating systems still use older + # versions of it (e.g. CentOS 7) + ( + "touch " + f"'{out_path}'" + ) + ] + + return get_commands, put_commands, put_args + + def _exec_transport_commands(self, in_path, out_path, commands): + stdout_combined, stderr_combined = '', '' + for command in commands: + (returncode, stdout, stderr) = self.exec_command(command, in_data=None, sudoable=False) - return get_command, put_command, put_args + # Check the return code + if returncode != 0: + raise AnsibleError( + f"failed to transfer file to {in_path} {out_path}:\n" + f"{stdout}\n{stderr}") + + stdout_combined += stdout + stderr_combined += stderr + + return (returncode, stdout_combined, stderr_combined) @_ssm_retry def _file_transport_command(self, in_path, out_path, ssm_action): @@ -852,30 +885,25 @@ def _file_transport_command(self, in_path, out_path, ssm_action): bucket_name = self.get_option("bucket_name") s3_path = self._escape_path(f"{self.instance_id}/{out_path}") - get_command, put_command, put_args = self._generate_commands( + get_commands, put_commands, put_args = self._generate_commands( bucket_name, s3_path, in_path, out_path, ) client = self._s3_client - if ssm_action == 'get': - (returncode, stdout, stderr) = self.exec_command(put_command, in_data=None, sudoable=False) - with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as data: - client.download_fileobj(bucket_name, s3_path, data) - else: - with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as data: - client.upload_fileobj(data, bucket_name, s3_path, ExtraArgs=put_args) - (returncode, stdout, stderr) = self.exec_command(get_command, in_data=None, sudoable=False) - # Remove the files from the bucket after they've been transferred - client.delete_object(Bucket=bucket_name, Key=s3_path) - - # Check the return code - if returncode == 0: + try: + if ssm_action == 'get': + 
(returncode, stdout, stderr) = self._exec_transport_commands(in_path, out_path, put_commands) + with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as data: + client.download_fileobj(bucket_name, s3_path, data) + else: + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as data: + client.upload_fileobj(data, bucket_name, s3_path, ExtraArgs=put_args) + (returncode, stdout, stderr) = self._exec_transport_commands(in_path, out_path, get_commands) return (returncode, stdout, stderr) - - raise AnsibleError( - f"failed to transfer file to {in_path} {out_path}:\n" - f"{stdout}\n{stderr}") + finally: + # Remove the files from the bucket after they've been transferred + client.delete_object(Bucket=bucket_name, Key=s3_path) def put_file(self, in_path, out_path): ''' transfer a file from local to remote '''
diff --git a/tests/integration/targets/connection/test_connection.yml b/tests/integration/targets/connection/test_connection.yml --- a/tests/integration/targets/connection/test_connection.yml +++ b/tests/integration/targets/connection/test_connection.yml @@ -6,6 +6,7 @@ local_file: '{{ local_dir }}/汉语.txt' remote_dir: '{{ remote_tmp }}-汉语' remote_file: '{{ remote_dir }}/汉语.txt' + remote_empty_file: '{{ remote_dir }}/empty.txt' tasks: ### test wait_for_connection plugin @@ -77,3 +78,15 @@ - root loop_control: loop_var: user_name + + ### copy an empty file + - name: copy an empty file + action: "{{ action_prefix }}copy content= dest={{ remote_empty_file }}" + - name: stat empty file + action: "{{ action_prefix }}stat path={{ remote_empty_file }}" + register: stat_empty_file_cmd + - name: check that empty file exists + assert: + that: + - stat_empty_file_cmd.stat.isreg # it is a regular file + - stat_empty_file_cmd.stat.size == 0 \ No newline at end of file
copy module crashes with empty content when using the SSM plugin ### Summary When I execute the builtin copy module with the `content` parameter set to empty string, I get the following error (taken from the `-vvvv` logs): ``` <i-XXXXXXXXd68> EXEC stdout line: chmod: cannot access ‘/tmp/.ansible-/ansible-tmp-1675251475.8058956-7383-181169058616495/source’: No such file or directory ``` ### Issue Type Bug Report ### Component Name ssm_plugin ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.14.1] config file = /XXXX/ansible.cfg configured module search path = ['/XXXX/library'] ansible python module location = /home/bence/.local/lib/python3.10/site-packages/ansible ansible collection location = /home/bence/.ansible/collections:/usr/share/ansible/collections executable location = /home/bence/.local/bin/ansible python version = 3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0] (/usr/bin/python3) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /XXX/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 5.1.0 ansible.netcommon 4.1.0 ansible.posix 1.4.0 ansible.utils 2.8.0 ansible.windows 1.12.0 arista.eos 6.0.0 awx.awx 21.10.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.3.1 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.1 cisco.intersight 1.0.22 cisco.ios 4.0.0 cisco.iosxr 4.0.3 cisco.ise 2.5.9 cisco.meraki 2.13.0 cisco.mso 2.1.0 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.3 community.aws 5.0.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.9.0 community.digitalocean 1.22.0 community.dns 2.4.2 community.docker 3.3.1 community.fortios 1.0.0 community.general 6.1.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.0.0 community.hrobot 1.6.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.1 community.proxysql 1.4.0 community.rabbitmq 1.2.3 community.routeros 2.5.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.5.0 community.vmware 3.2.0 community.windows 1.11.1 community.zabbix 1.9.0 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.21.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.1 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.0 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.10.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.2.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.21.0 netapp.elementsw 21.7.0 netapp.ontap 22.0.1 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.1 netbox.netbox 3.9.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.15.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.2.0 sensu.sensu_go 1.13.1 splunk.es 2.1.0 t_systems_mms.icinga_director 1.31.4 theforeman.foreman 3.7.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.3.1 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` ### 
AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.26.52 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/bence/gls/devops/ansible-parcelos/tests/.env/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.29.52 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/bence/gls/devops/ansible-parcelos/tests/.env/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ANY_ERRORS_FATAL(/XXXXXXXXXXXXXX/ansible.cfg) = True CALLBACKS_ENABLED(/XXXXXXXXXXXXXX/ansible.cfg) = ['profile_tasks'] CONFIG_FILE() = /XXXXXXXXXXXXXX/ansible.cfg DEFAULT_FORKS(/XXXXXXXXXXXXXX/ansible.cfg) = 25 DEFAULT_MODULE_PATH(/XXXXXXXXXXXXXX/ansible.cfg) = ['/XXXXXXXXXXXXXX/library'] DEFAULT_ROLES_PATH(/XXXXXXXXXXXXXX/ansible.cfg) = ['/XXXXXXXXXXXXXX/roles'] DEFAULT_TIMEOUT(/XXXXXXXXXXXXXX/ansible.cfg) = 20 DISPLAY_SKIPPED_HOSTS(/XXXXXXXXXXXXXX/ansible.cfg) = False HOST_KEY_CHECKING(/XXXXXXXXXXXXXX/ansible.cfg) = False MAX_FILE_SIZE_FOR_DIFF(/XXXXXXXXXXXXXX/ansible.cfg) = 1044480 ``` ### OS / Environment Windows 10 with WSL (Ubuntu 20) ### Steps to Reproduce Use the built-in `copy` module with the `content` set to empty string. ```console ansible -i inventories/aws_ec2.yml -m copy -a "dest=/tmp/test content=" target_host -vvv ``` ### Expected Results The temporary `source` file should be an empty file (e.g. `/tmp/.ansible-/ansible-tmp-1675251475.8058956-7383-181169058616495/source`) ### Actual Results The temporary `source` file is not present, so the Ansible step crashes. Output (sensitive parts are masked): ```console ansible -i inventories/aws_ec2.yml -m copy -a "dest=/tmp/test content=" target_host -vvv ... redirecting (type: inventory) ansible.builtin.aws_ec2 to amazon.aws.aws_ec2 Using inventory plugin 'ansible_collections.amazon.aws.plugins.inventory.aws_ec2' to process inventory source '/XXXXXXXXXXXXXXXX/inventories/aws_ec2.yml' Parsed /XXXXXXXXXXXXXXXX/inventories/aws_ec2.yml inventory source with auto plugin redirecting (type: callback) ansible.builtin.profile_tasks to ansible.posix.profile_tasks Skipping callback 'default', as we already have a stdout callback. Skipping callback 'minimal', as we already have a stdout callback. Skipping callback 'oneline', as we already have a stdout callback. 
redirecting (type: connection) ansible.builtin.aws_ssm to community.aws.aws_ssm <i-XXXXXXXXXXXXXXX68> ESTABLISH SSM CONNECTION TO: i-XXXXXXXXXXXXXXX68 <i-XXXXXXXXXXXXXXX68> SSM CONNECTION ID: XXXXXXXXXXXXXXXXXXX <i-XXXXXXXXXXXXXXX68> EXEC ( umask 77 && mkdir -p "` echo /tmp/.ansible-${USER}/ `"&& mkdir "` echo /tmp/.ansible-${USER}/ansible-tmp-1675252624.7794013-9014-861916818482 `" && echo ansible-tmp-1675252624.7794013-9014-861916818482="` echo /tmp/.ansible-${USER}/ansible-tmp-1675252624.7794013-9014-861916818482 `" ) <i-XXXXXXXXXXXXXXX68> (0, 'ansible-tmp-1675252624.7794013-9014-861916818482=/tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482\r\r', '') <i-XXXXXXXXXXXXXXX68> Attempting python interpreter discovery <i-XXXXXXXXXXXXXXX68> EXEC echo PLATFORM; uname; echo FOUND; command -v 'python3.11'; command -v 'python3.10'; command -v 'python3.9'; command -v 'python3.8'; command -v 'python3.7'; command -v 'python3.6'; command -v 'python3.5'; command -v '/usr/bin/python3'; command -v '/usr/libexec/platform-python'; command -v 'python2.7'; command -v '/usr/bin/python'; command -v 'python'; echo ENDFOUND <i-XXXXXXXXXXXXXXX68> (0, 'PLATFORM\r\r\nLinux\r\r\nFOUND\r\r\n/usr/bin/python3.6\r\r\n/usr/bin/python3\r\r\n/usr/libexec/platform-python\r\r\n/usr/bin/python2.7\r\r\n/usr/bin/python\r\r\n/usr/bin/python\r\r\nENDFOUND\r\r', '') <i-XXXXXXXXXXXXXXX68> Python interpreter discovery fallback (pipelining support required for extended interpreter discovery) Using module file /home/bence/.local/lib/python3.10/site-packages/ansible/modules/stat.py <i-XXXXXXXXXXXXXXX68> PUT /home/bence/.ansible/tmp/ansible-local-8998ncfzil01/tmpg1ordya2 TO /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/AnsiballZ_stat.py <i-XXXXXXXXXXXXXXX68> EXEC curl 'https://XXXXXXXXXXXXXXXXXXXXXXX.s3.amazonaws.com/i-XXXXXXXXXXXXXXX68//tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/AnsiballZ_stat.py' -o '/tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/AnsiballZ_stat.py' <i-XXXXXXXXXXXXXXX68> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 129k 100 129k 0 0 542k 0 --:--:-- --:--:-- --:--:-- 545k\r\r', '') <i-XXXXXXXXXXXXXXX68> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 129k 100 129k 0 0 542k 0 --:--:-- --:--:-- --:--:-- 545k\r\r', '') <i-XXXXXXXXXXXXXXX68> EXEC chmod u+x /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/ /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/AnsiballZ_stat.py <i-XXXXXXXXXXXXXXX68> (0, '\r', '') <i-XXXXXXXXXXXXXXX68> EXEC /usr/bin/python3.6 /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/AnsiballZ_stat.py <i-XXXXXXXXXXXXXXX68> (0, '\r\r\n{"changed": false, "stat": {"exists": false}, "invocation": {"module_args": {"path": "/tmp/test", "follow": false, "get_checksum": true, "checksum_algorithm": "sha1", "get_md5": false, "get_mime": true, "get_attributes": true}}}\r\r', '') <i-XXXXXXXXXXXXXXX68> PUT /home/bence/.ansible/tmp/ansible-local-8998ncfzil01/tmp3b0hfmbo TO /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/source <i-XXXXXXXXXXXXXXX68> EXEC curl 'https://XXXXXXXXXXXXXXXXXXXXXXX.s3.amazonaws.com/i-XXXXXXXXXXXXXXX68//tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/source?XXXXXXXXXXX' -o 
'/tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/source' <i-XXXXXXXXXXXXXXX68> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r\r', '') <i-XXXXXXXXXXXXXXX68> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r\r', '') <i-XXXXXXXXXXXXXXX68> EXEC chmod u+x /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/ /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/source <i-XXXXXXXXXXXXXXX68> (1, 'chmod: cannot access ‘/tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/source’: No such file or directory\r\r', '') <i-XXXXXXXXXXXXXXX68> EXEC rm -f -r /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/ > /dev/null 2>&1 <i-XXXXXXXXXXXXXXX68> (0, '\r', '') <i-XXXXXXXXXXXXXXX68> CLOSING SSM CONNECTION TO: i-XXXXXXXXXXXXXXX68 i-XXXXXXXXXXXXXXX68 | FAILED! => { "msg": "Failed to set execute bit on remote files (rc: 1, err: )" } ``` **Note:** if you repeat the steps with a host with SSH connection, the step just completes without error. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
@bencehornak-gls Thanks for taking the time to open this issue. You're currently using release 5.0.0, we released 5.2.0 last week with a number of bugfixes, please could you try again with 5.2.0 Unfortunately the issue persists after upgrading to 5.2.0 <details> <summary>galaxy modules</summary> ansible-galaxy collection list: ```console # /home/bence/.ansible/collections/ansible_collections Collection Version ------------- ------- amazon.aws 5.2.0 community.aws 5.2.0 # /home/bence/.local/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 5.1.0 ansible.netcommon 4.1.0 ansible.posix 1.4.0 ansible.utils 2.8.0 ansible.windows 1.12.0 arista.eos 6.0.0 awx.awx 21.10.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.3.1 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.1 cisco.intersight 1.0.22 cisco.ios 4.0.0 cisco.iosxr 4.0.3 cisco.ise 2.5.9 cisco.meraki 2.13.0 cisco.mso 2.1.0 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.3 community.aws 5.0.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.9.0 community.digitalocean 1.22.0 community.dns 2.4.2 community.docker 3.3.1 community.fortios 1.0.0 community.general 6.1.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.0.0 community.hrobot 1.6.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.1 community.proxysql 1.4.0 community.rabbitmq 1.2.3 community.routeros 2.5.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.5.0 community.vmware 3.2.0 community.windows 1.11.1 community.zabbix 1.9.0 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.21.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.1 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.0 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.10.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.2.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.21.0 netapp.elementsw 21.7.0 netapp.ontap 22.0.1 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.1 netbox.netbox 3.9.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.15.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.2.0 sensu.sensu_go 1.13.1 splunk.es 2.1.0 t_systems_mms.icinga_director 1.31.4 theforeman.foreman 3.7.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.3.1 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` </details> Forgot to mention, but my workaround was to use `content: '\n'`. It is fine for my use case, because extra new-lines are ignored in this file, but might not be an option for some use-cases. That's helpful information. It looks like this is basically curl doing something weird: https://github.com/curl/curl/issues/183 I'm not sure what the appropriate fix would be :/ Yes, this seems to be the consequence of curl's crappy design here. I would also expect curl to create an empty file in this case. 
For the time-being I don't see any other ways than adding a touch command after the curl command to make sure that it exists (even if it is supposed to be empty). If you're willing to open a Pull Request we can probably get this patched and fixed pretty quickly: https://github.com/ansible-collections/community.aws/blob/main/plugins/connection/aws_ssm.py#L832 with an extra test in https://github.com/ansible-collections/community.aws/blob/main/tests/integration/targets/connection/test_connection.yml
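For illustration, a minimal pair of tasks showing the failure mode and the stopgap mentioned in the thread above, assuming a host managed over the community.aws.aws_ssm connection; the task names and destination path are illustrative only, and the second task is the reporter's workaround, not a fix:

```yaml
# Fails over aws_ssm before the touch fallback existed: curl exits 0 but
# never writes the temporary 'source' file, so the follow-up chmod fails.
- name: Copy an empty file to the instance
  ansible.builtin.copy:
    dest: /tmp/test
    content: ''

# Workaround from the discussion: a lone newline keeps curl writing the
# file, at the cost of the destination no longer being truly empty.
- name: Copy a single newline instead
  ansible.builtin.copy:
    dest: /tmp/test
    content: "\n"
```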
2023-02-02T01:01:56
ansible-collections/community.aws
1,698
ansible-collections__community.aws-1698
[ "1686" ]
374a8ac68c0049545199fe02ff45326c5dad7d16
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -802,33 +802,66 @@ def _generate_commands(self, bucket_name, s3_path, in_path, out_path): if self.is_windows: put_command_headers = "; ".join([f"'{h}' = '{v}'" for h, v in put_headers.items()]) - put_command = ( - "Invoke-WebRequest -Method PUT " - f"-Headers @{{{put_command_headers}}} " # @{'key' = 'value'; 'key2' = 'value2'} - f"-InFile '{in_path}' " - f"-Uri '{put_url}' " - f"-UseBasicParsing" - ) - get_command = ( - "Invoke-WebRequest " - f"'{get_url}' " - f"-OutFile '{out_path}'" - ) + put_commands = [ + ( + "Invoke-WebRequest -Method PUT " + f"-Headers @{{{put_command_headers}}} " # @{'key' = 'value'; 'key2' = 'value2'} + f"-InFile '{in_path}' " + f"-Uri '{put_url}' " + f"-UseBasicParsing" + ), + ] + get_commands = [ + ( + "Invoke-WebRequest " + f"'{get_url}' " + f"-OutFile '{out_path}'" + ), + ] else: put_command_headers = " ".join([f"-H '{h}: {v}'" for h, v in put_headers.items()]) - put_command = ( - "curl --request PUT " - f"{put_command_headers} " - f"--upload-file '{in_path}' " - f"'{put_url}'" - ) - get_command = ( - "curl " - f"-o '{out_path}' " - f"'{get_url}'" - ) + put_commands = [ + ( + "curl --request PUT " + f"{put_command_headers} " + f"--upload-file '{in_path}' " + f"'{put_url}'" + ), + ] + get_commands = [ + ( + "curl " + f"-o '{out_path}' " + f"'{get_url}'" + ), + # Due to https://github.com/curl/curl/issues/183 earlier + # versions of curl did not create the output file, when the + # response was empty. Although this issue was fixed in 2015, + # some actively maintained operating systems still use older + # versions of it (e.g. CentOS 7) + ( + "touch " + f"'{out_path}'" + ) + ] + + return get_commands, put_commands, put_args + + def _exec_transport_commands(self, in_path, out_path, commands): + stdout_combined, stderr_combined = '', '' + for command in commands: + (returncode, stdout, stderr) = self.exec_command(command, in_data=None, sudoable=False) - return get_command, put_command, put_args + # Check the return code + if returncode != 0: + raise AnsibleError( + f"failed to transfer file to {in_path} {out_path}:\n" + f"{stdout}\n{stderr}") + + stdout_combined += stdout + stderr_combined += stderr + + return (returncode, stdout_combined, stderr_combined) @_ssm_retry def _file_transport_command(self, in_path, out_path, ssm_action): @@ -837,30 +870,25 @@ def _file_transport_command(self, in_path, out_path, ssm_action): bucket_name = self.get_option("bucket_name") s3_path = self._escape_path(f"{self.instance_id}/{out_path}") - get_command, put_command, put_args = self._generate_commands( + get_commands, put_commands, put_args = self._generate_commands( bucket_name, s3_path, in_path, out_path, ) client = self._s3_client - if ssm_action == 'get': - (returncode, stdout, stderr) = self.exec_command(put_command, in_data=None, sudoable=False) - with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as data: - client.download_fileobj(bucket_name, s3_path, data) - else: - with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as data: - client.upload_fileobj(data, bucket_name, s3_path, ExtraArgs=put_args) - (returncode, stdout, stderr) = self.exec_command(get_command, in_data=None, sudoable=False) - # Remove the files from the bucket after they've been transferred - client.delete_object(Bucket=bucket_name, Key=s3_path) - - # Check the return code - if returncode == 0: + try: + if ssm_action == 'get': + 
(returncode, stdout, stderr) = self._exec_transport_commands(in_path, out_path, put_commands) + with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as data: + client.download_fileobj(bucket_name, s3_path, data) + else: + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as data: + client.upload_fileobj(data, bucket_name, s3_path, ExtraArgs=put_args) + (returncode, stdout, stderr) = self._exec_transport_commands(in_path, out_path, get_commands) return (returncode, stdout, stderr) - - raise AnsibleError( - f"failed to transfer file to {in_path} {out_path}:\n" - f"{stdout}\n{stderr}") + finally: + # Remove the files from the bucket after they've been transferred + client.delete_object(Bucket=bucket_name, Key=s3_path) def put_file(self, in_path, out_path): ''' transfer a file from local to remote '''
diff --git a/tests/integration/targets/connection/test_connection.yml b/tests/integration/targets/connection/test_connection.yml --- a/tests/integration/targets/connection/test_connection.yml +++ b/tests/integration/targets/connection/test_connection.yml @@ -6,6 +6,7 @@ local_file: '{{ local_dir }}/汉语.txt' remote_dir: '{{ remote_tmp }}-汉语' remote_file: '{{ remote_dir }}/汉语.txt' + remote_empty_file: '{{ remote_dir }}/empty.txt' tasks: ### test wait_for_connection plugin @@ -50,3 +51,15 @@ local_action: file path={{ local_file }} state=absent - name: remove remote temp file action: "{{ action_prefix }}file path={{ remote_file }} state=absent" + + ### copy an empty file + - name: copy an empty file + action: "{{ action_prefix }}copy content= dest={{ remote_empty_file }}" + - name: stat empty file + action: "{{ action_prefix }}stat path={{ remote_empty_file }}" + register: stat_empty_file_cmd + - name: check that empty file exists + assert: + that: + - stat_empty_file_cmd.stat.isreg # it is a regular file + - stat_empty_file_cmd.stat.size == 0
copy module crashes with empty content when using the SSM plugin ### Summary When I execute the builtin copy module with the `content` parameter set to empty string, I get the following error (taken from the `-vvvv` logs): ``` <i-XXXXXXXXd68> EXEC stdout line: chmod: cannot access ‘/tmp/.ansible-/ansible-tmp-1675251475.8058956-7383-181169058616495/source’: No such file or directory ``` ### Issue Type Bug Report ### Component Name ssm_plugin ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.14.1] config file = /XXXX/ansible.cfg configured module search path = ['/XXXX/library'] ansible python module location = /home/bence/.local/lib/python3.10/site-packages/ansible ansible collection location = /home/bence/.ansible/collections:/usr/share/ansible/collections executable location = /home/bence/.local/bin/ansible python version = 3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0] (/usr/bin/python3) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /XXX/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 5.1.0 ansible.netcommon 4.1.0 ansible.posix 1.4.0 ansible.utils 2.8.0 ansible.windows 1.12.0 arista.eos 6.0.0 awx.awx 21.10.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.3.1 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.1 cisco.intersight 1.0.22 cisco.ios 4.0.0 cisco.iosxr 4.0.3 cisco.ise 2.5.9 cisco.meraki 2.13.0 cisco.mso 2.1.0 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.3 community.aws 5.0.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.9.0 community.digitalocean 1.22.0 community.dns 2.4.2 community.docker 3.3.1 community.fortios 1.0.0 community.general 6.1.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.0.0 community.hrobot 1.6.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.1 community.proxysql 1.4.0 community.rabbitmq 1.2.3 community.routeros 2.5.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.5.0 community.vmware 3.2.0 community.windows 1.11.1 community.zabbix 1.9.0 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.21.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.1 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.0 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.10.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.2.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.21.0 netapp.elementsw 21.7.0 netapp.ontap 22.0.1 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.1 netbox.netbox 3.9.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.15.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.2.0 sensu.sensu_go 1.13.1 splunk.es 2.1.0 t_systems_mms.icinga_director 1.31.4 theforeman.foreman 3.7.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.3.1 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` ### 
AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.26.52 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/bence/gls/devops/ansible-parcelos/tests/.env/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.29.52 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/bence/gls/devops/ansible-parcelos/tests/.env/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ANY_ERRORS_FATAL(/XXXXXXXXXXXXXX/ansible.cfg) = True CALLBACKS_ENABLED(/XXXXXXXXXXXXXX/ansible.cfg) = ['profile_tasks'] CONFIG_FILE() = /XXXXXXXXXXXXXX/ansible.cfg DEFAULT_FORKS(/XXXXXXXXXXXXXX/ansible.cfg) = 25 DEFAULT_MODULE_PATH(/XXXXXXXXXXXXXX/ansible.cfg) = ['/XXXXXXXXXXXXXX/library'] DEFAULT_ROLES_PATH(/XXXXXXXXXXXXXX/ansible.cfg) = ['/XXXXXXXXXXXXXX/roles'] DEFAULT_TIMEOUT(/XXXXXXXXXXXXXX/ansible.cfg) = 20 DISPLAY_SKIPPED_HOSTS(/XXXXXXXXXXXXXX/ansible.cfg) = False HOST_KEY_CHECKING(/XXXXXXXXXXXXXX/ansible.cfg) = False MAX_FILE_SIZE_FOR_DIFF(/XXXXXXXXXXXXXX/ansible.cfg) = 1044480 ``` ### OS / Environment Windows 10 with WSL (Ubuntu 20) ### Steps to Reproduce Use the built-in `copy` module with the `content` set to empty string. ```console ansible -i inventories/aws_ec2.yml -m copy -a "dest=/tmp/test content=" target_host -vvv ``` ### Expected Results The temporary `source` file should be an empty file (e.g. `/tmp/.ansible-/ansible-tmp-1675251475.8058956-7383-181169058616495/source`) ### Actual Results The temporary `source` file is not present, so the Ansible step crashes. Output (sensitive parts are masked): ```console ansible -i inventories/aws_ec2.yml -m copy -a "dest=/tmp/test content=" target_host -vvv ... redirecting (type: inventory) ansible.builtin.aws_ec2 to amazon.aws.aws_ec2 Using inventory plugin 'ansible_collections.amazon.aws.plugins.inventory.aws_ec2' to process inventory source '/XXXXXXXXXXXXXXXX/inventories/aws_ec2.yml' Parsed /XXXXXXXXXXXXXXXX/inventories/aws_ec2.yml inventory source with auto plugin redirecting (type: callback) ansible.builtin.profile_tasks to ansible.posix.profile_tasks Skipping callback 'default', as we already have a stdout callback. Skipping callback 'minimal', as we already have a stdout callback. Skipping callback 'oneline', as we already have a stdout callback. 
redirecting (type: connection) ansible.builtin.aws_ssm to community.aws.aws_ssm <i-XXXXXXXXXXXXXXX68> ESTABLISH SSM CONNECTION TO: i-XXXXXXXXXXXXXXX68 <i-XXXXXXXXXXXXXXX68> SSM CONNECTION ID: XXXXXXXXXXXXXXXXXXX <i-XXXXXXXXXXXXXXX68> EXEC ( umask 77 && mkdir -p "` echo /tmp/.ansible-${USER}/ `"&& mkdir "` echo /tmp/.ansible-${USER}/ansible-tmp-1675252624.7794013-9014-861916818482 `" && echo ansible-tmp-1675252624.7794013-9014-861916818482="` echo /tmp/.ansible-${USER}/ansible-tmp-1675252624.7794013-9014-861916818482 `" ) <i-XXXXXXXXXXXXXXX68> (0, 'ansible-tmp-1675252624.7794013-9014-861916818482=/tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482\r\r', '') <i-XXXXXXXXXXXXXXX68> Attempting python interpreter discovery <i-XXXXXXXXXXXXXXX68> EXEC echo PLATFORM; uname; echo FOUND; command -v 'python3.11'; command -v 'python3.10'; command -v 'python3.9'; command -v 'python3.8'; command -v 'python3.7'; command -v 'python3.6'; command -v 'python3.5'; command -v '/usr/bin/python3'; command -v '/usr/libexec/platform-python'; command -v 'python2.7'; command -v '/usr/bin/python'; command -v 'python'; echo ENDFOUND <i-XXXXXXXXXXXXXXX68> (0, 'PLATFORM\r\r\nLinux\r\r\nFOUND\r\r\n/usr/bin/python3.6\r\r\n/usr/bin/python3\r\r\n/usr/libexec/platform-python\r\r\n/usr/bin/python2.7\r\r\n/usr/bin/python\r\r\n/usr/bin/python\r\r\nENDFOUND\r\r', '') <i-XXXXXXXXXXXXXXX68> Python interpreter discovery fallback (pipelining support required for extended interpreter discovery) Using module file /home/bence/.local/lib/python3.10/site-packages/ansible/modules/stat.py <i-XXXXXXXXXXXXXXX68> PUT /home/bence/.ansible/tmp/ansible-local-8998ncfzil01/tmpg1ordya2 TO /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/AnsiballZ_stat.py <i-XXXXXXXXXXXXXXX68> EXEC curl 'https://XXXXXXXXXXXXXXXXXXXXXXX.s3.amazonaws.com/i-XXXXXXXXXXXXXXX68//tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/AnsiballZ_stat.py' -o '/tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/AnsiballZ_stat.py' <i-XXXXXXXXXXXXXXX68> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 129k 100 129k 0 0 542k 0 --:--:-- --:--:-- --:--:-- 545k\r\r', '') <i-XXXXXXXXXXXXXXX68> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 129k 100 129k 0 0 542k 0 --:--:-- --:--:-- --:--:-- 545k\r\r', '') <i-XXXXXXXXXXXXXXX68> EXEC chmod u+x /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/ /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/AnsiballZ_stat.py <i-XXXXXXXXXXXXXXX68> (0, '\r', '') <i-XXXXXXXXXXXXXXX68> EXEC /usr/bin/python3.6 /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/AnsiballZ_stat.py <i-XXXXXXXXXXXXXXX68> (0, '\r\r\n{"changed": false, "stat": {"exists": false}, "invocation": {"module_args": {"path": "/tmp/test", "follow": false, "get_checksum": true, "checksum_algorithm": "sha1", "get_md5": false, "get_mime": true, "get_attributes": true}}}\r\r', '') <i-XXXXXXXXXXXXXXX68> PUT /home/bence/.ansible/tmp/ansible-local-8998ncfzil01/tmp3b0hfmbo TO /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/source <i-XXXXXXXXXXXXXXX68> EXEC curl 'https://XXXXXXXXXXXXXXXXXXXXXXX.s3.amazonaws.com/i-XXXXXXXXXXXXXXX68//tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/source?XXXXXXXXXXX' -o 
'/tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/source' <i-XXXXXXXXXXXXXXX68> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r\r', '') <i-XXXXXXXXXXXXXXX68> (0, ' % Total % Received % Xferd Average Speed Time Time Time Current\r\r\n Dload Upload Total Spent Left Speed\r\r\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r\r', '') <i-XXXXXXXXXXXXXXX68> EXEC chmod u+x /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/ /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/source <i-XXXXXXXXXXXXXXX68> (1, 'chmod: cannot access ‘/tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/source’: No such file or directory\r\r', '') <i-XXXXXXXXXXXXXXX68> EXEC rm -f -r /tmp/.ansible-/ansible-tmp-1675252624.7794013-9014-861916818482/ > /dev/null 2>&1 <i-XXXXXXXXXXXXXXX68> (0, '\r', '') <i-XXXXXXXXXXXXXXX68> CLOSING SSM CONNECTION TO: i-XXXXXXXXXXXXXXX68 i-XXXXXXXXXXXXXXX68 | FAILED! => { "msg": "Failed to set execute bit on remote files (rc: 1, err: )" } ``` **Note:** if you repeat the steps with a host with SSH connection, the step just completes without error. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
@bencehornak-gls Thanks for taking the time to open this issue. You're currently using release 5.0.0, we released 5.2.0 last week with a number of bugfixes, please could you try again with 5.2.0 Unfortunately the issue persists after upgrading to 5.2.0 <details> <summary>galaxy modules</summary> ansible-galaxy collection list: ```console # /home/bence/.ansible/collections/ansible_collections Collection Version ------------- ------- amazon.aws 5.2.0 community.aws 5.2.0 # /home/bence/.local/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 5.1.0 ansible.netcommon 4.1.0 ansible.posix 1.4.0 ansible.utils 2.8.0 ansible.windows 1.12.0 arista.eos 6.0.0 awx.awx 21.10.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.3.1 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.1 cisco.intersight 1.0.22 cisco.ios 4.0.0 cisco.iosxr 4.0.3 cisco.ise 2.5.9 cisco.meraki 2.13.0 cisco.mso 2.1.0 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.3 community.aws 5.0.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.9.0 community.digitalocean 1.22.0 community.dns 2.4.2 community.docker 3.3.1 community.fortios 1.0.0 community.general 6.1.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.0.0 community.hrobot 1.6.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.1 community.proxysql 1.4.0 community.rabbitmq 1.2.3 community.routeros 2.5.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.5.0 community.vmware 3.2.0 community.windows 1.11.1 community.zabbix 1.9.0 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.21.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.1 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.0 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.10.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.2.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.21.0 netapp.elementsw 21.7.0 netapp.ontap 22.0.1 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.1 netbox.netbox 3.9.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.15.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.2.0 sensu.sensu_go 1.13.1 splunk.es 2.1.0 t_systems_mms.icinga_director 1.31.4 theforeman.foreman 3.7.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.3.1 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` </details> Forgot to mention, but my workaround was to use `content: '\n'`. It is fine for my use case, because extra new-lines are ignored in this file, but might not be an option for some use-cases. That's helpful information. It looks like this is basically curl doing something weird: https://github.com/curl/curl/issues/183 I'm not sure what the appropriate fix would be :/ Yes, this seems to be the consequence of curl's crappy design here. I would also expect curl to create an empty file in this case. 
For the time-being I don't see any other ways than adding a touch command after the curl command to make sure that it exists (even if it is supposed to be empty). If you're willing to open a Pull Request we can probably get this patched and fixed pretty quickly: https://github.com/ansible-collections/community.aws/blob/main/plugins/connection/aws_ssm.py#L832 with an extra test in https://github.com/ansible-collections/community.aws/blob/main/tests/integration/targets/connection/test_connection.yml
2023-02-03T09:09:40
ansible-collections/community.aws
1,711
ansible-collections__community.aws-1711
[ "1710" ]
549ca71c638fafe6eac30fac16f4e7eb4f8ef7b5
diff --git a/plugins/modules/iam_access_key.py b/plugins/modules/iam_access_key.py --- a/plugins/modules/iam_access_key.py +++ b/plugins/modules/iam_access_key.py @@ -69,8 +69,8 @@ - name: Delete the access_key community.aws.iam_access_key: - name: example_user - access_key_id: AKIA1EXAMPLE1EXAMPLE + user_name: example_user + id: AKIA1EXAMPLE1EXAMPLE state: absent '''
Broken example in iam_access_key ### Summary The "Delete the access key" example in the `iam_access_key` module is broken. It's currently: ```yaml - name: Delete the access_key community.aws.iam_access_key: name: example_user access_key_id: AKIA1EXAMPLE1EXAMPLE state: absent ``` There are two issues: - the `name` attribute doesn't exist - it should be `user_name` (or the `username` alias). - the `access_key_id` attribute should just be `id`. The `access_key_id` attribute specifies credentials for the module to use to access the API, not the ID of the access key we're trying to delete (which is specified by `id`). Corrected example: ```yaml - name: Delete the access_key community.aws.iam_access_key: user_name: example_user id: AKIA1EXAMPLE1EXAMPLE state: absent ``` ### Issue Type Documentation Report ### Component Name iam_access_key ### Ansible Version ```console (paste below) ansible [core 2.14.2] config file = None configured module search path = ['/Users/grt006/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/lib/python3.10/site-packages/ansible ansible collection location = /Users/grt006/.ansible/collections:/usr/share/ansible/collections executable location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/ansible python version = 3.10.9 (main, Dec 15 2022, 17:11:09) [Clang 14.0.0 (clang-1400.0.29.202)] (/Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/python) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) Collection Version ----------------------------- ------- amazon.aws 5.2.0 ansible.netcommon 4.1.0 ansible.posix 1.5.1 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.0 awx.awx 21.11.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.3 cisco.intersight 1.0.23 cisco.ios 4.3.1 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.0 cisco.mso 2.2.1 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.4 community.aws 5.2.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.10.0 community.digitalocean 1.23.0 community.dns 2.5.0 community.docker 3.4.0 community.fortios 1.0.0 community.general 6.3.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.1.0 community.hrobot 1.7.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.7.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.6.0 community.vmware 3.3.0 community.windows 1.12.0 community.zabbix 1.9.1 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.5.0 dellemc.unity 1.5.0 f5networks.f5_modules 1.22.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.2 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.1.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.1 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 
netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.2.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.10.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.16.2 purestorage.flashblade 1.10.0 purestorage.fusion 1.3.0 sensu.sensu_go 1.13.2 splunk.es 2.1.0 t_systems_mms.icinga_director 1.32.0 theforeman.foreman 3.8.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.7.0 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) CONFIG_FILE() = None ``` ### OS / Environment Linux ### Additional Information _No response_ ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/iam_access_key.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/iam_access_key.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2023-02-07T19:56:22
ansible-collections/community.aws
1,712
ansible-collections__community.aws-1712
[ "1710" ]
4631d2d3976a9bd8631dab2fa35c3a8cfcb7fb72
diff --git a/plugins/modules/iam_access_key.py b/plugins/modules/iam_access_key.py --- a/plugins/modules/iam_access_key.py +++ b/plugins/modules/iam_access_key.py @@ -68,8 +68,8 @@ - name: Delete the access_key community.aws.iam_access_key: - name: example_user - access_key_id: AKIA1EXAMPLE1EXAMPLE + user_name: example_user + id: AKIA1EXAMPLE1EXAMPLE state: absent '''
Broken example in iam_access_key ### Summary The "Delete the access key" example in the `iam_access_key` module is broken. It's currently: ```yaml - name: Delete the access_key community.aws.iam_access_key: name: example_user access_key_id: AKIA1EXAMPLE1EXAMPLE state: absent ``` There are two issues: - the `name` attribute doesn't exist - it should be `user_name` (or the `username` alias). - the `access_key_id` attribute should just be `id`. The `access_key_id` attribute specifies credentials for the module to use to access the API, not the ID of the access key we're trying to delete (which is specified by `id`). Corrected example: ```yaml - name: Delete the access_key community.aws.iam_access_key: user_name: example_user id: AKIA1EXAMPLE1EXAMPLE state: absent ``` ### Issue Type Documentation Report ### Component Name iam_access_key ### Ansible Version ```console (paste below) ansible [core 2.14.2] config file = None configured module search path = ['/Users/grt006/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/lib/python3.10/site-packages/ansible ansible collection location = /Users/grt006/.ansible/collections:/usr/share/ansible/collections executable location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/ansible python version = 3.10.9 (main, Dec 15 2022, 17:11:09) [Clang 14.0.0 (clang-1400.0.29.202)] (/Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/python) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) Collection Version ----------------------------- ------- amazon.aws 5.2.0 ansible.netcommon 4.1.0 ansible.posix 1.5.1 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.0 awx.awx 21.11.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.3 cisco.intersight 1.0.23 cisco.ios 4.3.1 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.0 cisco.mso 2.2.1 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.4 community.aws 5.2.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.10.0 community.digitalocean 1.23.0 community.dns 2.5.0 community.docker 3.4.0 community.fortios 1.0.0 community.general 6.3.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.1.0 community.hrobot 1.7.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.7.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.6.0 community.vmware 3.3.0 community.windows 1.12.0 community.zabbix 1.9.1 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.5.0 dellemc.unity 1.5.0 f5networks.f5_modules 1.22.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.2 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.1.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.1 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 
netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.2.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.10.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.16.2 purestorage.flashblade 1.10.0 purestorage.fusion 1.3.0 sensu.sensu_go 1.13.2 splunk.es 2.1.0 t_systems_mms.icinga_director 1.32.0 theforeman.foreman 3.8.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.7.0 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) CONFIG_FILE() = None ``` ### OS / Environment Linux ### Additional Information _No response_ ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/iam_access_key.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/iam_access_key.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2023-02-08T06:34:51
ansible-collections/community.aws
1,713
ansible-collections__community.aws-1713
[ "1710" ]
a79f996319579d6dbb252a93e61174fef0d521d8
diff --git a/plugins/modules/iam_access_key.py b/plugins/modules/iam_access_key.py --- a/plugins/modules/iam_access_key.py +++ b/plugins/modules/iam_access_key.py @@ -69,8 +69,8 @@ - name: Delete the access_key community.aws.iam_access_key: - name: example_user - access_key_id: AKIA1EXAMPLE1EXAMPLE + user_name: example_user + id: AKIA1EXAMPLE1EXAMPLE state: absent '''
Broken example in iam_access_key ### Summary The "Delete the access key" example in the `iam_access_key` module is broken. It's currently: ```yaml - name: Delete the access_key community.aws.iam_access_key: name: example_user access_key_id: AKIA1EXAMPLE1EXAMPLE state: absent ``` There are two issues: - the `name` attribute doesn't exist - it should be `user_name` (or the `username` alias). - the `access_key_id` attribute should just be `id`. The `access_key_id` attribute specifies credentials for the module to use to access the API, not the ID of the access key we're trying to delete (which is specified by `id`). Corrected example: ```yaml - name: Delete the access_key community.aws.iam_access_key: user_name: example_user id: AKIA1EXAMPLE1EXAMPLE state: absent ``` ### Issue Type Documentation Report ### Component Name iam_access_key ### Ansible Version ```console (paste below) ansible [core 2.14.2] config file = None configured module search path = ['/Users/grt006/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/lib/python3.10/site-packages/ansible ansible collection location = /Users/grt006/.ansible/collections:/usr/share/ansible/collections executable location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/ansible python version = 3.10.9 (main, Dec 15 2022, 17:11:09) [Clang 14.0.0 (clang-1400.0.29.202)] (/Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/python) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) Collection Version ----------------------------- ------- amazon.aws 5.2.0 ansible.netcommon 4.1.0 ansible.posix 1.5.1 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.0 awx.awx 21.11.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.3 cisco.intersight 1.0.23 cisco.ios 4.3.1 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.0 cisco.mso 2.2.1 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.4 community.aws 5.2.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.10.0 community.digitalocean 1.23.0 community.dns 2.5.0 community.docker 3.4.0 community.fortios 1.0.0 community.general 6.3.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.1.0 community.hrobot 1.7.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.7.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.6.0 community.vmware 3.3.0 community.windows 1.12.0 community.zabbix 1.9.1 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.5.0 dellemc.unity 1.5.0 f5networks.f5_modules 1.22.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.2 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.1.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.1 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 
netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.2.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.10.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.16.2 purestorage.flashblade 1.10.0 purestorage.fusion 1.3.0 sensu.sensu_go 1.13.2 splunk.es 2.1.0 t_systems_mms.icinga_director 1.32.0 theforeman.foreman 3.8.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.7.0 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) CONFIG_FILE() = None ``` ### OS / Environment Linux ### Additional Information _No response_ ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/iam_access_key.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/iam_access_key.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2023-02-08T06:35:05
ansible-collections/community.aws
1,715
ansible-collections__community.aws-1715
[ "1626" ]
2ffdb581c449335751122de2329c8f860862d73e
diff --git a/plugins/modules/secretsmanager_secret.py b/plugins/modules/secretsmanager_secret.py --- a/plugins/modules/secretsmanager_secret.py +++ b/plugins/modules/secretsmanager_secret.py @@ -29,6 +29,14 @@ default: 'present' choices: ['present', 'absent'] type: str + overwrite: + description: + - Whether to overwrite an existing secret with the same name. + - If set to C(True), an existing secret with the same I(name) will be overwritten. + - If set to C(False), a secret with the given I(name) will only be created if none exists. + type: bool + default: True + version_added: 5.3.0 recovery_window: description: - Only used if state is absent. @@ -130,6 +138,14 @@ state: absent secret_type: 'string' secret: "{{ super_secret_string }}" + +- name: Only create a new secret, but do not update if alredy exists by name + community.aws.secretsmanager_secret: + name: 'random_string' + state: present + secret_type: 'string' + secret: "{{ lookup('community.general.random_string', length=16, special=false) }}" + overwrite: false ''' RETURN = r''' @@ -524,6 +540,7 @@ def main(): argument_spec={ 'name': dict(required=True), 'state': dict(choices=['present', 'absent'], default='present'), + 'overwrite': dict(type='bool', default=True), 'description': dict(default=""), 'replica': dict(type='list', elements='dict', options=replica_args), 'kms_key_id': dict(), @@ -580,12 +597,15 @@ def main(): result = secrets_mgr.put_resource_policy(secret) changed = True else: + # current_secret exists; decide what to do with it if current_secret.get("DeletedDate"): secrets_mgr.restore_secret(secret.name) changed = True if not secrets_mgr.secrets_match(secret, current_secret): - result = secrets_mgr.update_secret(secret) - changed = True + overwrite = module.params.get('overwrite') + if overwrite: + result = secrets_mgr.update_secret(secret) + changed = True if not rotation_match(secret, current_secret): result = secrets_mgr.update_rotation(secret) changed = True
diff --git a/tests/integration/targets/secretsmanager_secret/tasks/basic.yml b/tests/integration/targets/secretsmanager_secret/tasks/basic.yml --- a/tests/integration/targets/secretsmanager_secret/tasks/basic.yml +++ b/tests/integration/targets/secretsmanager_secret/tasks/basic.yml @@ -688,6 +688,68 @@ that: - result is not changed + # ============================================================ + # Overwrite testing + # ============================================================ + + - name: Create secret with overwrite = False (Check mode) + aws_secret: + name: "{{ secret_name }}-2" + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}" + overwrite: False + register: result + check_mode: True + + - name: assert key is changed + assert: + that: + - result is changed + + - name: Create secret with overwrite = False + aws_secret: + name: "{{ secret_name }}-2" + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}" + overwrite: False + register: result + + - name: assert key is changed + assert: + that: + - result is changed + + - name: Update secret with overwrite = False (Check mode) + aws_secret: + name: "{{ secret_name }}-2" + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}-2" + overwrite: False + register: result + check_mode: True + + - name: assert key is not changed + assert: + that: + - result is not changed + + - name: Create secret with overwrite = False + aws_secret: + name: "{{ secret_name }}-2" + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}-2" + overwrite: False + register: result + + - name: assert key is not changed + assert: + that: + - result is not changed + # ============================================================ # Removal testing # ============================================================ @@ -749,3 +811,10 @@ state: absent recovery_window: 0 ignore_errors: yes + + - name: remove secret 2 + aws_secret: + name: "{{ secret_name }}-2" + state: absent + recovery_window: 0 + ignore_errors: yes
secretsmanager_secret module should not overwrite an existing Secret ### Summary The [`community.aws.secretsmanager_secret`](https://docs.ansible.com/ansible/latest/collections/community/aws/secretsmanager_secret_module.html) module currently offers no option to *not* overwrite a Secret if it exists with the same name but a different value. This forces the user to first try to check if the Secret exists and then skip the task if it does. It will simply call `secrets_mgr.update_secret(secret)` and overwrite the existing one. If the intended Secret value itself is, for example, a random password, the option to only lookup that the Secret exists (but not that its values match) would be a nice feature. Relevant code: https://github.com/ansible-collections/community.aws/blob/99978ef51ce1372d2f36b501b084b2bf54381073/plugins/modules/secretsmanager_secret.py#L479 ### Issue Type Feature Idea ### Component Name secretsmanager_secret ### Additional Information ```yaml - name: Try to retrive existing elastic secrets from AWS Secrets Manager ansible.builtin.set_fact: elastic_user_password: "{{ lookup('amazon.aws.aws_secret', clustername + '/' + elastic_namespace + '.elastic-user-password', nested=true, region=region, on_missing='error') }}" kibana_client_secret: "{{ lookup('amazon.aws.aws_secret', clustername + '/' + elastic_namespace + '.keycloak-secret', nested=true, region=region, on_missing='error') }}" register: secrets_found ignore_errors: true - name: Create elastic user password and Keycloak AWS secrets if necessary community.aws.secretsmanager_secret: name: "{{ clustername }}/{{ elastic_namespace }}" description: Elastic secrets for {{ elastic_namespace }} state: present secret_type: "string" json_secret: { "elastic-user-password": "{{ lookup('community.general.random_string', length=16, special=false) }}", "keycloak-secret": "{{ lookup('community.general.random_string', length=16, special=false) }}" } region: "{{ region }}" when: secrets_found is failed ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
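With the overwrite option this patch introduces, the lookup-plus-conditional pattern from the Additional Information section above could be collapsed into a single task; a sketch assuming the same clustername, elastic_namespace and region variables as the reporter's example:

```yaml
- name: Create elastic secrets only if they do not already exist
  community.aws.secretsmanager_secret:
    name: "{{ clustername }}/{{ elastic_namespace }}"
    description: Elastic secrets for {{ elastic_namespace }}
    state: present
    secret_type: "string"
    json_secret: {
      "elastic-user-password": "{{ lookup('community.general.random_string', length=16, special=false) }}",
      "keycloak-secret": "{{ lookup('community.general.random_string', length=16, special=false) }}"
    }
    overwrite: false
    region: "{{ region }}"
```

With overwrite set to false the module only creates the secret when none exists under that name and leaves an existing value untouched, so the random passwords are not regenerated on every run.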
Files identified in the description: * [`lib/ansible/plugins/lookup`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/lib/ansible/plugins/lookup) * [`plugins/modules/secretsmanager_secret.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/secretsmanager_secret.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @rrey @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2023-02-08T13:14:03
ansible-collections/community.aws
1,724
ansible-collections__community.aws-1724
[ "174" ]
4baf1cf53f36cc2167c668483d7bd1e683ea58f1
diff --git a/plugins/modules/lightsail.py b/plugins/modules/lightsail.py --- a/plugins/modules/lightsail.py +++ b/plugins/modules/lightsail.py @@ -47,6 +47,38 @@ - Launch script that can configure the instance with additional data. type: str default: '' + public_ports: + description: + - A list of dictionaries to describe the ports to open for the specified instance. + type: list + elements: dict + suboptions: + from_port: + description: The first port in a range of open ports on the instance. + type: int + required: true + to_port: + description: The last port in a range of open ports on the instance. + type: int + required: true + protocol: + description: The IP protocol name accepted for the defined range of open ports. + type: str + choices: ['tcp', 'all', 'udp', 'icmp'] + required: true + cidrs: + description: + - The IPv4 address, or range of IPv4 addresses (in CIDR notation) that are allowed to connect to the instance through the ports, and the protocol. + - One of I(cidrs) or I(ipv6_cidrs) must be specified. + type: list + elements: str + ipv6_cidrs: + description: + - The IPv6 address, or range of IPv6 addresses (in CIDR notation) that are allowed to connect to the instance through the ports, and the protocol. + - One of I(cidrs) or I(ipv6_cidrs) must be specified. + type: list + elements: str + version_added: 6.0.0 key_pair_name: description: - Name of the key pair to use with the instance. @@ -83,6 +115,12 @@ bundle_id: nano_1_0 key_pair_name: id_rsa user_data: " echo 'hello world' > /home/ubuntu/test.txt" + public_ports: + - from_port: 22 + to_port: 22 + protocol: "tcp" + cidrs: ["0.0.0.0/0"] + ipv6_cidrs: ["::/0"] register: my_instance - name: Delete an instance @@ -155,6 +193,7 @@ pass from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code @@ -194,17 +233,28 @@ def wait_for_instance_state(module, client, instance_name, states): ' {1}'.format(instance_name, states)) -def create_instance(module, client, instance_name): +def update_public_ports(module, client, instance_name): + try: + client.put_instance_public_ports( + portInfos=snake_dict_to_camel_dict(module.params.get("public_ports")), + instanceName=instance_name, + ) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +def create_or_update_instance(module, client, instance_name): inst = find_instance_info(module, client, instance_name) - if inst: - module.exit_json(changed=False, instance=camel_dict_to_snake_dict(inst)) - else: - create_params = {'instanceNames': [instance_name], - 'availabilityZone': module.params.get('zone'), - 'blueprintId': module.params.get('blueprint_id'), - 'bundleId': module.params.get('bundle_id'), - 'userData': module.params.get('user_data')} + + if not inst: + create_params = { + "instanceNames": [instance_name], + "availabilityZone": module.params.get("zone"), + "blueprintId": module.params.get("blueprint_id"), + "bundleId": module.params.get("bundle_id"), + "userData": module.params.get("user_data"), + } key_pair_name = module.params.get('key_pair_name') if key_pair_name: @@ -219,9 +269,15 @@ def create_instance(module, client, instance_name): if wait: desired_states = ['running'] wait_for_instance_state(module, client, instance_name, desired_states) - inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) - 
module.exit_json(changed=True, instance=camel_dict_to_snake_dict(inst)) + if module.params.get("public_ports") is not None: + update_public_ports(module, client, instance_name) + after_update_inst = find_instance_info(module, client, instance_name, fail_if_not_found=True) + + module.exit_json( + changed=after_update_inst != inst, + instance=camel_dict_to_snake_dict(after_update_inst), + ) def delete_instance(module, client, instance_name): @@ -302,16 +358,29 @@ def start_or_stop_instance(module, client, instance_name, state): def main(): argument_spec = dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted', - 'rebooted']), - zone=dict(type='str'), - blueprint_id=dict(type='str'), - bundle_id=dict(type='str'), - key_pair_name=dict(type='str'), - user_data=dict(type='str', default=''), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=300, type='int'), + name=dict(type="str", required=True), + state=dict( + type="str", default="present", choices=["present", "absent", "stopped", "running", "restarted", "rebooted"] + ), + zone=dict(type="str"), + blueprint_id=dict(type="str"), + bundle_id=dict(type="str"), + key_pair_name=dict(type="str"), + user_data=dict(type="str", default=""), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=300, type="int"), + public_ports=dict( + type="list", + elements="dict", + options=dict( + from_port=dict(type="int", required=True), + to_port=dict(type="int", required=True), + protocol=dict(type="str", choices=["tcp", "all", "udp", "icmp"], required=True), + cidrs=dict(type="list", elements="str"), + ipv6_cidrs=dict(type="list", elements="str"), + ), + required_one_of=[("cidrs", "ipv6_cidrs")], + ), ) module = AnsibleAWSModule(argument_spec=argument_spec, @@ -323,7 +392,7 @@ def main(): state = module.params.get('state') if state == 'present': - create_instance(module, client, name) + create_or_update_instance(module, client, name) elif state == 'absent': delete_instance(module, client, name) elif state in ('running', 'stopped'): diff --git a/plugins/modules/lightsail_snapshot.py b/plugins/modules/lightsail_snapshot.py new file mode 100644 --- /dev/null +++ b/plugins/modules/lightsail_snapshot.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: lightsail_snapshot +version_added: "6.0.0" +short_description: Creates snapshots of AWS Lightsail instances +description: + - Creates snapshots of AWS Lightsail instances. +author: + - "Nuno Saavedra (@Nfsaavedra)" +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + type: str + snapshot_name: + description: Name of the new instance snapshot. + required: true + type: str + instance_name: + description: + - Name of the instance to create the snapshot. + - Required when I(state=present). + type: str + wait: + description: + - Wait for the instance snapshot to be created before returning. + type: bool + default: true + wait_timeout: + description: + - How long before I(wait) gives up, in seconds. 
+ default: 300 + type: int + +extends_documentation_fragment: +- amazon.aws.common.modules +- amazon.aws.region.modules +- amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Create AWS Lightsail snapshot + lightsail_snapshot: + region: us-east-1 + snapshot_name: "my_instance_snapshot" + instance_name: "my_instance" + +- name: Delete AWS Lightsail snapshot + lightsail_snapshot: + region: us-east-1 + snapshot_name: "my_instance_snapshot" + state: absent +""" + +RETURN = r""" +changed: + description: if a snapshot has been modified/created + returned: always + type: bool + sample: + changed: true +snapshot: + description: instance snapshot data + type: dict + returned: always + sample: + arn: "arn:aws:lightsail:us-east-1:070807442430:InstanceSnapshot/54b0f785-7132-443d-9e32-95a6825636a4" + created_at: "2023-02-23T18:46:11.183000+00:00" + from_attached_disks: [] + from_blueprint_id: "amazon_linux_2" + from_bundle_id: "nano_2_0" + from_instance_arn: "arn:aws:lightsail:us-east-1:070807442430:Instance/5ca1e7ca-a994-4e19-bb82-deb9d79e9ca3" + from_instance_name: "my_instance" + is_from_auto_snapshot: false + location: + availability_zone: "all" + region_name: "us-east-1" + name: "my_instance_snapshot" + resource_type: "InstanceSnapshot" + size_in_gb: 20 + state: "available" + support_code: "351201681302/ami-06b48e5589f1e248b" + tags: [] +""" + +import time + +try: + import botocore +except ImportError: + # will be caught by AnsibleAWSModule + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + +from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule + + +def find_instance_snapshot_info(module, client, instance_snapshot_name, fail_if_not_found=False): + try: + res = client.get_instance_snapshot(instanceSnapshotName=instance_snapshot_name) + except is_boto3_error_code("NotFoundException") as e: + if fail_if_not_found: + module.fail_json_aws(e) + return None + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + return res["instanceSnapshot"] + + +def wait_for_instance_snapshot(module, client, instance_snapshot_name): + wait_timeout = module.params.get("wait_timeout") + wait_max = time.time() + wait_timeout + snapshot = find_instance_snapshot_info(module, client, instance_snapshot_name) + + while wait_max > time.time(): + snapshot = find_instance_snapshot_info(module, client, instance_snapshot_name) + current_state = snapshot["state"] + if current_state != "pending": + break + time.sleep(5) + else: + module.fail_json(msg=f'Timed out waiting for instance snapshot "{instance_snapshot_name}" to be created.') + + return snapshot + + +def create_snapshot(module, client): + snapshot = find_instance_snapshot_info(module, client, module.params.get("snapshot_name")) + new_instance = snapshot is None + + if module.check_mode or not new_instance: + snapshot = snapshot if snapshot is not None else {} + module.exit_json( + changed=new_instance, + instance_snapshot=camel_dict_to_snake_dict(snapshot), + ) + + try: + snapshot = client.create_instance_snapshot( + instanceSnapshotName=module.params.get("snapshot_name"), + instanceName=module.params.get("instance_name"), + ) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + if module.params.get("wait"): + snapshot = wait_for_instance_snapshot(module, client, module.params.get("snapshot_name")) 
+ + module.exit_json( + changed=new_instance, + instance_snapshot=camel_dict_to_snake_dict(snapshot), + ) + + +def delete_snapshot(module, client): + snapshot = find_instance_snapshot_info(module, client, module.params.get("snapshot_name")) + if module.check_mode or snapshot is None: + changed = not (snapshot is None) + instance = snapshot if changed else {} + module.exit_json(changed=changed, instance=instance) + + try: + client.delete_instance_snapshot(instanceSnapshotName=module.params.get("snapshot_name")) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(changed=True, instance=camel_dict_to_snake_dict(snapshot)) + + +def main(): + argument_spec = dict( + state=dict(type="str", default="present", choices=["present", "absent"]), + snapshot_name=dict(type="str", required=True), + instance_name=dict(type="str"), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=300, type="int"), + ) + required_if = [ + ["state", "present", ("instance_name",)], + ] + + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) + client = module.client("lightsail") + + state = module.params.get("state") + + if state == "present": + create_snapshot(module, client) + elif state == "absent": + delete_snapshot(module, client) + + +if __name__ == "__main__": + main()
diff --git a/tests/integration/targets/lightsail/tasks/main.yml b/tests/integration/targets/lightsail/tasks/main.yml --- a/tests/integration/targets/lightsail/tasks/main.yml +++ b/tests/integration/targets/lightsail/tasks/main.yml @@ -17,6 +17,12 @@ zone: "{{ zone }}" blueprint_id: amazon_linux_2 bundle_id: nano_2_0 + public_ports: + - from_port: 50 + to_port: 50 + protocol: "tcp" + cidrs: ["0.0.0.0/0"] + ipv6_cidrs: ["::/0"] wait: yes register: result @@ -25,6 +31,20 @@ - result.changed == True - "'instance' in result and result.instance.name == instance_name" - "result.instance.state.name == 'running'" + - "result.instance.networking.ports[0].from_port == 50" + - "{{ result.instance.networking.ports|length }} == 1" + + - name: Check if it does not delete public ports config when no value is provided + lightsail: + name: "{{ instance_name }}" + zone: "{{ zone }}" + blueprint_id: amazon_linux + bundle_id: nano_2_0 + register: result + + - assert: + that: + - result.changed == False - name: Make sure create is idempotent lightsail: @@ -32,6 +52,12 @@ zone: "{{ zone }}" blueprint_id: amazon_linux_2 bundle_id: nano_2_0 + public_ports: + - from_port: 50 + to_port: 50 + protocol: "tcp" + cidrs: ["0.0.0.0/0"] + ipv6_cidrs: ["::/0"] register: result - assert: diff --git a/tests/integration/targets/lightsail_snapshot/aliases b/tests/integration/targets/lightsail_snapshot/aliases new file mode 100644 --- /dev/null +++ b/tests/integration/targets/lightsail_snapshot/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/tests/integration/targets/lightsail_snapshot/defaults/main.yml b/tests/integration/targets/lightsail_snapshot/defaults/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/lightsail_snapshot/defaults/main.yml @@ -0,0 +1,3 @@ +instance_name: "{{ resource_prefix }}_instance" +snapshot_name: "{{ resource_prefix }}_instance_snapshot" +zone: "{{ aws_region }}a" diff --git a/tests/integration/targets/lightsail_snapshot/meta/main.yml b/tests/integration/targets/lightsail_snapshot/meta/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/lightsail_snapshot/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/tests/integration/targets/lightsail_snapshot/tasks/main.yml b/tests/integration/targets/lightsail_snapshot/tasks/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/lightsail_snapshot/tasks/main.yml @@ -0,0 +1,85 @@ +--- + +- module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key | default(omit) }}' + aws_secret_key: '{{ aws_secret_key | default(omit) }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region | default(omit) }}' + + block: + + # ==== Tests =================================================== + + - name: Create a new instance + lightsail: + name: "{{ instance_name }}" + zone: "{{ zone }}" + blueprint_id: amazon_linux_2 + bundle_id: nano_2_0 + wait: yes + + - name: Create a new snapshot + lightsail_snapshot: + snapshot_name: "{{ snapshot_name }}" + instance_name: "{{ instance_name }}" + region: "{{ aws_region }}" + wait: yes + register: result + + - assert: + that: + - result.changed == True + - "'instance_snapshot' in result and result.instance_snapshot.name == '{{ snapshot_name }}'" + - "result.instance_snapshot.state == 'available'" + + - name: Make sure instance snapshot creation is idempotent + lightsail_snapshot: + snapshot_name: "{{ snapshot_name }}" + instance_name: "{{ instance_name }}" + region: "{{ aws_region }}" + wait: yes + register: result + + - assert: + 
that: + - result.changed == False + + - name: Delete the instance snapshot + lightsail_snapshot: + snapshot_name: "{{ snapshot_name }}" + region: "{{ aws_region }}" + state: absent + register: result + + - assert: + that: + - result.changed == True + + - name: Make sure instance snapshot deletion is idempotent + lightsail_snapshot: + snapshot_name: "{{ snapshot_name }}" + region: "{{ aws_region }}" + state: absent + register: result + + - assert: + that: + - result.changed == False + + # ==== Cleanup ==================================================== + + always: + + - name: Cleanup - delete instance snapshot + lightsail_snapshot: + snapshot_name: "{{ snapshot_name }}" + region: "{{ aws_region }}" + state: absent + ignore_errors: yes + + - name: Cleanup - delete instance + lightsail: + name: "{{ instance_name }}" + state: absent + ignore_errors: yes
lightsail - Add firewall settings to lightsail module

##### SUMMARY
Extend the AWS lightsail module with additional parameters to configure instance network settings, including firewall rules and static IP binding.

##### ISSUE TYPE
- Feature Idea

##### COMPONENT NAME
[community.aws.lightsail](https://github.com/ansible-collections/community.aws/blob/main/docs/community.aws.lightsail_module.rst)

##### ADDITIONAL INFORMATION
The additional parameters would allow the desired state of a Lightsail instance's firewall to be set. Currently this requires post-creation modification with either the AWS CLI or the console. In addition, binding to an existing Lightsail static IP would be helpful.

Suggested yaml format

```yaml
- name: Create a new Lightsail instance, register the instance details
  lightsail:
    state: present
    name: new_instance
    region: ap-southeast-2
    firewall: # application, protocol, port, from
      - ['SSH', 'TCP', 22, '10.10.10.10/24']
      - ['HTTPS', 'TCP', 443, '10.10.10.10/24']
    staticip: static_ip1
```
@bentaylr Would the following PR fix your issue? https://github.com/ansible-collections/community.aws/pull/259

Thanks @gravesm, that will definitely help with configuring the static IP. Though I'd still need to set firewall settings via aws cli, encouraging to see progress. Thanks @danielcotton

@bentaylr It's worth noting that my PR will create static IPs in Lightsail, but you won't be able to attach an instance to them yet. I'm hoping to eventually add functionality for:
- Attaching static IPs
- Setting firewall rules
- Setting metric alarms

Unfortunately I'm still relatively new to writing Ansible modules, and free time is at a premium these days.

Static IP support was added in #259
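For reference, the firewall side of this request eventually landed as a `public_ports` option rather than the `firewall` list suggested in the issue. A minimal sketch of its usage, following the lightsail integration test modified in this PR; the SSH port, zone and CIDR values below are illustrative assumptions, not taken from the tests:

```yaml
- name: Create a Lightsail instance and restrict its firewall
  community.aws.lightsail:
    name: new_instance
    zone: ap-southeast-2a
    blueprint_id: amazon_linux_2
    bundle_id: nano_2_0
    public_ports:
      - from_port: 22
        to_port: 22
        protocol: tcp
        cidrs: ["10.10.10.10/24"]
        ipv6_cidrs: ["::/0"]
    wait: yes
```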
2023-02-23T20:11:30
ansible-collections/community.aws
1,732
ansible-collections__community.aws-1732
[ "1106" ]
6744af66921f241053738dbd3a236130389a2f04
diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py --- a/plugins/modules/ecs_service.py +++ b/plugins/modules/ecs_service.py @@ -44,7 +44,7 @@ task_definition: description: - The task definition the service will run. - - This parameter is required when I(state=present). + - This parameter is required when I(state=present) unless I(force_new_deployment=True). - This parameter is ignored when updating a service with a C(CODE_DEPLOY) deployment controller in which case the task definition is managed by Code Pipeline and cannot be updated. required: false @@ -971,14 +971,15 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, - required_if=[('state', 'present', ['task_definition']), - ('launch_type', 'FARGATE', ['network_configuration'])], + required_if=[('launch_type', 'FARGATE', ['network_configuration'])], required_together=[['load_balancers', 'role']], mutually_exclusive=[['launch_type', 'capacity_provider_strategy']]) - if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA': - if module.params['desired_count'] is None: + if module.params['state'] == 'present': + if module.params['scheduling_strategy'] == 'REPLICA' and module.params['desired_count'] is None: module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count') + if module.params['task_definition'] is None and not module.params['force_new_deployment']: + module.fail_json(msg='Either task_definition or force_new_deployment is required when status is present.') if len(module.params['capacity_provider_strategy']) > 6: module.fail_json(msg='AWS allows a maximum of six capacity providers in the strategy.') @@ -1075,6 +1076,9 @@ def main(): updatedLoadBalancers = loadBalancers if existing['deploymentController']['type'] == 'ECS' else [] + if task_definition is None and module.params['force_new_deployment']: + task_definition = existing['taskDefinition'] + # update required response = service_mgr.update_service(module.params['name'], module.params['cluster'],
diff --git a/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml --- a/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml +++ b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml @@ -112,6 +112,32 @@ that: - ecs_service_again.changed +- name: force_new_deployment should work without providing a task_definition + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + state: present + force_new_deployment: yes + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + placement_constraints: + - type: distinctInstance + health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + role: "{{ ecs_service_role_name }}" + register: ecs_service_notaskdef + +- name: check that ECS service changed again due to force_new_deployment with no task definition + assert: + that: + - ecs_service_notaskdef.changed + - name: attempt to use ECS network configuration on task definition without awsvpc network_mode (expected to fail) vars: ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
ecs_service - Support force_new_deployment without having to specify a task definition

### Summary
Sometimes it can be helpful to simply restart all tasks of a service, without having to deal with the task definitions (since they do not need to change). For that purpose, the aws cli offers the following option:
```
aws ecs update-service --cluster cluster_name --service service_name --force-new-deployment
```
This is particularly useful when we want external files to be picked up again, while the task definition itself does not need any change.

We can note that boto3 already supports this option (at least since `v1.14.2`, which I'm working with):
```python
import boto3
client = boto3.client('ecs')
client.update_service(cluster='cluster_name', service='service_name', forceNewDeployment=True)
```
However, the `ecs_service` module cannot do that: when we specify `state: present` without a task definition, we get the following error:
```
fatal: [localhost]: FAILED! => {"changed": false, "msg": "state is present but all of the following are missing: task_definition"}
```

### Issue Type
Feature Idea

### Component Name
ecs_service

### Additional Information
A rough idea would be to add an additional parameter (like `preserve_tasks_definitions`) which would, when true, skip the check for the presence of `task_definition`.

### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
I forgot to mention: it is indeed workaroundable, but it requires some extra tasks to retrieve information about loadBalancers & taskDefinition.
```ansible
- name: Retrieve service details
  community.aws.ecs_service_info:
    cluster: "{{ cluster_name }}"
    service: "{{ service_name }}"
    details: true
  register: service_details

- name: Reload ECS service
  community.aws.ecs_service:
    cluster: "{{ cluster_name }}"
    name: "{{ service_name }}"
    state: present
    force_new_deployment: yes
    load_balancers: "{{ service_details.services | map(attribute='loadBalancers') | first }}"
    task_definition: "{{ service_details.services | map(attribute='taskDefinition') | first }}"
```
The main idea of this feature request is to improve the user experience by not having to deal with data we do not care about at the moment.

> We can note that boto3 already supports this option (at least since `v1.14.2`, which I'm working with):
>
> ```python
> import boto3
> client = boto3.client('ecs')
> client.update_service(cluster='cluster_name', service='service_name', forceNewDeployment=True)
> ```
>
> However, the `ecs_service` module cannot do that: when we specify `state: present` without a task definition, we get the following error:
>
> ```
> fatal: [localhost]: FAILED! => {"changed": false, "msg": "state is present but all of the following are missing: task_definition"}
> ```

So a new boolean parameter `force_new_deployment` must be introduced. When it has no default value, it can be mutually exclusive with the `state` parameter. @giom-l do you have some time to implement this new feature?

Hi, not this week, but I can take a look at it next week, sure. Just to be sure about what to do: you write about a new boolean parameter, but there is already an existing one with this name: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_service_module.html#parameter-force_new_deployment It is just the behaviour of this one that should be changed, right? (NB: I don't have the whole code in front of me right now, so I may have missed something.)

Ah sorry, I thought the entire parameter was missing. So yes, imo only the internal logic must be fixed, so that `force_new_deployment: yes` also works on an existing service without the need to specify the task definition.
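With the change in this PR, that restart can be expressed directly, without first looking up the task definition. A minimal sketch of the resulting playbook task, mirroring the integration test added above; the service and cluster names here are placeholders:

```yaml
- name: Force a new deployment of an existing ECS service
  community.aws.ecs_service:
    state: present
    name: service_name
    cluster: cluster_name
    force_new_deployment: yes
```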
2023-03-02T09:58:18
ansible-collections/community.aws
1,763
ansible-collections__community.aws-1763
[ "1762" ]
e3a89f6b59d7a193f93c1d47eca09d0dd1785255
diff --git a/plugins/modules/ec2_launch_template.py b/plugins/modules/ec2_launch_template.py --- a/plugins/modules/ec2_launch_template.py +++ b/plugins/modules/ec2_launch_template.py @@ -38,6 +38,12 @@ - Which version should be the default when users spin up new instances based on this template? By default, the latest version will be made the default. type: str default: latest + version_description: + version_added: 5.5.0 + description: + - The description of a launch template version. + default: "" + type: str state: description: - Whether the launch template should exist or not. @@ -576,8 +582,10 @@ def create_or_update(module, template_options): template, template_versions = existing_templates(module) out['changed'] = True elif template and template_versions: - most_recent = sorted(template_versions, key=lambda x: x['VersionNumber'])[-1] - if lt_data == most_recent['LaunchTemplateData']: + most_recent = sorted(template_versions, key=lambda x: x["VersionNumber"])[-1] + if lt_data == most_recent["LaunchTemplateData"] and module.params["version_description"] == most_recent.get( + "VersionDescription", "" + ): out['changed'] = False return out try: @@ -586,6 +594,7 @@ def create_or_update(module, template_options): LaunchTemplateId=template['LaunchTemplateId'], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, + VersionDescription=str(module.params["version_description"]), aws_retry=True, ) elif module.params.get('source_version') == 'latest': @@ -593,7 +602,8 @@ def create_or_update(module, template_options): LaunchTemplateId=template['LaunchTemplateId'], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, - SourceVersion=str(most_recent['VersionNumber']), + SourceVersion=str(most_recent["VersionNumber"]), + VersionDescription=str(module.params["version_description"]), aws_retry=True, ) else: @@ -609,7 +619,8 @@ def create_or_update(module, template_options): LaunchTemplateId=template['LaunchTemplateId'], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, - SourceVersion=str(source_version['VersionNumber']), + SourceVersion=str(source_version["VersionNumber"]), + VersionDescription=str(module.params["version_description"]), aws_retry=True, ) @@ -782,11 +793,12 @@ def main(): ) arg_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), - template_name=dict(aliases=['name']), - template_id=dict(aliases=['id']), - default_version=dict(default='latest'), - source_version=dict(default='latest') + state=dict(choices=["present", "absent"], default="present"), + template_name=dict(aliases=["name"]), + template_id=dict(aliases=["id"]), + default_version=dict(default="latest"), + source_version=dict(default="latest"), + version_description=dict(default=""), ) arg_spec.update(template_options)
diff --git a/tests/integration/targets/ec2_launch_template/tasks/versions.yml b/tests/integration/targets/ec2_launch_template/tasks/versions.yml --- a/tests/integration/targets/ec2_launch_template/tasks/versions.yml +++ b/tests/integration/targets/ec2_launch_template/tasks/versions.yml @@ -69,6 +69,21 @@ - lt.latest_version == 4 - lt.latest_template.launch_template_data.instance_type == "c4.large" + - name: update simple instance template + ec2_launch_template: + name: "{{ resource_prefix }}-simple" + version_description: "Fix something." + register: lt + + - name: instance with cpu_options created with the right options + assert: + that: + - lt is success + - lt is changed + - lt.default_version == 5 + - lt.latest_version == 5 + - lt.latest_template.version_description == "Fix something." + always: - name: delete the template ec2_launch_template:
Add version_description to ec2_launch_template

### Summary
I found that `ec2_launch_template` does not expose `VersionDescription` from `create_launch_template_version`.
- [create_launch_template_version](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2/client/create_launch_template_version.html)

### Issue Type
Feature Idea

### Component Name
ec2_launch_template

### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
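A minimal sketch of how the new option is used once exposed, following the integration test added above; the template name and description values are illustrative:

```yaml
- name: Create a new launch template version with a description
  community.aws.ec2_launch_template:
    name: my-template
    instance_type: c4.large
    version_description: "Fix something."
```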
2023-03-29T18:44:40
ansible-collections/community.aws
1,764
ansible-collections__community.aws-1764
[ "1761" ]
dc6b4585c91898a59394b21400654ab89a2e5bd8
diff --git a/plugins/modules/msk_cluster.py b/plugins/modules/msk_cluster.py --- a/plugins/modules/msk_cluster.py +++ b/plugins/modules/msk_cluster.py @@ -122,7 +122,15 @@ sasl_scram: description: SASL/SCRAM authentication is enabled or not. type: bool - default: False + sasl_iam: + version_added: 5.5.0 + description: IAM authentication is enabled or not. + type: bool + unauthenticated: + version_added: 5.5.0 + description: Option to explicitly turn on or off authentication + type: bool + default: True enhanced_monitoring: description: Specifies the level of monitoring for the MSK cluster. choices: @@ -382,13 +390,21 @@ def prepare_create_options(module): if module.params["authentication"]: c_params["ClientAuthentication"] = {} - if module.params["authentication"].get("sasl_scram"): - c_params["ClientAuthentication"]["Sasl"] = { - "Scram": module.params["authentication"]["sasl_scram"] - } + if module.params["authentication"].get("sasl_scram") or module.params["authentication"].get("sasl_iam"): + sasl = {} + if module.params["authentication"].get("sasl_scram"): + sasl["Scram"] = {"Enabled": True} + if module.params["authentication"].get("sasl_iam"): + sasl["Iam"] = {"Enabled": True} + c_params["ClientAuthentication"]["Sasl"] = sasl if module.params["authentication"].get("tls_ca_arn"): c_params["ClientAuthentication"]["Tls"] = { - "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"] + "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"], + "Enabled": True, + } + if module.params["authentication"].get("unauthenticated"): + c_params["ClientAuthentication"] = { + "Unauthenticated": {"Enabled": True}, } c_params.update(prepare_enhanced_monitoring_options(module)) @@ -713,7 +729,9 @@ def main(): type="dict", options=dict( tls_ca_arn=dict(type="list", elements="str", required=False), - sasl_scram=dict(type="bool", default=False), + sasl_scram=dict(type="bool", required=False), + sasl_iam=dict(type="bool", required=False), + unauthenticated=dict(type="bool", default=True, required=False), ), ), enhanced_monitoring=dict(
diff --git a/tests/integration/targets/msk_cluster-auth/aliases b/tests/integration/targets/msk_cluster-auth/aliases new file mode 100644 --- /dev/null +++ b/tests/integration/targets/msk_cluster-auth/aliases @@ -0,0 +1,4 @@ +cloud/aws +time=46m + +msk_cluster diff --git a/tests/integration/targets/msk_cluster-auth/defaults/main.yml b/tests/integration/targets/msk_cluster-auth/defaults/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/msk_cluster-auth/defaults/main.yml @@ -0,0 +1,19 @@ +--- +vpc_name: "{{ resource_prefix }}-mskc-a" +vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' +vpc_subnets: + - '10.{{ 256 | random(seed=resource_prefix) }}.100.0/24' + - '10.{{ 256 | random(seed=resource_prefix) }}.101.0/24' +vpc_subnet_name_prefix: "{{ resource_prefix }}" + +msk_config_name: "{{ resource_prefix }}-msk-cluster-auth" +msk_cluster_name: "{{ tiny_prefix }}-msk-cluster-auth" +msk_version: 2.8.1 +msk_broker_nodes: 2 + +tags_create: + key1: "value1" + key2: "value2" +tags_update: + key2: "value2" + key3: "value3" diff --git a/tests/integration/targets/msk_cluster-auth/meta/main.yml b/tests/integration/targets/msk_cluster-auth/meta/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/msk_cluster-auth/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/tests/integration/targets/msk_cluster-auth/tasks/main.yml b/tests/integration/targets/msk_cluster-auth/tasks/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/msk_cluster-auth/tasks/main.yml @@ -0,0 +1,91 @@ +--- +- name: aws_msk_cluster integration tests + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - amazon.aws + block: + - name: collect availability zone info + aws_az_info: + register: az_info + + - name: assert there are at least two zones + assert: + that: az_info.availability_zones | length >= 2 + + - name: create vpc + ec2_vpc_net: + state: present + cidr_block: '{{ vpc_cidr }}' + name: '{{ vpc_name }}' + register: vpc + + - name: create subnets + ec2_vpc_subnet: + state: present + cidr: '{{ item }}' + az: '{{ az_info.availability_zones[index].zone_name }}' + vpc_id: '{{ vpc.vpc.id }}' + tags: + Name: '{{ vpc_subnet_name_prefix }}-subnet-{{ index }}' + loop: "{{ vpc_subnets }}" + loop_control: + index_var: index + register: subnets + + - set_fact: + subnet_ids: '{{ subnets | community.general.json_query("results[].subnet.id") | list }}' + + # ============================================================ + - name: create msk configuration + aws_msk_config: + name: "{{ msk_config_name }}" + state: "present" + kafka_versions: + - "{{ msk_version }}" + register: msk_config + + - name: create test with sasl_iam + include_tasks: test_create_auth.yml + + always: + + - name: delete msk cluster + aws_msk_cluster: + name: "{{ msk_cluster_name }}" + state: absent + wait: true + ignore_errors: yes + + - name: remove msk configuration + aws_msk_config: + name: "{{ msk_config_name }}" + state: absent + ignore_errors: yes + + - name: remove subnets + ec2_vpc_subnet: + state: absent + cidr: '{{ item }}' + vpc_id: '{{ vpc.vpc.id }}' + loop: "{{ vpc_subnets }}" + ignore_errors: yes + register: removed_subnets + until: removed_subnets is succeeded + retries: 5 + delay: 5 + + - name: remove the vpc + ec2_vpc_net: + state: absent + cidr_block: '{{ vpc_cidr }}' + name: '{{ vpc_name }}' + 
ignore_errors: yes + register: removed_vpc + until: removed_vpc is success + retries: 5 + delay: 5 diff --git a/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml b/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml @@ -0,0 +1,101 @@ +--- +- name: create a msk cluster with authentication flipped from default (check mode) + aws_msk_cluster: + name: "{{ msk_cluster_name }}" + state: "present" + version: "{{ msk_version }}" + nodes: "{{ msk_broker_nodes }}" + ebs_volume_size: 10 + authentication: + sasl_iam: true + sasl_scram: true + unauthenticated: false + subnets: "{{ subnet_ids }}" + wait: true + tags: "{{ tags_create }}" + configuration_arn: "{{ msk_config.arn }}" + configuration_revision: "{{ msk_config.revision }}" + check_mode: yes + register: msk_cluster + +- name: assert that the msk cluster be created + assert: + that: + - msk_cluster is changed + +- name: create a msk cluster with authentication flipped from default + aws_msk_cluster: + name: "{{ msk_cluster_name }}" + state: "present" + version: "{{ msk_version }}" + nodes: "{{ msk_broker_nodes }}" + ebs_volume_size: 10 + authentication: + sasl_iam: true + sasl_scram: true + unauthenticated: false + subnets: "{{ subnet_ids }}" + wait: true + tags: "{{ tags_create }}" + configuration_arn: "{{ msk_config.arn }}" + configuration_revision: "{{ msk_config.revision }}" + register: msk_cluster + +- name: assert that the msk cluster is created + assert: + that: + - msk_cluster is changed + +- name: validate return values + assert: + that: + - "'cluster_info' in msk_cluster" + - "'bootstrap_broker_string' in msk_cluster" + - "'key1' in msk_cluster.cluster_info.tags" + - "msk_cluster.cluster_info.tags.key1 == 'value1'" + - "msk_cluster.cluster_info.cluster_name == msk_cluster_name" + - "msk_cluster.cluster_info.number_of_broker_nodes == msk_broker_nodes" + - "msk_cluster.cluster_info.broker_node_group_info.instance_type == 'kafka.t3.small'" + - "msk_cluster.cluster_info.broker_node_group_info.storage_info.ebs_storage_info.volume_size == 10" + - "msk_cluster.cluster_info.client_authentication.sasl.iam.enabled == true" + - "msk_cluster.cluster_info.client_authentication.sasl.scram.enabled == true" + # Not always returned by API + # - "msk_cluster.cluster_info.client_authentication.unauthenticated.enabled == false" + - "msk_cluster.cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker == false" + - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:{{ aws_region }}:')" + +- name: create a msk cluster with authentication flipped from default (idempotency) + aws_msk_cluster: + name: "{{ msk_cluster_name }}" + state: "present" + version: "{{ msk_version }}" + nodes: "{{ msk_broker_nodes }}" + ebs_volume_size: 10 + authentication: + sasl_iam: true + sasl_scram: true + unauthenticated: false + subnets: "{{ subnet_ids }}" + wait: true + tags: "{{ tags_create }}" + configuration_arn: "{{ msk_config.arn }}" + configuration_revision: "{{ msk_config.revision }}" + register: msk_cluster + +- name: assert that the msk cluster wasn't changed + assert: + that: + - msk_cluster is not changed + +### Keep delete simple as we're not checking delete here +- name: delete msk cluster + aws_msk_cluster: + name: "{{ msk_cluster_name }}" + state: "absent" + wait: true + register: msk_cluster + +- name: assert that the msk cluster is changed + assert: + that: + - msk_cluster is changed diff 
--git a/tests/integration/targets/msk_cluster/defaults/main.yml b/tests/integration/targets/msk_cluster/defaults/main.yml --- a/tests/integration/targets/msk_cluster/defaults/main.yml +++ b/tests/integration/targets/msk_cluster/defaults/main.yml @@ -8,7 +8,7 @@ vpc_subnet_name_prefix: "{{ resource_prefix }}" msk_config_name: "{{ resource_prefix }}-msk-cluster" msk_cluster_name: "{{ tiny_prefix }}-msk-cluster" -msk_version: 2.6.0 +msk_version: 2.8.1 msk_broker_nodes: 2 tags_create:
msk_cluster - Cannot create a cluster w/ authentication sasl_scram ### Summary When I do ```- name: Create MSK cluster community.aws.msk_cluster: name: "{{ item.name }}" .................................................................... authentication: sasl_scram: true .................................................................... ``` i get an error: ``` "msg": "Failed to create kafka cluster: Parameter validation failed:\nInvalid type for parameter ClientAuthentication.Sasl.Scram, value: True, type: <class 'bool'>, valid types: <class 'dict'>" ``` Full traceback: ``` The full traceback is: Traceback (most recent call last): File "/tmp/ansible_community.aws.msk_cluster_payload_roovgjj3/ansible_community.aws.msk_cluster_payload.zip/ansible_collections/community/aws/plugins/modules/msk_cluster.py", line 484, in create_or_update_cluster File "/tmp/ansible_community.aws.msk_cluster_payload_roovgjj3/ansible_community.aws.msk_cluster_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/modules.py", line 354, in deciding_wrapper return retrying_wrapper(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/tmp/ansible_community.aws.msk_cluster_payload_roovgjj3/ansible_community.aws.msk_cluster_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/cloud.py", line 119, in _retry_wrapper return _retry_func( ^^^^^^^^^^^^ File "/tmp/ansible_community.aws.msk_cluster_payload_roovgjj3/ansible_community.aws.msk_cluster_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/cloud.py", line 69, in _retry_func return func() ^^^^^^ File "/home/.../.local/lib/python3.11/site-packages/botocore/client.py", line 530, in _api_call return self._make_api_call(operation_name, kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/.../.local/lib/python3.11/site-packages/botocore/client.py", line 919, in _make_api_call request_dict = self._convert_to_request_dict( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/.../.local/lib/python3.11/site-packages/botocore/client.py", line 990, in _convert_to_request_dict request_dict = self._serializer.serialize_to_request( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/.../.local/lib/python3.11/site-packages/botocore/validate.py", line 381, in serialize_to_request raise ParamValidationError(report=report.generate_report()) botocore.exceptions.ParamValidationError: Parameter validation failed: Invalid type for parameter ClientAuthentication.Sasl.Scram, value: True, type: <class 'bool'>, valid types: <class 'dict'> failed: [localhost] (item={'name': '...-msk-dev-2', 'configuration': '...-msk-dev-conf', 'version': '2.8.1', 'nodes': 3, 'ebs_volume_gb': 256, 'enhanced_monitoring': 'PER_TOPIC_PER_BROKER', 'instance_type': 'kafka.t3.small', 'open_monitoring': {'jmx_exporter': False, 'node_exporter': True}, 'subnets': ['subnet-...', 'subnet-...', 'subnet-...'], 'security_groups': ['sg-...'], 'tags': {'Payer': '...'}}) => { "ansible_loop_var": "item", "boto3_version": "1.26.72", "botocore_version": "1.29.72", "changed": false, "invocation": { "module_args": { "access_key": null, "authentication": { "sasl_scram": true, "tls_ca_arn": null }, ``` I can create the cluster w/o `authentication` but it creates an `Unauthenticated` cluster. 
### Issue Type Bug Report ### Component Name msk_cluster ### Ansible Version ansible [core 2.14.3] config file = .../infrastructure/ansible.cfg configured module search path = ['/home/.../.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python3.11/site-packages/ansible ansible collection location = /home/.../.ansible/collections:/usr/share/ansible/collections executable location = /usr/bin/ansible python version = 3.11.2 (main, Feb 8 2023, 00:00:00) [GCC 12.2.1 20221121 (Red Hat 12.2.1-4)] (/usr/bin/python3) jinja version = 3.0.3 libyaml = True ``` ### Collection Versions ``` $ ansible-galaxy collection list [DEPRECATION WARNING]: DEFAULT_GATHER_SUBSET option, the module_defaults keyword is a more generic version and can apply to all calls to the M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions, use module_defaults instead. This feature will be removed from ansible-core in version 2.18. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. # /home/.../.ansible/collections/ansible_collections Collection Version ----------------- ------- amazon.aws 5.4.0 ansible.posix 1.5.1 community.aws 5.4.0 community.general 6.5.0 community.mongodb 1.5.1 community.mysql 3.6.0 # /usr/lib/python3.11/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 5.2.0 ansible.netcommon 4.1.0 ansible.posix 1.5.1 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.0 awx.awx 21.12.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.4.0 cisco.asa 4.0.0 cisco.dnac 6.6.3 cisco.intersight 1.0.23 cisco.ios 4.3.1 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.1 cisco.mso 2.2.1 cisco.nso 1.0.3 cisco.nxos 4.1.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.4 community.aws 5.2.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.11.0 community.digitalocean 1.23.0 community.dns 2.5.1 community.docker 3.4.2 community.fortios 1.0.0 community.general 6.4.0 community.google 1.0.0 community.grafana 1.5.4 community.hashi_vault 4.1.0 community.hrobot 1.7.0 community.libvirt 1.2.0 community.mongodb 1.5.1 community.mysql 3.6.0 community.network 5.0.0 community.okd 2.3.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.7.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.6.1 community.vmware 3.4.0 community.windows 1.12.0 community.zabbix 1.9.2 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.5.0 dellemc.unity 1.5.0 f5networks.f5_modules 1.22.1 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.2 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.1.2 grafana.grafana 1.1.1 hetzner.hcloud 1.10.0 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.3.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.4.0 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.3.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.11.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 
ovirt.ovirt 2.4.1 purestorage.flasharray 1.17.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.3.0 sensu.sensu_go 1.13.2 splunk.es 2.1.0 t_systems_mms.icinga_director 1.32.0 theforeman.foreman 3.9.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.7.0 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` ### AWS SDK versions ``` $ pip show boto boto3 botocore Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /usr/local/lib/python3.11/site-packages Requires: Required-by: --- Name: boto3 Version: 1.26.72 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/.../.local/lib/python3.11/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.29.72 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/.../.local/lib/python3.11/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: awscli, boto3, s3transfer ``` ### Configuration ```$ ansible-config dump --only-changed [DEPRECATION WARNING]: DEFAULT_GATHER_SUBSET option, the module_defaults keyword is a more generic version and can apply to all calls to the M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions, use module_defaults instead. This feature will be removed from ansible-core in version 2.18. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. CACHE_PLUGIN(/home/.../infrastructure/ansible.cfg) = jsonfile CACHE_PLUGIN_CONNECTION(/home/.../infrastructure/ansible.cfg) = ~/.ansible/cache CACHE_PLUGIN_TIMEOUT(/home/.../infrastructure/ansible.cfg) = 3600 CALLBACKS_ENABLED(/home/.../infrastructure/ansible.cfg) = ['timer', 'profile_tasks', 'profile_roles'] CONFIG_FILE() = /home/.../infrastructure/ansible.cfg DEFAULT_ASK_PASS(/home/.../infrastructure/ansible.cfg) = False DEFAULT_EXECUTABLE(/home/.../infrastructure/ansible.cfg) = /bin/bash DEFAULT_FORCE_HANDLERS(/home/.../infrastructure/ansible.cfg) = True DEFAULT_FORKS(/home/.../infrastructure/ansible.cfg) = 15 DEFAULT_GATHERING(/home/.../infrastructure/ansible.cfg) = smart DEFAULT_GATHER_SUBSET(/home/.../infrastructure/ansible.cfg) = ['all'] DEFAULT_HOST_LIST(/home/.../infrastructure/ansible.cfg) = ['/home/.../infrastructure/envs'] DEFAULT_LOG_PATH(/home/.../infrastructure/ansible.cfg) = /home/.../.ansible/ansible.log DEFAULT_MANAGED_STR(/home/.../infrastructure/ansible.cfg) = Ansible managed! DON'T CHANGE THIS FILE BY HAND! You were warned! DEFAULT_ROLES_PATH(/home/.../infrastructure/ansible.cfg) = ['/home/.../infrastructure/roles'] DEFAULT_TIMEOUT(/home/.../infrastructure/ansible.cfg) = 30 DEPRECATION_WARNINGS(/home/.../infrastructure/ansible.cfg) = True HOST_KEY_CHECKING(/home/.../infrastructure/ansible.cfg) = False INVENTORY_ENABLED(/home/.../infrastructure/ansible.cfg) = ['yaml', 'aws_ec2', 'ini'] RETRY_FILES_ENABLED(/home/.../infrastructure/ansible.cfg) = False SHOW_CUSTOM_STATS(/home/.../infrastructure/ansible.cfg) = True ``` ### OS / Environment Fedora release 37 (Thirty Seven) ### Steps to Reproduce ``` - name: Create MSK cluster community.aws.msk_cluster: name: "{{ item.name }}" .................................................................... 
authentication: sasl_scram: true ``` ### Expected Results Create a MSK cluster w/ SASL/SCRAM authentication. ### Actual Results ``` "msg": "Failed to create kafka cluster: Parameter validation failed:\nInvalid type for parameter ClientAuthentication.Sasl.Scram, value: True, type: <class 'bool'>, valid types: <class 'dict'>" ``` As I'm working on this I can jump in at any time to further debug this. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
@gabriel-preda-adswizz thanks for your bug report. https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/msk_cluster.py#L383-L392 the generated object is wrong. ref https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kafka/client/create_cluster.html ```diff diff --git a/plugins/modules/msk_cluster.py b/plugins/modules/msk_cluster.py index 65c9edea..1b045c05 100644 --- a/plugins/modules/msk_cluster.py +++ b/plugins/modules/msk_cluster.py @@ -384,11 +384,13 @@ def prepare_create_options(module): c_params["ClientAuthentication"] = {} if module.params["authentication"].get("sasl_scram"): c_params["ClientAuthentication"]["Sasl"] = { - "Scram": module.params["authentication"]["sasl_scram"] + "Scram": { + "Enable": module.params["authentication"]["sasl_scram"] } if module.params["authentication"].get("tls_ca_arn"): c_params["ClientAuthentication"]["Tls"] = { - "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"] + "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"], + 'Enabled': True } c_params.update(prepare_enhanced_monitoring_options(module)) ``` this might work. @gabriel-preda-adswizz do you have some time for work on a fix to contribute or can you test if this patch fixes the issue for you? Thanx @markuman. The patch is working for me. However aside from the above I found this in output: ``` "invocation": { "module_args": { "access_key": null, "authentication": { "sasl_scram": true, "tls_ca_arn": null } ``` I didn't set anything about `tls_ca_arn`, I don't understand why I have that line in there. I only did: ``` authentication: sasl_scram: true ``` Thanx for the fast turnout (I can work w/ the patched version for some time). > I didn't set anything about `tls_ca_arn`, I don't understand why I have that line in there. That's because the module treated `tls_ca_arn` as `default(false)` if not provided. See: https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/msk_cluster.py#L715 So this is also a documentation bug, because the default value is missing there. * https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/msk_cluster.py#L118-L121 * https://docs.ansible.com/ansible/devel//collections/community/aws/msk_cluster_module.html#parameter-authentication/tls_ca_arn Ah not. It's wrong. There is no default. Basically the empty key must/can be popped out. So your patch is ok for me. What next? It's your contribution :1st_place_medal: @gabriel-preda-adswizz If you have time for it, feel free to make a pull request with * the patch/fix * a changelog fragment * and expand the integration test that covers the bug Hi @markuman, In preparing the pull request I've extended the fix for IAM authentication and also the posibility to disable unauthenticated clients that were not previously covered. Now I'm puzzled about the tests. There are a myriad of combinations :) and I'm still thinking in how to reorganize or only add those. > Now I'm puzzled about the tests. > There are a myriad of combinations :) and I'm still thinking in how to reorganize or only add those. Mostly it helps to create a new task file that covers just that case/bug. Example: https://github.com/ansible-collections/community.mysql/pull/503/files New task file for the scenario `revoke_only_grant.yml` is included in the `main.yml` of the integration test.
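For reference, the authentication layout this fix converges on is the one exercised by the new msk_cluster-auth integration test above. A minimal sketch, with cluster name and subnets as placeholders and other options from the test (such as ebs_volume_size, configuration_arn and configuration_revision) omitted for brevity:

```yaml
- community.aws.msk_cluster:
    name: my-msk-cluster
    state: present
    version: 2.8.1
    nodes: 2
    subnets: "{{ subnet_ids }}"
    authentication:
      sasl_scram: true
      sasl_iam: true
      unauthenticated: false
```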
2023-03-30T14:33:35
ansible-collections/community.aws
1,767
ansible-collections__community.aws-1767
[ "1736" ]
3ce57189b348dbebad51a9cd66954cf5b0d294f5
diff --git a/plugins/modules/elb_target_group.py b/plugins/modules/elb_target_group.py --- a/plugins/modules/elb_target_group.py +++ b/plugins/modules/elb_target_group.py @@ -729,9 +729,12 @@ def create_or_update_target_group(connection, module): if add_instances: instances_to_add = [] - for target in params['Targets']: - if target['Id'] in add_instances: - instances_to_add.append({'Id': target['Id'], 'Port': target['Port']}) + for target in params["Targets"]: + if target["Id"] in add_instances: + tmp_item = {"Id": target["Id"], "Port": target["Port"]} + if target.get("AvailabilityZone"): + tmp_item["AvailabilityZone"] = target["AvailabilityZone"] + instances_to_add.append(tmp_item) changed = True try:
diff --git a/tests/integration/targets/elb_target/tasks/ec2_target.yml b/tests/integration/targets/elb_target/tasks/ec2_target.yml --- a/tests/integration/targets/elb_target/tasks/ec2_target.yml +++ b/tests/integration/targets/elb_target/tasks/ec2_target.yml @@ -168,14 +168,8 @@ tags: Name: "{{ resource_prefix }}-inst" user_data: | - #cloud-config - package_upgrade: true - package_update: true - packages: - - httpd - runcmd: - - "service httpd start" - - echo "HELLO ANSIBLE" > /var/www/html/index.html + #!/bin/bash + sudo nohup python3 -m http.server 80 & register: ec2 - set_fact: @@ -480,6 +474,46 @@ - not result.changed - not result.target_health_descriptions + - name: create ip target group + elb_target_group: + name: "{{ tg_name }}-ip" + health_check_port: 443 + protocol: tcp + port: 443 + vpc_id: "{{ vpc.vpc.id }}" + state: present + target_type: ip + register: result + + - name: ip target group must be created + assert: + that: + - result.changed + - result.target_type == 'ip' + + - name: "mobify ip target group with AvailabilityZone: all" + elb_target_group: + name: "{{ tg_name }}-ip" + health_check_port: 443 + protocol: tcp + port: 443 + vpc_id: "{{ vpc.vpc.id }}" + state: present + target_type: ip + wait: false + modify_targets: true + targets: + - Id: 192.168.178.32 + Port: 443 + AvailabilityZone: all + register: result + + - name: ip target group must be modified + assert: + that: + - result.changed + - result.load_balancing_cross_zone_enabled == 'use_load_balancer_configuration' + # ============================================================ always: @@ -533,6 +567,7 @@ - "{{ tg_used_name }}" - "{{ tg_tcpudp_name }}" - "{{ tg_name }}-nlb" + - "{{ tg_name }}-ip" ignore_errors: true - name: remove routing rules
community.aws.elb_target_group ignoring targets.AvailabilityZone 'all' param ### Summary Having ALB in vpc-1 and targets in vpc-2 I am getting the error (despite that the targets.AvailabilityZone is set to 'all'): "Couldn't register targets: An error occurred (ValidationError) when calling the RegisterTargets operation: You must specify a valid Availability Zone or select 'all' for all enabled Availability zones, since the IP address '192.168.112.221' is outside the VPC" The ALB has Cross-zone load balancing enabled (On). Despite that the module is failing, the TG is being created with no targets. If register the target manually (from AWS console) the module will report OK: ok: [lvt00] => { "changed": false, ... } ### Issue Type Bug Report ### Component Name elb_target_group ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.13.4] config file = None configured module search path = ['/Users/octav/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /opt/homebrew/lib/python3.10/site-packages/ansible ansible collection location = /Users/octav/.ansible/collections:/usr/share/ansible/collections executable location = /opt/homebrew/bin/ansible python version = 3.10.8 (main, Oct 21 2022, 22:22:30) [Clang 14.0.0 (clang-1400.0.29.202)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /Users/octav/ansible/collections/ansible_collections Collection Version -------------------- ------- amazon.aws 5.2.0 ansible.posix 1.5.1 ansible.windows 1.13.0 community.aws 5.2.0 community.docker 3.4.1 community.postgresql 2.3.2 ``` ### AWS SDK versions ```console (paste below) $ pip3 show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.24.69 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /opt/homebrew/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.69 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /opt/homebrew/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed CALLBACKS_ENABLED(/Users/octav/ansible/workdir/ansible.cfg) = ['yaml', 'profile_roles'] COLLECTIONS_PATHS(/Users/octav/ansible/workdir/ansible.cfg) = ['/Users/octav/ansible/collections'] DEFAULT_HOST_LIST(/Users/octav/ansible/workdir/ansible.cfg) = ['/Users/octav/ansible/workdir/inventory'] DEFAULT_LOAD_CALLBACK_PLUGINS(/Users/octav/ansible/workdir/ansible.cfg) = True DEFAULT_ROLES_PATH(/Users/octav/ansible/workdir/ansible.cfg) = ['/Users/octav/ansible/roles'] HOST_KEY_CHECKING(/Users/octav/ansible/workdir/ansible.cfg) = False INTERPRETER_PYTHON(/Users/octav/ansible/workdir/ansible.cfg) = auto_silent INVENTORY_ENABLED(/Users/octav/ansible/workdir/ansible.cfg) = ['ini', 'host_list', 'script', 'auto', 'yaml', 'toml', 'tower'] RETRY_FILES_ENABLED(/Users/octav/ansible/workdir/ansible.cfg) = False ``` ### OS / Environment ProductName: macOS ProductVersion: 13.1 BuildVersion: 22C65 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - name: "Create TG" community.aws.elb_target_group: access_key: "{{ aws_access_key_id }}" secret_key: "{{ aws_secret_access_key }}" name: "name" protocol: http port: 81 vpc_id: 'vpc-069XXXXXXX2c' # VPC-1 region: eu-west-1 health_check_protocol: http health_check_path: / health_check_port: 81 successful_response_codes: "200-299" health_check_interval: 15 health_check_timeout: 5 healthy_threshold_count: 4 unhealthy_threshold_count: 3 state: present target_type: ip targets: - Id: "{{ ansible_facts.default_ipv4.address }}" Port: 81 AvailabilityZone: all tags: "{{ default_tags | combine({'Name': 'name'}) }}" ``` ### Expected Results ```console (paste below) changed: [lvt00] => { "changed": true, "deregistration_delay_timeout_seconds": "300", "health_check_enabled": true, "health_check_interval_seconds": 15, "health_check_path": "/", "health_check_port": "81", "health_check_protocol": "HTTP", "health_check_timeout_seconds": 5, "healthy_threshold_count": 4, "invocation": { "module_args": { "access_key": "XXXXXXXXXXXXXXXXXX", "aws_ca_bundle": null, "aws_config": null, "debug_botocore_endpoint_logs": false, "deregistration_connection_termination": false, "deregistration_delay_timeout": null, "endpoint_url": null, "health_check_interval": 15, "health_check_path": "/", "health_check_port": "81", "health_check_protocol": "http", "health_check_timeout": 5, "healthy_threshold_count": 4, "load_balancing_algorithm_type": null, "modify_targets": true, "name": "name", "port": 81, "preserve_client_ip_enabled": null, "profile": null, "protocol": "http", "protocol_version": null, "proxy_protocol_v2_enabled": null, "purge_tags": true, "region": "eu-west-1", "secret_key": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "session_token": null, "state": "present", "stickiness_app_cookie_duration": null, "stickiness_app_cookie_name": null, "stickiness_enabled": null, "stickiness_lb_cookie_duration": null, "stickiness_type": null, "successful_response_codes": "200-299", "tags": { "Env": "Env_PROD", "Ioper": "Ioper_T1", "Name": "name" }, "target_type": "ip", "targets": [ { "AvailabilityZone": "all", "Id": "192.168.112.221", "Port": 81 } ], "unhealthy_threshold_count": 3, "validate_certs": true, "vpc_id": "vpc-069XXXXXXX2c", 
"wait": false, "wait_timeout": 200 } }, "ip_address_type": "ipv4", "load_balancer_arns": [ "arn:aws:elasticloadbalancing:eu-west-1:58XXXXXXXX:loadbalancer/app/alb-ext/9e5cXXXXXXX25" ], "load_balancing_algorithm_type": "round_robin", "load_balancing_cross_zone_enabled": "use_load_balancer_configuration", "matcher": { "http_code": "200-299" }, "port": 81, "protocol": "HTTP", "protocol_version": "HTTP1", "slow_start_duration_seconds": "0", "stickiness_app_cookie_cookie_name": "", "stickiness_app_cookie_duration_seconds": "86400", "stickiness_enabled": "false", "stickiness_lb_cookie_duration_seconds": "86400", "stickiness_type": "lb_cookie", "tags": { "Env": "Env_PROD", "Ioper": "Ioper_T1", "Name": "name" }, "target_group_arn": "arn:aws:elasticloadbalancing:eu-west-1:XXXXXXX:targetgroup/bane/8e2b4XXXXXXXc", "target_group_health_dns_failover_minimum_healthy_targets_count": "1", "target_group_health_dns_failover_minimum_healthy_targets_percentage": "off", "target_group_health_unhealthy_state_routing_minimum_healthy_targets_count": "1", "target_group_health_unhealthy_state_routing_minimum_healthy_targets_percentage": "off", "target_group_name": "name", "target_type": "ip", "unhealthy_threshold_count": 3, "vpc_id": "vpc-069XXXXXXX2c" } ``` ### Actual Results ```console (paste below) The full traceback is: Traceback (most recent call last): File "/tmp/ansible_community.aws.elb_target_group_payload_fvdhi88t/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/community/aws/plugins/modules/elb_target_group.py", line 738, in create_or_update_target_group File "/tmp/ansible_community.aws.elb_target_group_payload_fvdhi88t/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/modules.py", line 354, in deciding_wrapper return retrying_wrapper(*args, **kwargs) File "/tmp/ansible_community.aws.elb_target_group_payload_fvdhi88t/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/cloud.py", line 119, in _retry_wrapper return _retry_func( File "/tmp/ansible_community.aws.elb_target_group_payload_fvdhi88t/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/cloud.py", line 69, in _retry_func return func() File "/usr/local/lib/python3.9/site-packages/botocore/client.py", line 530, in _api_call return self._make_api_call(operation_name, kwargs) File "/usr/local/lib/python3.9/site-packages/botocore/client.py", line 960, in _make_api_call raise error_class(parsed_response, operation_name) botocore.exceptions.ClientError: An error occurred (ValidationError) when calling the RegisterTargets operation: You must specify a valid Availability Zone or select 'all' for all enabled Availability zones, since the IP address '192.168.112.221' is outside the VPC fatal: [lvt00]: FAILED! 
=> { "boto3_version": "1.26.73", "botocore_version": "1.29.73", "changed": false, "error": { "code": "ValidationError", "message": "You must specify a valid Availability Zone or select 'all' for all enabled Availability zones, since the IP address '192.168.112.221' is outside the VPC", "type": "Sender" }, "invocation": { "module_args": { "access_key": "XXXXXXXXXXXXXXXXXX", "aws_ca_bundle": null, "aws_config": null, "debug_botocore_endpoint_logs": false, "deregistration_connection_termination": false, "deregistration_delay_timeout": null, "endpoint_url": null, "health_check_interval": 15, "health_check_path": "/", "health_check_port": "81", "health_check_protocol": "http", "health_check_timeout": 5, "healthy_threshold_count": 4, "load_balancing_algorithm_type": null, "modify_targets": true, "name": "name", "port": 81, "preserve_client_ip_enabled": null, "profile": null, "protocol": "http", "protocol_version": null, "proxy_protocol_v2_enabled": null, "purge_tags": true, "region": "eu-west-1", "secret_key": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "session_token": null, "state": "present", "stickiness_app_cookie_duration": null, "stickiness_app_cookie_name": null, "stickiness_enabled": null, "stickiness_lb_cookie_duration": null, "stickiness_type": null, "successful_response_codes": "200-299", "tags": { "Env": "Env_PROD", "Ioper": "Ioper_T1", "Name": "name" }, "target_type": "ip", "targets": [ { "AvailabilityZone": "all", "Id": "192.168.112.221", "Port": 81 } ], "unhealthy_threshold_count": 3, "validate_certs": true, "vpc_id": "vpc-069XXXXXXX2c", "wait": false, "wait_timeout": 200 } }, "msg": "Couldn't register targets: An error occurred (ValidationError) when calling the RegisterTargets operation: You must specify a valid Availability Zone or select 'all' for all enabled Availability zones, since the IP address '192.168.112.221' is outside the VPC", "response_metadata": { "http_headers": { "connection": "close", "content-length": "415", "content-type": "text/xml", "date": "Sat, 04 Mar 2023 15:41:37 GMT", "x-amzn-requestid": "a980830a-8683-4946-ada7-7d1ab6b7b6ad" }, "http_status_code": 400, "request_id": "a980830a-8683-4946-ada7-7d1ab6b7b6ad", "retry_attempts": 0 } } ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
The property `AvailabilityZone` is lost in line 734. https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/elb_target_group.py#L730-L738 this might be a hotfix ```diff diff --git a/plugins/modules/elb_target_group.py b/plugins/modules/elb_target_group.py index 784fa143..cd2f4162 100644 --- a/plugins/modules/elb_target_group.py +++ b/plugins/modules/elb_target_group.py @@ -731,7 +731,10 @@ def create_or_update_target_group(connection, module): instances_to_add = [] for target in params['Targets']: if target['Id'] in add_instances: - instances_to_add.append({'Id': target['Id'], 'Port': target['Port']}) + tmp = {'Id': target['Id'], 'Port': target['Port']} + if target.get('AvailabilityZone'): + tmp['AvailabilityZone'] = target['AvailabilityZone'] + instances_to_add.append(tmp) changed = True try: ``` @octavian2204 do you have some time to test this hotfix or to work on this bug and contribute a proper fix? @markuman thx a lot for your effort. Unfortunately, will not be able to do it today. Maybe tomorrow. Will keep you in touch. Hi @markuman, your hotfix worked like a charm! Thank you!
2023-04-05T05:37:11
ansible-collections/community.aws
1,770
ansible-collections__community.aws-1770
[ "1762" ]
bcdde29ad0b321b2971e2ae3c83e2bdfa46c5bf5
diff --git a/plugins/modules/ec2_launch_template.py b/plugins/modules/ec2_launch_template.py --- a/plugins/modules/ec2_launch_template.py +++ b/plugins/modules/ec2_launch_template.py @@ -43,6 +43,12 @@ - Which version should be the default when users spin up new instances based on this template? By default, the latest version will be made the default. type: str default: latest + version_description: + version_added: 5.5.0 + description: + - The description of a launch template version. + default: "" + type: str state: description: - Whether the launch template should exist or not. @@ -573,8 +579,10 @@ def create_or_update(module, template_options): template, template_versions = existing_templates(module) out['changed'] = True elif template and template_versions: - most_recent = sorted(template_versions, key=lambda x: x['VersionNumber'])[-1] - if lt_data == most_recent['LaunchTemplateData']: + most_recent = sorted(template_versions, key=lambda x: x["VersionNumber"])[-1] + if lt_data == most_recent["LaunchTemplateData"] and module.params["version_description"] == most_recent.get( + "VersionDescription", "" + ): out['changed'] = False return out try: @@ -583,6 +591,7 @@ def create_or_update(module, template_options): LaunchTemplateId=template['LaunchTemplateId'], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, + VersionDescription=str(module.params["version_description"]), aws_retry=True, ) elif module.params.get('source_version') == 'latest': @@ -590,7 +599,8 @@ def create_or_update(module, template_options): LaunchTemplateId=template['LaunchTemplateId'], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, - SourceVersion=str(most_recent['VersionNumber']), + SourceVersion=str(most_recent["VersionNumber"]), + VersionDescription=str(module.params["version_description"]), aws_retry=True, ) else: @@ -606,7 +616,8 @@ def create_or_update(module, template_options): LaunchTemplateId=template['LaunchTemplateId'], LaunchTemplateData=lt_data, ClientToken=uuid4().hex, - SourceVersion=str(source_version['VersionNumber']), + SourceVersion=str(source_version["VersionNumber"]), + VersionDescription=str(module.params["version_description"]), aws_retry=True, ) @@ -779,11 +790,12 @@ def main(): ) arg_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), - template_name=dict(aliases=['name']), - template_id=dict(aliases=['id']), - default_version=dict(default='latest'), - source_version=dict(default='latest') + state=dict(choices=["present", "absent"], default="present"), + template_name=dict(aliases=["name"]), + template_id=dict(aliases=["id"]), + default_version=dict(default="latest"), + source_version=dict(default="latest"), + version_description=dict(default=""), ) arg_spec.update(template_options)
diff --git a/tests/integration/targets/ec2_launch_template/tasks/versions.yml b/tests/integration/targets/ec2_launch_template/tasks/versions.yml --- a/tests/integration/targets/ec2_launch_template/tasks/versions.yml +++ b/tests/integration/targets/ec2_launch_template/tasks/versions.yml @@ -69,6 +69,21 @@ - lt.latest_version == 4 - lt.latest_template.launch_template_data.instance_type == "c4.large" + - name: update simple instance template + ec2_launch_template: + name: "{{ resource_prefix }}-simple" + version_description: "Fix something." + register: lt + + - name: instance with cpu_options created with the right options + assert: + that: + - lt is success + - lt is changed + - lt.default_version == 5 + - lt.latest_version == 5 + - lt.latest_template.version_description == "Fix something." + always: - name: delete the template ec2_launch_template:
Add version_description to ec2_launch_template ### Summary I found that `ec2_launch_template` does not expose `VersionDescription` from `create_launch_template_version`. - [create_launch_template_version](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2/client/create_launch_template_version.html) ### Issue Type Feature Idea ### Component Name ec2_launch_template ### Additional Information <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
@squirrel532 do you have time to contribute this feature? Sure. Let me create a PR for this.
2023-04-12T07:45:27
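For reference, a minimal boto3 sketch of the API surface the record above concerns: `create_launch_template_version` accepts a `VersionDescription` argument alongside the template data, which is the field the module now exposes as `version_description`. The template id, source version, and instance type below are placeholder values for illustration, not taken from the PR.

```python
# Minimal sketch: pass a description when creating a new launch template version.
# The identifiers used here are placeholders, not values from the PR above.
import boto3

ec2 = boto3.client("ec2")
response = ec2.create_launch_template_version(
    LaunchTemplateId="lt-0123456789abcdef0",          # placeholder template id
    SourceVersion="1",                                # copy settings from version 1
    VersionDescription="Fix something.",              # the field surfaced by the module
    LaunchTemplateData={"InstanceType": "c4.large"},
)
print(response["LaunchTemplateVersion"]["VersionNumber"])
```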
ansible-collections/community.aws
1,773
ansible-collections__community.aws-1773
[ "1771" ]
63920a1cc57f539145eac784cd7cadbff3f7662d
diff --git a/plugins/modules/eks_nodegroup.py b/plugins/modules/eks_nodegroup.py --- a/plugins/modules/eks_nodegroup.py +++ b/plugins/modules/eks_nodegroup.py @@ -514,7 +514,11 @@ def create_or_update_nodegroups(client, module): if module.params['release_version'] is not None: params['releaseVersion'] = module.params['release_version'] if module.params['remote_access'] is not None: - params['remoteAccess'] = module.params['remote_access'] + params['remoteAccess'] = dict() + if module.params['remote_access']['ec2_ssh_key'] is not None: + params['remoteAccess']['ec2SshKey'] = module.params['remote_access']['ec2_ssh_key'] + if module.params['remote_access']['source_sg'] is not None: + params['remoteAccess']['sourceSecurityGroups'] = module.params['remote_access']['source_sg'] if module.params['capacity_type'] is not None: params['capacityType'] = module.params['capacity_type'].upper() if module.params['labels'] is not None:
diff --git a/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml b/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml --- a/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml +++ b/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml @@ -37,6 +37,19 @@ with_items: '{{ eks_security_groups|reverse|list + additional_eks_sg }}' ignore_errors: 'yes' +- name: Delete securitygroup for node access + amazon.aws.ec2_security_group: + name: 'ansible-test-eks_nodegroup' + description: "SSH access" + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: [] + state: absent + +- name: Delete Keypair for Access to Nodegroup nodes + amazon.aws.ec2_key: + name: "ansible-test-eks_nodegroup" + state: absent + - name: remove Route Tables ec2_vpc_route_table: state: absent diff --git a/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml b/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml --- a/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml +++ b/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml @@ -106,3 +106,20 @@ default_version: 1 instance_type: t3.micro register: lt + +- name: Create securitygroup for node access + amazon.aws.ec2_security_group: + name: 'ansible-test-eks_nodegroup' + description: "SSH access" + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: + - proto: tcp + ports: + - 22 + cidr_ip: 0.0.0.0/0 + register: securitygroup_eks_nodegroup + +- name: Create Keypair for Access to Nodegroup nodes + amazon.aws.ec2_key: + name: "ansible-test-eks_nodegroup" + register: ec2_key_eks_nodegroup diff --git a/tests/integration/targets/eks_nodegroup/tasks/full_test.yml b/tests/integration/targets/eks_nodegroup/tasks/full_test.yml --- a/tests/integration/targets/eks_nodegroup/tasks/full_test.yml +++ b/tests/integration/targets/eks_nodegroup/tasks/full_test.yml @@ -80,6 +80,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result check_mode: True @@ -114,6 +118,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result @@ -147,6 +155,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result check_mode: True @@ -181,6 +193,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result check_mode: True @@ -255,6 +271,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result check_mode: True @@ -289,6 +309,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result @@ -322,6 +346,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result 
check_mode: True @@ -356,6 +384,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result
eks_nodegroup module remote_access config not working as documented ### Summary When I try to create a nodegroup with remote_access config as documented, I got following error: `Couldn't create Nodegroup devcluster-eu-central-1a.: Parameter validation failed: Unknown parameter in remoteAccess: "ec2_ssh_key", must be one of: ec2SshKey, sourceSecurityGroups Unknown parameter in remoteAccess: "source_sg", must be one of: ec2SshKey, sourceSecurityGroups ` However when I use the suggested camelcase, I get an validation error: `msg: 'Unsupported parameters for (community.aws.eks_nodegroup) module: remote_access.ec2SshKey, remote_access.sourceSecurityGroups. Supported parameters include: ec2_ssh_key, source_sg.' ` Think there needs to be a translation from unterscore ansible definition to camelcase from boto in this line, similar to "update_config": https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/eks_nodegroup.py#L518 I will try to debug further and may provide a PR if successful. ### Issue Type Bug Report ### Component Name eks_nodegroup ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.14.3] config file = None configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.10/dist-packages/ansible ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.10.6 (main, Mar 10 2023, 10:55:28) [GCC 11.3.0] (/usr/bin/python3) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list community.aws 5.2.0 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Name: boto3 Version: 1.26.95 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.29.95 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed CONFIG_FILE() = None ``` ### OS / Environment ```console (paste below) $ cat /etc/lsb-release DISTRIB_ID=Ubuntu DISTRIB_RELEASE=22.04 DISTRIB_CODENAME=jammy DISTRIB_DESCRIPTION="Ubuntu 22.04.2 LTS" ``` ### Steps to Reproduce Using the module quiet normal ```yaml (paste below) - name: Create nodegroup for internal usage community.aws.eks_nodegroup: name: "{{ eks_cluster_name }}-{{ item.subnet.availability_zone }}" cluster_name: "{{ eks_cluster_name }}" node_role: "{{ iam_role_eks_nodegroup.arn }}" subnets: - "{{ item.subnet.id }}" scaling_config: "{{ eks_nodegroup.scaling_config }}" update_config: max_unavailable: 1 disk_size: "{{ eks_nodegroup.disk_size }}" instance_types: "{{ eks_nodegroup.instance_types }}" ami_type: "{{ eks_nodegroup.ami_type }}" labels: env: "internal" capacity_type: "{{ eks_nodegroup.capacity_type }}" remote_access: ec2_ssh_key: "{{ k8s_admin_keypair.key.id }}" source_sg: - "{{ k8s_ssh_securitygroup.group_id }}" wait: true ``` ### Expected Results No error from boto or the module itself and injected ssh key on all workers ### Actual Results ```console (paste below) Couldn't create Nodegroup devcluster-eu-central-1a.: Parameter validation failed: Unknown parameter in remoteAccess: "ec2_ssh_key", must be one of: ec2SshKey, sourceSecurityGroups Unknown parameter in remoteAccess: "source_sg", must be one of: ec2SshKey, sourceSecurityGroups ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
2023-04-13T10:15:15
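A hedged sketch of the translation the patch above performs: the module's snake_case `remote_access` suboptions have to be re-keyed to the camelCase names that `create_nodegroup` validates against, copying only the keys that were actually set. The helper name and the sample key pair and security group id are illustrative, not part of the module.

```python
# Sketch of the snake_case -> camelCase re-keying done before calling
# eks_client.create_nodegroup(**params); only keys that were set are copied.
def build_remote_access(remote_access):
    if not remote_access:
        return None
    mapped = {}
    if remote_access.get("ec2_ssh_key") is not None:
        mapped["ec2SshKey"] = remote_access["ec2_ssh_key"]
    if remote_access.get("source_sg") is not None:
        mapped["sourceSecurityGroups"] = remote_access["source_sg"]
    return mapped


# Placeholder cluster, key, and security group names for illustration only.
params = {"clusterName": "devcluster", "nodegroupName": "devcluster-eu-central-1a"}
remote = build_remote_access({"ec2_ssh_key": "k8s-admin", "source_sg": ["sg-0123456789abcdef0"]})
if remote:
    params["remoteAccess"] = remote  # now keyed as ec2SshKey / sourceSecurityGroups
```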
ansible-collections/community.aws
1,776
ansible-collections__community.aws-1776
[ "1736" ]
63920a1cc57f539145eac784cd7cadbff3f7662d
diff --git a/plugins/modules/elb_target_group.py b/plugins/modules/elb_target_group.py --- a/plugins/modules/elb_target_group.py +++ b/plugins/modules/elb_target_group.py @@ -729,9 +729,12 @@ def create_or_update_target_group(connection, module): if add_instances: instances_to_add = [] - for target in params['Targets']: - if target['Id'] in add_instances: - instances_to_add.append({'Id': target['Id'], 'Port': target['Port']}) + for target in params["Targets"]: + if target["Id"] in add_instances: + tmp_item = {"Id": target["Id"], "Port": target["Port"]} + if target.get("AvailabilityZone"): + tmp_item["AvailabilityZone"] = target["AvailabilityZone"] + instances_to_add.append(tmp_item) changed = True try:
diff --git a/tests/integration/targets/elb_target/tasks/ec2_target.yml b/tests/integration/targets/elb_target/tasks/ec2_target.yml --- a/tests/integration/targets/elb_target/tasks/ec2_target.yml +++ b/tests/integration/targets/elb_target/tasks/ec2_target.yml @@ -168,14 +168,8 @@ tags: Name: "{{ resource_prefix }}-inst" user_data: | - #cloud-config - package_upgrade: true - package_update: true - packages: - - httpd - runcmd: - - "service httpd start" - - echo "HELLO ANSIBLE" > /var/www/html/index.html + #!/bin/bash + sudo nohup python3 -m http.server 80 & register: ec2 - set_fact: @@ -480,6 +474,46 @@ - not result.changed - not result.target_health_descriptions + - name: create ip target group + elb_target_group: + name: "{{ tg_name }}-ip" + health_check_port: 443 + protocol: tcp + port: 443 + vpc_id: "{{ vpc.vpc.id }}" + state: present + target_type: ip + register: result + + - name: ip target group must be created + assert: + that: + - result.changed + - result.target_type == 'ip' + + - name: "mobify ip target group with AvailabilityZone: all" + elb_target_group: + name: "{{ tg_name }}-ip" + health_check_port: 443 + protocol: tcp + port: 443 + vpc_id: "{{ vpc.vpc.id }}" + state: present + target_type: ip + wait: false + modify_targets: true + targets: + - Id: 192.168.178.32 + Port: 443 + AvailabilityZone: all + register: result + + - name: ip target group must be modified + assert: + that: + - result.changed + - result.load_balancing_cross_zone_enabled == 'use_load_balancer_configuration' + # ============================================================ always: @@ -533,6 +567,7 @@ - "{{ tg_used_name }}" - "{{ tg_tcpudp_name }}" - "{{ tg_name }}-nlb" + - "{{ tg_name }}-ip" ignore_errors: true - name: remove routing rules
community.aws.elb_target_group ignoring targets.AvailabilityZone 'all' param ### Summary Having ALB in vpc-1 and targets in vpc-2 I am getting the error (despite that the targets.AvailabilityZone is set to 'all'): "Couldn't register targets: An error occurred (ValidationError) when calling the RegisterTargets operation: You must specify a valid Availability Zone or select 'all' for all enabled Availability zones, since the IP address '192.168.112.221' is outside the VPC" The ALB has Cross-zone load balancing enabled (On). Despite that the module is failing, the TG is being created with no targets. If register the target manually (from AWS console) the module will report OK: ok: [lvt00] => { "changed": false, ... } ### Issue Type Bug Report ### Component Name elb_target_group ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.13.4] config file = None configured module search path = ['/Users/octav/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /opt/homebrew/lib/python3.10/site-packages/ansible ansible collection location = /Users/octav/.ansible/collections:/usr/share/ansible/collections executable location = /opt/homebrew/bin/ansible python version = 3.10.8 (main, Oct 21 2022, 22:22:30) [Clang 14.0.0 (clang-1400.0.29.202)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /Users/octav/ansible/collections/ansible_collections Collection Version -------------------- ------- amazon.aws 5.2.0 ansible.posix 1.5.1 ansible.windows 1.13.0 community.aws 5.2.0 community.docker 3.4.1 community.postgresql 2.3.2 ``` ### AWS SDK versions ```console (paste below) $ pip3 show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.24.69 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /opt/homebrew/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.27.69 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /opt/homebrew/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed CALLBACKS_ENABLED(/Users/octav/ansible/workdir/ansible.cfg) = ['yaml', 'profile_roles'] COLLECTIONS_PATHS(/Users/octav/ansible/workdir/ansible.cfg) = ['/Users/octav/ansible/collections'] DEFAULT_HOST_LIST(/Users/octav/ansible/workdir/ansible.cfg) = ['/Users/octav/ansible/workdir/inventory'] DEFAULT_LOAD_CALLBACK_PLUGINS(/Users/octav/ansible/workdir/ansible.cfg) = True DEFAULT_ROLES_PATH(/Users/octav/ansible/workdir/ansible.cfg) = ['/Users/octav/ansible/roles'] HOST_KEY_CHECKING(/Users/octav/ansible/workdir/ansible.cfg) = False INTERPRETER_PYTHON(/Users/octav/ansible/workdir/ansible.cfg) = auto_silent INVENTORY_ENABLED(/Users/octav/ansible/workdir/ansible.cfg) = ['ini', 'host_list', 'script', 'auto', 'yaml', 'toml', 'tower'] RETRY_FILES_ENABLED(/Users/octav/ansible/workdir/ansible.cfg) = False ``` ### OS / Environment ProductName: macOS ProductVersion: 13.1 BuildVersion: 22C65 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - name: "Create TG" community.aws.elb_target_group: access_key: "{{ aws_access_key_id }}" secret_key: "{{ aws_secret_access_key }}" name: "name" protocol: http port: 81 vpc_id: 'vpc-069XXXXXXX2c' # VPC-1 region: eu-west-1 health_check_protocol: http health_check_path: / health_check_port: 81 successful_response_codes: "200-299" health_check_interval: 15 health_check_timeout: 5 healthy_threshold_count: 4 unhealthy_threshold_count: 3 state: present target_type: ip targets: - Id: "{{ ansible_facts.default_ipv4.address }}" Port: 81 AvailabilityZone: all tags: "{{ default_tags | combine({'Name': 'name'}) }}" ``` ### Expected Results ```console (paste below) changed: [lvt00] => { "changed": true, "deregistration_delay_timeout_seconds": "300", "health_check_enabled": true, "health_check_interval_seconds": 15, "health_check_path": "/", "health_check_port": "81", "health_check_protocol": "HTTP", "health_check_timeout_seconds": 5, "healthy_threshold_count": 4, "invocation": { "module_args": { "access_key": "XXXXXXXXXXXXXXXXXX", "aws_ca_bundle": null, "aws_config": null, "debug_botocore_endpoint_logs": false, "deregistration_connection_termination": false, "deregistration_delay_timeout": null, "endpoint_url": null, "health_check_interval": 15, "health_check_path": "/", "health_check_port": "81", "health_check_protocol": "http", "health_check_timeout": 5, "healthy_threshold_count": 4, "load_balancing_algorithm_type": null, "modify_targets": true, "name": "name", "port": 81, "preserve_client_ip_enabled": null, "profile": null, "protocol": "http", "protocol_version": null, "proxy_protocol_v2_enabled": null, "purge_tags": true, "region": "eu-west-1", "secret_key": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "session_token": null, "state": "present", "stickiness_app_cookie_duration": null, "stickiness_app_cookie_name": null, "stickiness_enabled": null, "stickiness_lb_cookie_duration": null, "stickiness_type": null, "successful_response_codes": "200-299", "tags": { "Env": "Env_PROD", "Ioper": "Ioper_T1", "Name": "name" }, "target_type": "ip", "targets": [ { "AvailabilityZone": "all", "Id": "192.168.112.221", "Port": 81 } ], "unhealthy_threshold_count": 3, "validate_certs": true, "vpc_id": "vpc-069XXXXXXX2c", 
"wait": false, "wait_timeout": 200 } }, "ip_address_type": "ipv4", "load_balancer_arns": [ "arn:aws:elasticloadbalancing:eu-west-1:58XXXXXXXX:loadbalancer/app/alb-ext/9e5cXXXXXXX25" ], "load_balancing_algorithm_type": "round_robin", "load_balancing_cross_zone_enabled": "use_load_balancer_configuration", "matcher": { "http_code": "200-299" }, "port": 81, "protocol": "HTTP", "protocol_version": "HTTP1", "slow_start_duration_seconds": "0", "stickiness_app_cookie_cookie_name": "", "stickiness_app_cookie_duration_seconds": "86400", "stickiness_enabled": "false", "stickiness_lb_cookie_duration_seconds": "86400", "stickiness_type": "lb_cookie", "tags": { "Env": "Env_PROD", "Ioper": "Ioper_T1", "Name": "name" }, "target_group_arn": "arn:aws:elasticloadbalancing:eu-west-1:XXXXXXX:targetgroup/bane/8e2b4XXXXXXXc", "target_group_health_dns_failover_minimum_healthy_targets_count": "1", "target_group_health_dns_failover_minimum_healthy_targets_percentage": "off", "target_group_health_unhealthy_state_routing_minimum_healthy_targets_count": "1", "target_group_health_unhealthy_state_routing_minimum_healthy_targets_percentage": "off", "target_group_name": "name", "target_type": "ip", "unhealthy_threshold_count": 3, "vpc_id": "vpc-069XXXXXXX2c" } ``` ### Actual Results ```console (paste below) The full traceback is: Traceback (most recent call last): File "/tmp/ansible_community.aws.elb_target_group_payload_fvdhi88t/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/community/aws/plugins/modules/elb_target_group.py", line 738, in create_or_update_target_group File "/tmp/ansible_community.aws.elb_target_group_payload_fvdhi88t/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/modules.py", line 354, in deciding_wrapper return retrying_wrapper(*args, **kwargs) File "/tmp/ansible_community.aws.elb_target_group_payload_fvdhi88t/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/cloud.py", line 119, in _retry_wrapper return _retry_func( File "/tmp/ansible_community.aws.elb_target_group_payload_fvdhi88t/ansible_community.aws.elb_target_group_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/cloud.py", line 69, in _retry_func return func() File "/usr/local/lib/python3.9/site-packages/botocore/client.py", line 530, in _api_call return self._make_api_call(operation_name, kwargs) File "/usr/local/lib/python3.9/site-packages/botocore/client.py", line 960, in _make_api_call raise error_class(parsed_response, operation_name) botocore.exceptions.ClientError: An error occurred (ValidationError) when calling the RegisterTargets operation: You must specify a valid Availability Zone or select 'all' for all enabled Availability zones, since the IP address '192.168.112.221' is outside the VPC fatal: [lvt00]: FAILED! 
=> { "boto3_version": "1.26.73", "botocore_version": "1.29.73", "changed": false, "error": { "code": "ValidationError", "message": "You must specify a valid Availability Zone or select 'all' for all enabled Availability zones, since the IP address '192.168.112.221' is outside the VPC", "type": "Sender" }, "invocation": { "module_args": { "access_key": "XXXXXXXXXXXXXXXXXX", "aws_ca_bundle": null, "aws_config": null, "debug_botocore_endpoint_logs": false, "deregistration_connection_termination": false, "deregistration_delay_timeout": null, "endpoint_url": null, "health_check_interval": 15, "health_check_path": "/", "health_check_port": "81", "health_check_protocol": "http", "health_check_timeout": 5, "healthy_threshold_count": 4, "load_balancing_algorithm_type": null, "modify_targets": true, "name": "name", "port": 81, "preserve_client_ip_enabled": null, "profile": null, "protocol": "http", "protocol_version": null, "proxy_protocol_v2_enabled": null, "purge_tags": true, "region": "eu-west-1", "secret_key": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "session_token": null, "state": "present", "stickiness_app_cookie_duration": null, "stickiness_app_cookie_name": null, "stickiness_enabled": null, "stickiness_lb_cookie_duration": null, "stickiness_type": null, "successful_response_codes": "200-299", "tags": { "Env": "Env_PROD", "Ioper": "Ioper_T1", "Name": "name" }, "target_type": "ip", "targets": [ { "AvailabilityZone": "all", "Id": "192.168.112.221", "Port": 81 } ], "unhealthy_threshold_count": 3, "validate_certs": true, "vpc_id": "vpc-069XXXXXXX2c", "wait": false, "wait_timeout": 200 } }, "msg": "Couldn't register targets: An error occurred (ValidationError) when calling the RegisterTargets operation: You must specify a valid Availability Zone or select 'all' for all enabled Availability zones, since the IP address '192.168.112.221' is outside the VPC", "response_metadata": { "http_headers": { "connection": "close", "content-length": "415", "content-type": "text/xml", "date": "Sat, 04 Mar 2023 15:41:37 GMT", "x-amzn-requestid": "a980830a-8683-4946-ada7-7d1ab6b7b6ad" }, "http_status_code": 400, "request_id": "a980830a-8683-4946-ada7-7d1ab6b7b6ad", "retry_attempts": 0 } } ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
The property `AvailabilityZone` is lost in line 734. https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/elb_target_group.py#L730-L738 this might be a hotfix ```diff diff --git a/plugins/modules/elb_target_group.py b/plugins/modules/elb_target_group.py index 784fa143..cd2f4162 100644 --- a/plugins/modules/elb_target_group.py +++ b/plugins/modules/elb_target_group.py @@ -731,7 +731,10 @@ def create_or_update_target_group(connection, module): instances_to_add = [] for target in params['Targets']: if target['Id'] in add_instances: - instances_to_add.append({'Id': target['Id'], 'Port': target['Port']}) + tmp = {'Id': target['Id'], 'Port': target['Port']} + if target.get('AvailabilityZone'): + tmp['AvailabilityZone'] = target['AvailabilityZone'] + instances_to_add.append(tmp) changed = True try: ``` @octavian2204 do you have some time to test this hotfix or to work on this bug and contribute a proper fix? @markuman thx a lot for your effort. Unfortunately, will not be able to do it today. Maybe tomorrow. Will keep you in touch. Hi @markuman, your hotfix worked like a charm! Thank you!
2023-04-18T11:28:41
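As a standalone illustration of the behaviour change in the patch above: when building the `register_targets` payload, the optional `AvailabilityZone` key (for example `all` for IP targets outside the target group's VPC) has to be carried through rather than dropped. The helper below is a sketch for illustration, not the module code itself.

```python
# Sketch: build RegisterTargets entries, preserving AvailabilityZone when given.
def targets_to_register(requested_targets, ids_to_add):
    entries = []
    for target in requested_targets:
        if target["Id"] not in ids_to_add:
            continue
        entry = {"Id": target["Id"], "Port": target["Port"]}
        if target.get("AvailabilityZone"):
            entry["AvailabilityZone"] = target["AvailabilityZone"]
        entries.append(entry)
    return entries


requested = [{"Id": "192.168.112.221", "Port": 81, "AvailabilityZone": "all"}]
print(targets_to_register(requested, {"192.168.112.221"}))
# [{'Id': '192.168.112.221', 'Port': 81, 'AvailabilityZone': 'all'}]
```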
ansible-collections/community.aws
1,780
ansible-collections__community.aws-1780
[ "1761" ]
0344815a451b46c5c63582603dd6506e2b0c42a7
diff --git a/plugins/modules/msk_cluster.py b/plugins/modules/msk_cluster.py --- a/plugins/modules/msk_cluster.py +++ b/plugins/modules/msk_cluster.py @@ -125,7 +125,15 @@ sasl_scram: description: SASL/SCRAM authentication is enabled or not. type: bool - default: False + sasl_iam: + version_added: 5.5.0 + description: IAM authentication is enabled or not. + type: bool + unauthenticated: + version_added: 5.5.0 + description: Option to explicitly turn on or off authentication + type: bool + default: True enhanced_monitoring: description: Specifies the level of monitoring for the MSK cluster. choices: @@ -385,13 +393,21 @@ def prepare_create_options(module): if module.params["authentication"]: c_params["ClientAuthentication"] = {} - if module.params["authentication"].get("sasl_scram"): - c_params["ClientAuthentication"]["Sasl"] = { - "Scram": module.params["authentication"]["sasl_scram"] - } + if module.params["authentication"].get("sasl_scram") or module.params["authentication"].get("sasl_iam"): + sasl = {} + if module.params["authentication"].get("sasl_scram"): + sasl["Scram"] = {"Enabled": True} + if module.params["authentication"].get("sasl_iam"): + sasl["Iam"] = {"Enabled": True} + c_params["ClientAuthentication"]["Sasl"] = sasl if module.params["authentication"].get("tls_ca_arn"): c_params["ClientAuthentication"]["Tls"] = { - "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"] + "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"], + "Enabled": True, + } + if module.params["authentication"].get("unauthenticated"): + c_params["ClientAuthentication"] = { + "Unauthenticated": {"Enabled": True}, } c_params.update(prepare_enhanced_monitoring_options(module)) @@ -716,7 +732,9 @@ def main(): type="dict", options=dict( tls_ca_arn=dict(type="list", elements="str", required=False), - sasl_scram=dict(type="bool", default=False), + sasl_scram=dict(type="bool", required=False), + sasl_iam=dict(type="bool", required=False), + unauthenticated=dict(type="bool", default=True, required=False), ), ), enhanced_monitoring=dict(
diff --git a/tests/integration/targets/msk_cluster-auth/aliases b/tests/integration/targets/msk_cluster-auth/aliases new file mode 100644 --- /dev/null +++ b/tests/integration/targets/msk_cluster-auth/aliases @@ -0,0 +1,4 @@ +cloud/aws +time=46m + +msk_cluster diff --git a/tests/integration/targets/msk_cluster-auth/defaults/main.yml b/tests/integration/targets/msk_cluster-auth/defaults/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/msk_cluster-auth/defaults/main.yml @@ -0,0 +1,19 @@ +--- +vpc_name: "{{ resource_prefix }}-mskc-a" +vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' +vpc_subnets: + - '10.{{ 256 | random(seed=resource_prefix) }}.100.0/24' + - '10.{{ 256 | random(seed=resource_prefix) }}.101.0/24' +vpc_subnet_name_prefix: "{{ resource_prefix }}" + +msk_config_name: "{{ resource_prefix }}-msk-cluster-auth" +msk_cluster_name: "{{ tiny_prefix }}-msk-cluster-auth" +msk_version: 2.8.1 +msk_broker_nodes: 2 + +tags_create: + key1: "value1" + key2: "value2" +tags_update: + key2: "value2" + key3: "value3" diff --git a/tests/integration/targets/msk_cluster-auth/meta/main.yml b/tests/integration/targets/msk_cluster-auth/meta/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/msk_cluster-auth/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/tests/integration/targets/msk_cluster-auth/tasks/main.yml b/tests/integration/targets/msk_cluster-auth/tasks/main.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/msk_cluster-auth/tasks/main.yml @@ -0,0 +1,91 @@ +--- +- name: aws_msk_cluster integration tests + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - amazon.aws + block: + - name: collect availability zone info + aws_az_info: + register: az_info + + - name: assert there are at least two zones + assert: + that: az_info.availability_zones | length >= 2 + + - name: create vpc + ec2_vpc_net: + state: present + cidr_block: '{{ vpc_cidr }}' + name: '{{ vpc_name }}' + register: vpc + + - name: create subnets + ec2_vpc_subnet: + state: present + cidr: '{{ item }}' + az: '{{ az_info.availability_zones[index].zone_name }}' + vpc_id: '{{ vpc.vpc.id }}' + tags: + Name: '{{ vpc_subnet_name_prefix }}-subnet-{{ index }}' + loop: "{{ vpc_subnets }}" + loop_control: + index_var: index + register: subnets + + - set_fact: + subnet_ids: '{{ subnets | community.general.json_query("results[].subnet.id") | list }}' + + # ============================================================ + - name: create msk configuration + aws_msk_config: + name: "{{ msk_config_name }}" + state: "present" + kafka_versions: + - "{{ msk_version }}" + register: msk_config + + - name: create test with sasl_iam + include_tasks: test_create_auth.yml + + always: + + - name: delete msk cluster + aws_msk_cluster: + name: "{{ msk_cluster_name }}" + state: absent + wait: true + ignore_errors: yes + + - name: remove msk configuration + aws_msk_config: + name: "{{ msk_config_name }}" + state: absent + ignore_errors: yes + + - name: remove subnets + ec2_vpc_subnet: + state: absent + cidr: '{{ item }}' + vpc_id: '{{ vpc.vpc.id }}' + loop: "{{ vpc_subnets }}" + ignore_errors: yes + register: removed_subnets + until: removed_subnets is succeeded + retries: 5 + delay: 5 + + - name: remove the vpc + ec2_vpc_net: + state: absent + cidr_block: '{{ vpc_cidr }}' + name: '{{ vpc_name }}' + 
ignore_errors: yes + register: removed_vpc + until: removed_vpc is success + retries: 5 + delay: 5 diff --git a/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml b/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml new file mode 100644 --- /dev/null +++ b/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml @@ -0,0 +1,101 @@ +--- +- name: create a msk cluster with authentication flipped from default (check mode) + aws_msk_cluster: + name: "{{ msk_cluster_name }}" + state: "present" + version: "{{ msk_version }}" + nodes: "{{ msk_broker_nodes }}" + ebs_volume_size: 10 + authentication: + sasl_iam: true + sasl_scram: true + unauthenticated: false + subnets: "{{ subnet_ids }}" + wait: true + tags: "{{ tags_create }}" + configuration_arn: "{{ msk_config.arn }}" + configuration_revision: "{{ msk_config.revision }}" + check_mode: yes + register: msk_cluster + +- name: assert that the msk cluster be created + assert: + that: + - msk_cluster is changed + +- name: create a msk cluster with authentication flipped from default + aws_msk_cluster: + name: "{{ msk_cluster_name }}" + state: "present" + version: "{{ msk_version }}" + nodes: "{{ msk_broker_nodes }}" + ebs_volume_size: 10 + authentication: + sasl_iam: true + sasl_scram: true + unauthenticated: false + subnets: "{{ subnet_ids }}" + wait: true + tags: "{{ tags_create }}" + configuration_arn: "{{ msk_config.arn }}" + configuration_revision: "{{ msk_config.revision }}" + register: msk_cluster + +- name: assert that the msk cluster is created + assert: + that: + - msk_cluster is changed + +- name: validate return values + assert: + that: + - "'cluster_info' in msk_cluster" + - "'bootstrap_broker_string' in msk_cluster" + - "'key1' in msk_cluster.cluster_info.tags" + - "msk_cluster.cluster_info.tags.key1 == 'value1'" + - "msk_cluster.cluster_info.cluster_name == msk_cluster_name" + - "msk_cluster.cluster_info.number_of_broker_nodes == msk_broker_nodes" + - "msk_cluster.cluster_info.broker_node_group_info.instance_type == 'kafka.t3.small'" + - "msk_cluster.cluster_info.broker_node_group_info.storage_info.ebs_storage_info.volume_size == 10" + - "msk_cluster.cluster_info.client_authentication.sasl.iam.enabled == true" + - "msk_cluster.cluster_info.client_authentication.sasl.scram.enabled == true" + # Not always returned by API + # - "msk_cluster.cluster_info.client_authentication.unauthenticated.enabled == false" + - "msk_cluster.cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker == false" + - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:{{ aws_region }}:')" + +- name: create a msk cluster with authentication flipped from default (idempotency) + aws_msk_cluster: + name: "{{ msk_cluster_name }}" + state: "present" + version: "{{ msk_version }}" + nodes: "{{ msk_broker_nodes }}" + ebs_volume_size: 10 + authentication: + sasl_iam: true + sasl_scram: true + unauthenticated: false + subnets: "{{ subnet_ids }}" + wait: true + tags: "{{ tags_create }}" + configuration_arn: "{{ msk_config.arn }}" + configuration_revision: "{{ msk_config.revision }}" + register: msk_cluster + +- name: assert that the msk cluster wasn't changed + assert: + that: + - msk_cluster is not changed + +### Keep delete simple as we're not checking delete here +- name: delete msk cluster + aws_msk_cluster: + name: "{{ msk_cluster_name }}" + state: "absent" + wait: true + register: msk_cluster + +- name: assert that the msk cluster is changed + assert: + that: + - msk_cluster is changed diff 
--git a/tests/integration/targets/msk_cluster/defaults/main.yml b/tests/integration/targets/msk_cluster/defaults/main.yml --- a/tests/integration/targets/msk_cluster/defaults/main.yml +++ b/tests/integration/targets/msk_cluster/defaults/main.yml @@ -8,7 +8,7 @@ vpc_subnet_name_prefix: "{{ resource_prefix }}" msk_config_name: "{{ resource_prefix }}-msk-cluster" msk_cluster_name: "{{ tiny_prefix }}-msk-cluster" -msk_version: 2.6.0 +msk_version: 2.8.1 msk_broker_nodes: 2 tags_create:
msk_cluster - Cannot create a cluster w/ authentication sasl_scram ### Summary When I do ```- name: Create MSK cluster community.aws.msk_cluster: name: "{{ item.name }}" .................................................................... authentication: sasl_scram: true .................................................................... ``` i get an error: ``` "msg": "Failed to create kafka cluster: Parameter validation failed:\nInvalid type for parameter ClientAuthentication.Sasl.Scram, value: True, type: <class 'bool'>, valid types: <class 'dict'>" ``` Full traceback: ``` The full traceback is: Traceback (most recent call last): File "/tmp/ansible_community.aws.msk_cluster_payload_roovgjj3/ansible_community.aws.msk_cluster_payload.zip/ansible_collections/community/aws/plugins/modules/msk_cluster.py", line 484, in create_or_update_cluster File "/tmp/ansible_community.aws.msk_cluster_payload_roovgjj3/ansible_community.aws.msk_cluster_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/modules.py", line 354, in deciding_wrapper return retrying_wrapper(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/tmp/ansible_community.aws.msk_cluster_payload_roovgjj3/ansible_community.aws.msk_cluster_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/cloud.py", line 119, in _retry_wrapper return _retry_func( ^^^^^^^^^^^^ File "/tmp/ansible_community.aws.msk_cluster_payload_roovgjj3/ansible_community.aws.msk_cluster_payload.zip/ansible_collections/amazon/aws/plugins/module_utils/cloud.py", line 69, in _retry_func return func() ^^^^^^ File "/home/.../.local/lib/python3.11/site-packages/botocore/client.py", line 530, in _api_call return self._make_api_call(operation_name, kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/.../.local/lib/python3.11/site-packages/botocore/client.py", line 919, in _make_api_call request_dict = self._convert_to_request_dict( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/.../.local/lib/python3.11/site-packages/botocore/client.py", line 990, in _convert_to_request_dict request_dict = self._serializer.serialize_to_request( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/.../.local/lib/python3.11/site-packages/botocore/validate.py", line 381, in serialize_to_request raise ParamValidationError(report=report.generate_report()) botocore.exceptions.ParamValidationError: Parameter validation failed: Invalid type for parameter ClientAuthentication.Sasl.Scram, value: True, type: <class 'bool'>, valid types: <class 'dict'> failed: [localhost] (item={'name': '...-msk-dev-2', 'configuration': '...-msk-dev-conf', 'version': '2.8.1', 'nodes': 3, 'ebs_volume_gb': 256, 'enhanced_monitoring': 'PER_TOPIC_PER_BROKER', 'instance_type': 'kafka.t3.small', 'open_monitoring': {'jmx_exporter': False, 'node_exporter': True}, 'subnets': ['subnet-...', 'subnet-...', 'subnet-...'], 'security_groups': ['sg-...'], 'tags': {'Payer': '...'}}) => { "ansible_loop_var": "item", "boto3_version": "1.26.72", "botocore_version": "1.29.72", "changed": false, "invocation": { "module_args": { "access_key": null, "authentication": { "sasl_scram": true, "tls_ca_arn": null }, ``` I can create the cluster w/o `authentication` but it creates an `Unauthenticated` cluster. 
### Issue Type Bug Report ### Component Name msk_cluster ### Ansible Version ansible [core 2.14.3] config file = .../infrastructure/ansible.cfg configured module search path = ['/home/.../.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python3.11/site-packages/ansible ansible collection location = /home/.../.ansible/collections:/usr/share/ansible/collections executable location = /usr/bin/ansible python version = 3.11.2 (main, Feb 8 2023, 00:00:00) [GCC 12.2.1 20221121 (Red Hat 12.2.1-4)] (/usr/bin/python3) jinja version = 3.0.3 libyaml = True ``` ### Collection Versions ``` $ ansible-galaxy collection list [DEPRECATION WARNING]: DEFAULT_GATHER_SUBSET option, the module_defaults keyword is a more generic version and can apply to all calls to the M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions, use module_defaults instead. This feature will be removed from ansible-core in version 2.18. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. # /home/.../.ansible/collections/ansible_collections Collection Version ----------------- ------- amazon.aws 5.4.0 ansible.posix 1.5.1 community.aws 5.4.0 community.general 6.5.0 community.mongodb 1.5.1 community.mysql 3.6.0 # /usr/lib/python3.11/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 5.2.0 ansible.netcommon 4.1.0 ansible.posix 1.5.1 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.0 awx.awx 21.12.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.4.0 cisco.asa 4.0.0 cisco.dnac 6.6.3 cisco.intersight 1.0.23 cisco.ios 4.3.1 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.1 cisco.mso 2.2.1 cisco.nso 1.0.3 cisco.nxos 4.1.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.4 community.aws 5.2.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.11.0 community.digitalocean 1.23.0 community.dns 2.5.1 community.docker 3.4.2 community.fortios 1.0.0 community.general 6.4.0 community.google 1.0.0 community.grafana 1.5.4 community.hashi_vault 4.1.0 community.hrobot 1.7.0 community.libvirt 1.2.0 community.mongodb 1.5.1 community.mysql 3.6.0 community.network 5.0.0 community.okd 2.3.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.7.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.6.1 community.vmware 3.4.0 community.windows 1.12.0 community.zabbix 1.9.2 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.5.0 dellemc.unity 1.5.0 f5networks.f5_modules 1.22.1 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.2 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.1.2 grafana.grafana 1.1.1 hetzner.hcloud 1.10.0 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.3.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.4.0 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.3.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.11.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 
ovirt.ovirt 2.4.1 purestorage.flasharray 1.17.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.3.0 sensu.sensu_go 1.13.2 splunk.es 2.1.0 t_systems_mms.icinga_director 1.32.0 theforeman.foreman 3.9.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.7.0 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` ### AWS SDK versions ``` $ pip show boto boto3 botocore Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /usr/local/lib/python3.11/site-packages Requires: Required-by: --- Name: boto3 Version: 1.26.72 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/.../.local/lib/python3.11/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.29.72 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /home/.../.local/lib/python3.11/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: awscli, boto3, s3transfer ``` ### Configuration ```$ ansible-config dump --only-changed [DEPRECATION WARNING]: DEFAULT_GATHER_SUBSET option, the module_defaults keyword is a more generic version and can apply to all calls to the M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions, use module_defaults instead. This feature will be removed from ansible-core in version 2.18. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. CACHE_PLUGIN(/home/.../infrastructure/ansible.cfg) = jsonfile CACHE_PLUGIN_CONNECTION(/home/.../infrastructure/ansible.cfg) = ~/.ansible/cache CACHE_PLUGIN_TIMEOUT(/home/.../infrastructure/ansible.cfg) = 3600 CALLBACKS_ENABLED(/home/.../infrastructure/ansible.cfg) = ['timer', 'profile_tasks', 'profile_roles'] CONFIG_FILE() = /home/.../infrastructure/ansible.cfg DEFAULT_ASK_PASS(/home/.../infrastructure/ansible.cfg) = False DEFAULT_EXECUTABLE(/home/.../infrastructure/ansible.cfg) = /bin/bash DEFAULT_FORCE_HANDLERS(/home/.../infrastructure/ansible.cfg) = True DEFAULT_FORKS(/home/.../infrastructure/ansible.cfg) = 15 DEFAULT_GATHERING(/home/.../infrastructure/ansible.cfg) = smart DEFAULT_GATHER_SUBSET(/home/.../infrastructure/ansible.cfg) = ['all'] DEFAULT_HOST_LIST(/home/.../infrastructure/ansible.cfg) = ['/home/.../infrastructure/envs'] DEFAULT_LOG_PATH(/home/.../infrastructure/ansible.cfg) = /home/.../.ansible/ansible.log DEFAULT_MANAGED_STR(/home/.../infrastructure/ansible.cfg) = Ansible managed! DON'T CHANGE THIS FILE BY HAND! You were warned! DEFAULT_ROLES_PATH(/home/.../infrastructure/ansible.cfg) = ['/home/.../infrastructure/roles'] DEFAULT_TIMEOUT(/home/.../infrastructure/ansible.cfg) = 30 DEPRECATION_WARNINGS(/home/.../infrastructure/ansible.cfg) = True HOST_KEY_CHECKING(/home/.../infrastructure/ansible.cfg) = False INVENTORY_ENABLED(/home/.../infrastructure/ansible.cfg) = ['yaml', 'aws_ec2', 'ini'] RETRY_FILES_ENABLED(/home/.../infrastructure/ansible.cfg) = False SHOW_CUSTOM_STATS(/home/.../infrastructure/ansible.cfg) = True ``` ### OS / Environment Fedora release 37 (Thirty Seven) ### Steps to Reproduce ``` - name: Create MSK cluster community.aws.msk_cluster: name: "{{ item.name }}" .................................................................... 
authentication: sasl_scram: true ``` ### Expected Results Create a MSK cluster w/ SASL/SCRAM authentication. ### Actual Results ``` "msg": "Failed to create kafka cluster: Parameter validation failed:\nInvalid type for parameter ClientAuthentication.Sasl.Scram, value: True, type: <class 'bool'>, valid types: <class 'dict'>" ``` As I'm working on this I can jump in at any time to further debug this. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
@gabriel-preda-adswizz thanks for your bug report. https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/msk_cluster.py#L383-L392 the generated object is wrong. ref https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kafka/client/create_cluster.html ```diff diff --git a/plugins/modules/msk_cluster.py b/plugins/modules/msk_cluster.py index 65c9edea..1b045c05 100644 --- a/plugins/modules/msk_cluster.py +++ b/plugins/modules/msk_cluster.py @@ -384,11 +384,13 @@ def prepare_create_options(module): c_params["ClientAuthentication"] = {} if module.params["authentication"].get("sasl_scram"): c_params["ClientAuthentication"]["Sasl"] = { - "Scram": module.params["authentication"]["sasl_scram"] + "Scram": { + "Enable": module.params["authentication"]["sasl_scram"] } if module.params["authentication"].get("tls_ca_arn"): c_params["ClientAuthentication"]["Tls"] = { - "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"] + "CertificateAuthorityArnList": module.params["authentication"]["tls_ca_arn"], + 'Enabled': True } c_params.update(prepare_enhanced_monitoring_options(module)) ``` this might work. @gabriel-preda-adswizz do you have some time for work on a fix to contribute or can you test if this patch fixes the issue for you? Thanx @markuman. The patch is working for me. However aside from the above I found this in output: ``` "invocation": { "module_args": { "access_key": null, "authentication": { "sasl_scram": true, "tls_ca_arn": null } ``` I didn't set anything about `tls_ca_arn`, I don't understand why I have that line in there. I only did: ``` authentication: sasl_scram: true ``` Thanx for the fast turnout (I can work w/ the patched version for some time). > I didn't set anything about `tls_ca_arn`, I don't understand why I have that line in there. That's because the module treated `tls_ca_arn` as `default(false)` if not provided. See: https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/msk_cluster.py#L715 So this is also a documentation bug, because the default value is missing there. * https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/msk_cluster.py#L118-L121 * https://docs.ansible.com/ansible/devel//collections/community/aws/msk_cluster_module.html#parameter-authentication/tls_ca_arn Ah not. It's wrong. There is no default. Basically the empty key must/can be popped out. So your patch is ok for me. What next? It's your contribution :1st_place_medal: @gabriel-preda-adswizz If you have time for it, feel free to make a pull request with * the patch/fix * a changelog fragment * and expand the integration test that covers the bug Hi @markuman, In preparing the pull request I've extended the fix for IAM authentication and also the posibility to disable unauthenticated clients that were not previously covered. Now I'm puzzled about the tests. There are a myriad of combinations :) and I'm still thinking in how to reorganize or only add those. > Now I'm puzzled about the tests. > There are a myriad of combinations :) and I'm still thinking in how to reorganize or only add those. Mostly it helps to create a new task file that covers just that case/bug. Example: https://github.com/ansible-collections/community.mysql/pull/503/files New task file for the scenario `revoke_only_grant.yml` is included in the `main.yml` of the integration test.
2023-04-19T11:42:46
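A hedged sketch of the payload shape the fix above converges on: boto3's `kafka` `create_cluster` expects nested `{'Enabled': bool}` dictionaries under `ClientAuthentication`, not bare booleans. The helper is illustrative only; the option names mirror the module's `authentication` suboptions, and the module's own handling may differ in detail.

```python
# Sketch: assemble ClientAuthentication in the structure create_cluster accepts.
def build_client_authentication(auth):
    client_auth = {}
    sasl = {}
    if auth.get("sasl_scram"):
        sasl["Scram"] = {"Enabled": True}
    if auth.get("sasl_iam"):
        sasl["Iam"] = {"Enabled": True}
    if sasl:
        client_auth["Sasl"] = sasl
    if auth.get("tls_ca_arn"):
        client_auth["Tls"] = {
            "CertificateAuthorityArnList": auth["tls_ca_arn"],
            "Enabled": True,
        }
    if auth.get("unauthenticated"):
        client_auth["Unauthenticated"] = {"Enabled": True}
    return client_auth


print(build_client_authentication({"sasl_scram": True, "sasl_iam": True}))
# {'Sasl': {'Scram': {'Enabled': True}, 'Iam': {'Enabled': True}}}
```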
ansible-collections/community.aws
1,781
ansible-collections__community.aws-1781
[ "1771" ]
2fe39baaa98a3f488303e473c611a65244fc4034
diff --git a/plugins/modules/eks_nodegroup.py b/plugins/modules/eks_nodegroup.py --- a/plugins/modules/eks_nodegroup.py +++ b/plugins/modules/eks_nodegroup.py @@ -515,7 +515,11 @@ def create_or_update_nodegroups(client, module): if module.params['release_version'] is not None: params['releaseVersion'] = module.params['release_version'] if module.params['remote_access'] is not None: - params['remoteAccess'] = module.params['remote_access'] + params['remoteAccess'] = dict() + if module.params['remote_access']['ec2_ssh_key'] is not None: + params['remoteAccess']['ec2SshKey'] = module.params['remote_access']['ec2_ssh_key'] + if module.params['remote_access']['source_sg'] is not None: + params['remoteAccess']['sourceSecurityGroups'] = module.params['remote_access']['source_sg'] if module.params['capacity_type'] is not None: params['capacityType'] = module.params['capacity_type'].upper() if module.params['labels'] is not None:
diff --git a/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml b/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml --- a/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml +++ b/tests/integration/targets/eks_nodegroup/tasks/cleanup.yml @@ -37,6 +37,19 @@ with_items: '{{ eks_security_groups|reverse|list + additional_eks_sg }}' ignore_errors: 'yes' +- name: Delete securitygroup for node access + amazon.aws.ec2_security_group: + name: 'ansible-test-eks_nodegroup' + description: "SSH access" + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: [] + state: absent + +- name: Delete Keypair for Access to Nodegroup nodes + amazon.aws.ec2_key: + name: "ansible-test-eks_nodegroup" + state: absent + - name: remove Route Tables ec2_vpc_route_table: state: absent diff --git a/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml b/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml --- a/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml +++ b/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml @@ -106,3 +106,20 @@ default_version: 1 instance_type: t3.micro register: lt + +- name: Create securitygroup for node access + amazon.aws.ec2_security_group: + name: 'ansible-test-eks_nodegroup' + description: "SSH access" + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: + - proto: tcp + ports: + - 22 + cidr_ip: 0.0.0.0/0 + register: securitygroup_eks_nodegroup + +- name: Create Keypair for Access to Nodegroup nodes + amazon.aws.ec2_key: + name: "ansible-test-eks_nodegroup" + register: ec2_key_eks_nodegroup diff --git a/tests/integration/targets/eks_nodegroup/tasks/full_test.yml b/tests/integration/targets/eks_nodegroup/tasks/full_test.yml --- a/tests/integration/targets/eks_nodegroup/tasks/full_test.yml +++ b/tests/integration/targets/eks_nodegroup/tasks/full_test.yml @@ -80,6 +80,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result check_mode: True @@ -114,6 +118,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result @@ -147,6 +155,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result check_mode: True @@ -181,6 +193,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result check_mode: True @@ -255,6 +271,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result check_mode: True @@ -289,6 +309,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result @@ -322,6 +346,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result 
check_mode: True @@ -356,6 +384,10 @@ capacity_type: 'SPOT' tags: 'foo': 'bar' + remote_access: + ec2_ssh_key: "{{ ec2_key_eks_nodegroup.key.name }}" + source_sg: + - "{{ securitygroup_eks_nodegroup.group_id }}" wait: True register: eks_nodegroup_result
eks_nodegroup module remote_access config not working as documented ### Summary When I try to create a nodegroup with remote_access config as documented, I got following error: `Couldn't create Nodegroup devcluster-eu-central-1a.: Parameter validation failed: Unknown parameter in remoteAccess: "ec2_ssh_key", must be one of: ec2SshKey, sourceSecurityGroups Unknown parameter in remoteAccess: "source_sg", must be one of: ec2SshKey, sourceSecurityGroups ` However when I use the suggested camelcase, I get an validation error: `msg: 'Unsupported parameters for (community.aws.eks_nodegroup) module: remote_access.ec2SshKey, remote_access.sourceSecurityGroups. Supported parameters include: ec2_ssh_key, source_sg.' ` Think there needs to be a translation from unterscore ansible definition to camelcase from boto in this line, similar to "update_config": https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/eks_nodegroup.py#L518 I will try to debug further and may provide a PR if successful. ### Issue Type Bug Report ### Component Name eks_nodegroup ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.14.3] config file = None configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.10/dist-packages/ansible ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.10.6 (main, Mar 10 2023, 10:55:28) [GCC 11.3.0] (/usr/bin/python3) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list community.aws 5.2.0 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Name: boto3 Version: 1.26.95 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.29.95 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed CONFIG_FILE() = None ``` ### OS / Environment ```console (paste below) $ cat /etc/lsb-release DISTRIB_ID=Ubuntu DISTRIB_RELEASE=22.04 DISTRIB_CODENAME=jammy DISTRIB_DESCRIPTION="Ubuntu 22.04.2 LTS" ``` ### Steps to Reproduce Using the module quiet normal ```yaml (paste below) - name: Create nodegroup for internal usage community.aws.eks_nodegroup: name: "{{ eks_cluster_name }}-{{ item.subnet.availability_zone }}" cluster_name: "{{ eks_cluster_name }}" node_role: "{{ iam_role_eks_nodegroup.arn }}" subnets: - "{{ item.subnet.id }}" scaling_config: "{{ eks_nodegroup.scaling_config }}" update_config: max_unavailable: 1 disk_size: "{{ eks_nodegroup.disk_size }}" instance_types: "{{ eks_nodegroup.instance_types }}" ami_type: "{{ eks_nodegroup.ami_type }}" labels: env: "internal" capacity_type: "{{ eks_nodegroup.capacity_type }}" remote_access: ec2_ssh_key: "{{ k8s_admin_keypair.key.id }}" source_sg: - "{{ k8s_ssh_securitygroup.group_id }}" wait: true ``` ### Expected Results No error from boto or the module itself and injected ssh key on all workers ### Actual Results ```console (paste below) Couldn't create Nodegroup devcluster-eu-central-1a.: Parameter validation failed: Unknown parameter in remoteAccess: "ec2_ssh_key", must be one of: ec2SshKey, sourceSecurityGroups Unknown parameter in remoteAccess: "source_sg", must be one of: ec2SshKey, sourceSecurityGroups ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
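The merged patch above addresses exactly this by renaming the suboptions before they are handed to boto3. As a minimal standalone sketch of that translation (the helper name `build_remote_access` and the example values are invented here for illustration; the module performs the same mapping inline while assembling the `create_nodegroup` parameters):

```python
def build_remote_access(remote_access):
    """Translate eks_nodegroup's snake_case remote_access suboptions into the
    camelCase keys that boto3's create_nodegroup() expects."""
    if remote_access is None:
        return None
    mapped = {}
    if remote_access.get("ec2_ssh_key") is not None:
        mapped["ec2SshKey"] = remote_access["ec2_ssh_key"]
    if remote_access.get("source_sg") is not None:
        mapped["sourceSecurityGroups"] = remote_access["source_sg"]
    return mapped


# The playbook options from the report above become boto3-ready parameters:
print(build_remote_access({"ec2_ssh_key": "my-keypair", "source_sg": ["sg-0123456789abcdef0"]}))
# -> {'ec2SshKey': 'my-keypair', 'sourceSecurityGroups': ['sg-0123456789abcdef0']}
```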
2023-04-20T07:27:48
ansible-collections/community.aws
1,788
ansible-collections__community.aws-1788
[ "1774", "1774" ]
0714131c8c86f77cf4c62a3c80d37d34d40530b7
diff --git a/plugins/modules/s3_lifecycle.py b/plugins/modules/s3_lifecycle.py --- a/plugins/modules/s3_lifecycle.py +++ b/plugins/modules/s3_lifecycle.py @@ -331,7 +331,7 @@ def build_rule(client, module): if transition.get("transition_date"): t_out["Date"] = transition["transition_date"] elif transition.get("transition_days") is not None: - t_out["Days"] = transition["transition_days"] + t_out["Days"] = int(transition["transition_days"]) if transition.get("storage_class"): t_out["StorageClass"] = transition["storage_class"].upper() rule["Transitions"].append(t_out) @@ -498,7 +498,7 @@ def create_lifecycle_rule(client, module): aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_configuration ) except is_boto3_error_message("At least one action needs to be specified in a rule"): - # Amazon interpretted this as not changing anything + # Amazon interpreted this as not changing anything changed = False except ( botocore.exceptions.ClientError,
diff --git a/tests/integration/targets/s3_lifecycle/tasks/main.yml b/tests/integration/targets/s3_lifecycle/tasks/main.yml --- a/tests/integration/targets/s3_lifecycle/tasks/main.yml +++ b/tests/integration/targets/s3_lifecycle/tasks/main.yml @@ -9,7 +9,7 @@ security_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' s3_lifecycle: - wait: yes + wait: true block: # ============================================================ @@ -33,7 +33,7 @@ prefix: "{{ item }}" status: enabled state: present - wait: yes + wait: true register: output loop: - rule_1 @@ -51,7 +51,7 @@ prefix: "{{ item }}" status: enabled state: absent - wait: yes + wait: true register: output loop: - rule_1 @@ -700,6 +700,6 @@ s3_bucket: name: "{{item}}" state: absent - ignore_errors: yes + ignore_errors: true with_items: - '{{ bucket_name }}'
S3 Lifecycle module is missing a cast to integer for transitions list ### Summary Hi, I have noticed that when using the `community.aws.s3_lifecycle` module, AWS S3 complains that values passed for the `transition_days` of a life cycle transition are of string type instead of integer: ``` "msg": "Parameter validation failed:\nInvalid type for parameter LifecycleConfiguration.Rules[0].Transitions[0].Days, value: 5, type: <class 'str'>, valid types: <class 'int'>", ``` This is what I have for my transition rule: ```yaml - name: Manage S3 bucket lifecycle rules in AWS community.aws.s3_lifecycle: name: "<BUCKET NAME>" state: present status: enabled expiration_days: "{{ s3_bucket.lifecycle.expiration_days | int }}" transitions: - storage_class: "{{ s3_bucket.lifecycle.storage_class }}" transition_days: "{{ s3_bucket.lifecycle.transition_days }}" ``` With `expiration_days` having a value of 30 and `transition_days` having a value of 15. Looking at the code, I _suspect_ that passing the type instructs Ansible to perform an internal conversion: ```python def main(): s3_storage_class = ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] argument_spec = dict( ... expiration_days=dict(type='int'), <--- type here in int ``` The conversion isn't performed for parameters in transtions, because this is a list of dicts: ```python transitions=dict(type='list', elements='dict'), ``` When the rule is being built via the [`build_rule` function](https://github.com/ansible-collections/community.aws/blob/3c3698d79ce5f56cefd24efe98cc49e6330f4c29/plugins/modules/s3_lifecycle.py#L273), the value is retrieved "as is", which can be a string if the value has been templated by Jinja2. I suggest to force the cast to integer via the following change. This way, irrespective of the type being "rendered" (either native int or str from Jinja2 templating), we always send the correct type: ```python transition_days = int(module.params.get("transition_days")) ``` I could be missing something, but in my test, forcing the conversion solved the issue. 
### Issue Type Bug Report ### Component Name s3_lifecycle ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.10] config file = None configured module search path = ['/home/rmahroua/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.9/site-packages/ansible ansible collection location = /home/rmahroua/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.9.13 (main, May 18 2022, 00:00:00) [GCC 11.3.1 20220421 (Red Hat 11.3.1-2)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /home/rmahroua/.ansible/collections/ansible_collections Collection Version --------------------- ------- amazon.aws 5.1.0 awx.awx 21.7.0 community.aws 5.2.0 community.general 6.0.1 community.hashi_vault 4.0.0 google.cloud 1.0.2 # /usr/local/lib/python3.9/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 2.3.0 ansible.netcommon 2.6.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.10.0 arista.eos 3.1.0 awx.awx 19.4.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.2.0 cisco.aci 2.2.0 cisco.asa 2.1.0 cisco.dnac 6.5.0 cisco.intersight 1.0.19 cisco.ios 2.8.1 cisco.iosxr 2.9.0 cisco.ise 1.2.1 cisco.meraki 2.8.0 cisco.mso 1.4.0 cisco.nso 1.0.3 cisco.nxos 2.9.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 2.6.1 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.3.4 community.digitalocean 1.20.0 community.dns 2.2.0 community.docker 2.6.0 community.fortios 1.0.0 community.general 4.8.3 community.google 1.0.0 community.grafana 1.5.0 community.hashi_vault 2.5.0 community.hrobot 1.4.0 community.kubernetes 2.0.1 community.kubevirt 1.0.0 community.libvirt 1.1.0 community.mongodb 1.4.1 community.mysql 2.3.8 community.network 3.3.0 community.okd 2.2.0 community.postgresql 1.7.4 community.proxysql 1.4.0 community.rabbitmq 1.2.1 community.routeros 2.1.0 community.sap 1.0.0 community.sap_libs 1.1.0 community.skydive 1.0.0 community.sops 1.2.2 community.vmware 1.18.2 community.windows 1.10.0 community.zabbix 1.7.0 containers.podman 1.9.3 cyberark.conjur 1.1.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.1 dellemc.openmanage 4.4.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.17.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.6 frr.frr 1.0.4 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.6.0 hpe.nimble 1.1.4 ibm.qradar 1.0.3 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.2.2 inspur.sm 1.3.0 junipernetworks.junos 2.10.0 kubernetes.core 2.3.2 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.18.0 netapp.elementsw 21.7.0 netapp.ontap 21.20.0 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 1.6.6 purestorage.flasharray 1.13.0 purestorage.flashblade 1.9.0 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 1.0.2 t_systems_mms.icinga_director 1.30.0 theforeman.foreman 2.2.0 vmware.vmware_rest 2.2.0 vyos.vyos 2.8.0 wti.remote 1.0.4 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: 
https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /home/rmahroua/.local/lib/python3.9/site-packages Requires: Required-by: aws-automation --- Name: boto3 Version: 1.24.89 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.9/site-packages Requires: botocore, jmespath, s3transfer Required-by: aws-automation, aws-shell --- Name: botocore Version: 1.27.89 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.9/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: aws-automation, awscli, boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment Fedora release 34 (Thirty Four) ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) object_expire_days: 30 object_storage_class: "glacier" object_transition_days: 15 ### s3_bucket: encryption: "AES256" force: false lifecycle: storage_class: "{{ object_storage_class | default('intelligent_tiering', true) }}" transition_days: "{{ object_transition_days | default(30) }}" expiration_days: "{{ object_expire_days | default(90) }}" name: "test-bucket" ### - name: Manage S3 bucket lifecycle rules in AWS community.aws.s3_lifecycle: name: "{{ s3_bucket.name }}" state: present status: enabled expiration_days: "{{ s3_bucket.lifecycle.expiration_days | int }}" transitions: - storage_class: "{{ s3_bucket.lifecycle.storage_class }}" transition_days: "{{ s3_bucket.lifecycle.transition_days | int }}" ``` ### Expected Results I expect the module to treat my input value as integer instead of string ### Actual Results ```console (paste below) "msg": "Parameter validation failed:\nInvalid type for parameter LifecycleConfiguration.Rules[0].Transitions[0].Days, value: 5, type: <class 'str'>, valid types: <class 'int'>", ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
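To make the reported failure mode concrete, here is a small standalone sketch of the transition handling with the suggested cast applied; the literal values are made up, and the real fix lives in the module's `build_rule()` as shown in the patch above.

```python
# "15" stands in for a value that went through Jinja2 templating and therefore
# arrives as a string rather than an int.
transition = {"transition_days": "15", "storage_class": "glacier"}

t_out = {}
if transition.get("transition_date"):
    t_out["Date"] = transition["transition_date"]
elif transition.get("transition_days") is not None:
    # int() accepts both a native int and a templated string such as "15",
    # which is what boto3 requires for the Transitions[].Days parameter.
    t_out["Days"] = int(transition["transition_days"])
if transition.get("storage_class"):
    t_out["StorageClass"] = transition["storage_class"].upper()

print(t_out)  # {'Days': 15, 'StorageClass': 'GLACIER'}
```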
@Razique thanks for reporting and digging into the code. Are you willing to provide a PR that fixes the issue? * bugfix * changelog fragment * integration test, that covers this case Yup! I might need to update my workstation, haven't pushed changes in a while. I'll try to get to that soon.
2023-04-24T20:11:59
ansible-collections/community.aws
1,790
ansible-collections__community.aws-1790
[ "1608", "1608" ]
f43f5b8d9212aa00bf8d15eb8dadcd7adcf963d8
diff --git a/plugins/modules/cloudformation_stack_set.py b/plugins/modules/cloudformation_stack_set.py --- a/plugins/modules/cloudformation_stack_set.py +++ b/plugins/modules/cloudformation_stack_set.py @@ -182,9 +182,11 @@ description: Test stack in two accounts state: present template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template - accounts: [1234567890, 2345678901] + accounts: + - 123456789012 + - 234567890123 regions: - - us-east-1 + - us-east-1 - name: on subsequent calls, templates are optional but parameters and tags can be altered community.aws.cloudformation_stack_set: @@ -195,9 +197,11 @@ tags: foo: bar test: stack - accounts: [1234567890, 2345678901] + accounts: + - 123456789012 + - 234567890123 regions: - - us-east-1 + - us-east-1 - name: The same type of update, but wait for the update to complete in all stacks community.aws.cloudformation_stack_set: @@ -209,7 +213,26 @@ tags: foo: bar test: stack - accounts: [1234567890, 2345678901] + accounts: + - 123456789012 + - 234567890123 + regions: + - us-east-1 + +- name: Register new accounts (create new stack instances) with an existing stack set. + community.aws.cloudformation_stack_set: + name: my-stack + state: present + wait: true + parameters: + InstanceName: my_restacked_instance + tags: + foo: bar + test: stack + accounts: + - 123456789012 + - 234567890123 + - 345678901234 regions: - us-east-1 """ @@ -655,6 +678,14 @@ def main(): stack_params["OperationPreferences"] = get_operation_preferences(module) changed |= update_stack_set(module, stack_params, cfn) + await_stack_set_operation( + module, + cfn, + operation_id=stack_params["OperationId"], + stack_set_name=stack_params["StackSetName"], + max_wait=module.params.get("wait_timeout"), + ) + # now create/update any appropriate stack instances new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances( cfn,
diff --git a/tests/integration/targets/cloudformation_stack_set/tasks/main.yml b/tests/integration/targets/cloudformation_stack_set/tasks/main.yml --- a/tests/integration/targets/cloudformation_stack_set/tasks/main.yml +++ b/tests/integration/targets/cloudformation_stack_set/tasks/main.yml @@ -14,9 +14,9 @@ aws_secret_key: "{{ secondary_aws_secret_key }}" security_token: "{{ secondary_security_token }}" region: "{{ aws_region }}" - no_log: yes + no_log: true -- name: cloudformation_stack_set tests +- name: cloudformation_stack_set tests collections: - amazon.aws
aws_cloudformation_stackset: adding new accounts doesn't create new stack instances ### Summary Hi, I have noticed that the `cloudformation_stack_set` module doesn't seem to support adding new accounts. The stack set is initially provisioned with a list of accounts, however, on subsequent runs that have more accounts, the module doesn't add these new accounts. ### Issue Type Bug Report ### Component Name `cloudformation_stack_set` ### Ansible Version ```console (paste below) ansible 2.10.17 config file = None configured module search path = ['/home/rmahroua/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.9/site-packages/ansible executable location = /usr/local/bin/ansible python version = 3.9.13 (main, May 18 2022, 00:00:00) [GCC 11.3.1 20220421 (Red Hat 11.3.1-2)] ``` ### Collection Versions ```console (paste below) Collection Version ------------- ------- amazon.aws 5.0.2 awx.awx 21.7.0 community.aws 5.0.0 # /usr/local/lib/python3.9/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 1.5.0 ``` ### AWS SDK versions ```console (paste below) Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /home/rmahroua/.local/lib/python3.9/site-packages Requires: Required-by: aws-automation --- Name: boto3 Version: 1.24.89 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.9/site-packages Requires: botocore, jmespath, s3transfer Required-by: aws-automation, aws-shell --- Name: botocore Version: 1.27.89 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.9/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: aws-automation, awscli, boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment Fedora release 34 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) # First run to deploy the stack set in two accounts - name: Deploy AWS Config StackSet community.aws.cloudformation_stack_set: name: "My StackSet" description: "descripion" state: present template: "{{ stackset.config.template | default(omit) }}" accounts: [1234, 5678] region: us-east-1 regions: us-east-1 # Second run with an extra account - name: Deploy AWS Config StackSet community.aws.cloudformation_stack_set: name: "My StackSet" description: "descripion" state: present template: "{{ stackset.config.template | default(omit) }}" accounts: [1234, 5678, 910112] region: us-east-1 regions: us-east-1 ``` ### Expected Results When adding an account to the `accounts` parameter, the module should deploy a new stack instance for that account. ### Actual Results Currently, the new account isn't being added. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/cloudformation_stack_set.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/cloudformation_stack_set.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> Based on my understanding of the API, the [`update_stack_instances`](https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/cloudformation_stack_set.py#L662) call does not deploy new stack instances, it only updates parameters against existing stack instances. To support real idempotency in this case, supplementary calls to the [`create_stack_instances`](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudformation.html#CloudFormation.Client.create_stack_instances) is necessary. The method takes in as parameter the name of an existing stack set to provision a new instance in new accounts. ``` python response = client.create_stack_instances( StackSetName='string', Accounts=[ 'string', ], DeploymentTargets={ 'Accounts': [ 'string', ], 'AccountsUrl': 'string', 'OrganizationalUnitIds': [ 'string', ], 'AccountFilterType': 'NONE'|'INTERSECTION'|'DIFFERENCE'|'UNION' }, Regions=[ 'string', ], ParameterOverrides=[ { 'ParameterKey': 'string', 'ParameterValue': 'string', 'UsePreviousValue': True|False, 'ResolvedValue': 'string' }, ], OperationPreferences={ 'RegionConcurrencyType': 'SEQUENTIAL'|'PARALLEL', 'RegionOrder': [ 'string', ], 'FailureToleranceCount': 123, 'FailureTolerancePercentage': 123, 'MaxConcurrentCount': 123, 'MaxConcurrentPercentage': 123 }, OperationId='string', CallAs='SELF'|'DELEGATED_ADMIN' ) ``` EDIT: happy to work on implementing this functionality if we wish to support adding new accounts -- if the goal of the module is to remain close to the original CloudFormation feature that consists in updating existing accounts only, then a new Ansible module is required since the current collection does not have any module for provisioning new stack instances for an existing stackset. @Razique Thank you for reporting this. Feel free to open a PR to support this functionality.
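For context, a rough boto3-level sketch of the ordering the patch above enforces: update the stack set, wait for that operation to finish, and only then register stack instances for the newly added account. The stack set name, account id and polling loop are placeholders; inside the module this is handled by `update_stack_set`, `await_stack_set_operation` and the `compare_stack_instances` / create-instances logic.

```python
import time

import boto3

cfn = boto3.client("cloudformation")

# 1. Update the stack set itself and keep the operation id.
operation_id = cfn.update_stack_set(
    StackSetName="my-stack",
    UsePreviousTemplate=True,
)["OperationId"]

# 2. Wait for that operation to finish; stack set operations run one at a
#    time, so touching stack instances while the update is still in progress
#    is rejected by CloudFormation.
while True:
    status = cfn.describe_stack_set_operation(
        StackSetName="my-stack", OperationId=operation_id
    )["StackSetOperation"]["Status"]
    if status not in ("RUNNING", "QUEUED"):
        break
    time.sleep(15)

# 3. Only now create stack instances for the account that is new to the set.
cfn.create_stack_instances(
    StackSetName="my-stack",
    Accounts=["345678901234"],
    Regions=["us-east-1"],
)
```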
2023-04-24T23:01:21
ansible-collections/community.aws
1,806
ansible-collections__community.aws-1806
[ "1608" ]
37eb2bda30ed1f8c18748c1f4e7cc5988b9d2daf
diff --git a/plugins/modules/cloudformation_stack_set.py b/plugins/modules/cloudformation_stack_set.py --- a/plugins/modules/cloudformation_stack_set.py +++ b/plugins/modules/cloudformation_stack_set.py @@ -183,9 +183,11 @@ description: Test stack in two accounts state: present template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template - accounts: [1234567890, 2345678901] + accounts: + - 123456789012 + - 234567890123 regions: - - us-east-1 + - us-east-1 - name: on subsequent calls, templates are optional but parameters and tags can be altered community.aws.cloudformation_stack_set: @@ -196,9 +198,11 @@ tags: foo: bar test: stack - accounts: [1234567890, 2345678901] + accounts: + - 123456789012 + - 234567890123 regions: - - us-east-1 + - us-east-1 - name: The same type of update, but wait for the update to complete in all stacks community.aws.cloudformation_stack_set: @@ -210,7 +214,26 @@ tags: foo: bar test: stack - accounts: [1234567890, 2345678901] + accounts: + - 123456789012 + - 234567890123 + regions: + - us-east-1 + +- name: Register new accounts (create new stack instances) with an existing stack set. + community.aws.cloudformation_stack_set: + name: my-stack + state: present + wait: true + parameters: + InstanceName: my_restacked_instance + tags: + foo: bar + test: stack + accounts: + - 123456789012 + - 234567890123 + - 345678901234 regions: - us-east-1 ''' @@ -640,6 +663,14 @@ def main(): stack_params['OperationPreferences'] = get_operation_preferences(module) changed |= update_stack_set(module, stack_params, cfn) + await_stack_set_operation( + module, + cfn, + operation_id=stack_params["OperationId"], + stack_set_name=stack_params["StackSetName"], + max_wait=module.params.get("wait_timeout"), + ) + # now create/update any appropriate stack instances new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances( cfn,
diff --git a/tests/integration/targets/cloudformation_stack_set/tasks/main.yml b/tests/integration/targets/cloudformation_stack_set/tasks/main.yml --- a/tests/integration/targets/cloudformation_stack_set/tasks/main.yml +++ b/tests/integration/targets/cloudformation_stack_set/tasks/main.yml @@ -14,9 +14,9 @@ aws_secret_key: "{{ secondary_aws_secret_key }}" security_token: "{{ secondary_security_token }}" region: "{{ aws_region }}" - no_log: yes + no_log: true -- name: cloudformation_stack_set tests +- name: cloudformation_stack_set tests collections: - amazon.aws
aws_cloudformation_stackset: adding new accounts doesn't create new stack instances ### Summary Hi, I have noticed that the `cloudformation_stack_set` module doesn't seem to support adding new accounts. The stack set is initially provisioned with a list of accounts, however, on subsequent runs that have more accounts, the module doesn't add these new accounts. ### Issue Type Bug Report ### Component Name `cloudformation_stack_set` ### Ansible Version ```console (paste below) ansible 2.10.17 config file = None configured module search path = ['/home/rmahroua/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.9/site-packages/ansible executable location = /usr/local/bin/ansible python version = 3.9.13 (main, May 18 2022, 00:00:00) [GCC 11.3.1 20220421 (Red Hat 11.3.1-2)] ``` ### Collection Versions ```console (paste below) Collection Version ------------- ------- amazon.aws 5.0.2 awx.awx 21.7.0 community.aws 5.0.0 # /usr/local/lib/python3.9/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 1.5.0 ``` ### AWS SDK versions ```console (paste below) Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /home/rmahroua/.local/lib/python3.9/site-packages Requires: Required-by: aws-automation --- Name: boto3 Version: 1.24.89 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.9/site-packages Requires: botocore, jmespath, s3transfer Required-by: aws-automation, aws-shell --- Name: botocore Version: 1.27.89 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.9/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: aws-automation, awscli, boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment Fedora release 34 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) # First run to deploy the stack set in two accounts - name: Deploy AWS Config StackSet community.aws.cloudformation_stack_set: name: "My StackSet" description: "descripion" state: present template: "{{ stackset.config.template | default(omit) }}" accounts: [1234, 5678] region: us-east-1 regions: us-east-1 # Second run with an extra account - name: Deploy AWS Config StackSet community.aws.cloudformation_stack_set: name: "My StackSet" description: "descripion" state: present template: "{{ stackset.config.template | default(omit) }}" accounts: [1234, 5678, 910112] region: us-east-1 regions: us-east-1 ``` ### Expected Results When adding an account to the `accounts` parameter, the module should deploy a new stack instance for that account. ### Actual Results Currently, the new account isn't being added. ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
Files identified in the description: * [`plugins/modules/cloudformation_stack_set.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/cloudformation_stack_set.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> Based on my understanding of the API, the [`update_stack_instances`](https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/cloudformation_stack_set.py#L662) call does not deploy new stack instances, it only updates parameters against existing stack instances. To support real idempotency in this case, supplementary calls to the [`create_stack_instances`](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudformation.html#CloudFormation.Client.create_stack_instances) is necessary. The method takes in as parameter the name of an existing stack set to provision a new instance in new accounts. ``` python response = client.create_stack_instances( StackSetName='string', Accounts=[ 'string', ], DeploymentTargets={ 'Accounts': [ 'string', ], 'AccountsUrl': 'string', 'OrganizationalUnitIds': [ 'string', ], 'AccountFilterType': 'NONE'|'INTERSECTION'|'DIFFERENCE'|'UNION' }, Regions=[ 'string', ], ParameterOverrides=[ { 'ParameterKey': 'string', 'ParameterValue': 'string', 'UsePreviousValue': True|False, 'ResolvedValue': 'string' }, ], OperationPreferences={ 'RegionConcurrencyType': 'SEQUENTIAL'|'PARALLEL', 'RegionOrder': [ 'string', ], 'FailureToleranceCount': 123, 'FailureTolerancePercentage': 123, 'MaxConcurrentCount': 123, 'MaxConcurrentPercentage': 123 }, OperationId='string', CallAs='SELF'|'DELEGATED_ADMIN' ) ``` EDIT: happy to work on implementing this functionality if we wish to support adding new accounts -- if the goal of the module is to remain close to the original CloudFormation feature that consists in updating existing accounts only, then a new Ansible module is required since the current collection does not have any module for provisioning new stack instances for an existing stackset. @Razique Thank you for reporting this. Feel free to open a PR to support this functionality.
2023-05-05T08:22:30
ansible-collections/community.aws
1,807
ansible-collections__community.aws-1807
[ "1774" ]
37eb2bda30ed1f8c18748c1f4e7cc5988b9d2daf
diff --git a/plugins/modules/s3_lifecycle.py b/plugins/modules/s3_lifecycle.py --- a/plugins/modules/s3_lifecycle.py +++ b/plugins/modules/s3_lifecycle.py @@ -321,13 +321,13 @@ def build_rule(client, module): rule['Transitions'] = [] for transition in transitions: t_out = dict() - if transition.get('transition_date'): - t_out['Date'] = transition['transition_date'] - elif transition.get('transition_days') is not None: - t_out['Days'] = transition['transition_days'] - if transition.get('storage_class'): - t_out['StorageClass'] = transition['storage_class'].upper() - rule['Transitions'].append(t_out) + if transition.get("transition_date"): + t_out["Date"] = transition["transition_date"] + elif transition.get("transition_days") is not None: + t_out["Days"] = int(transition["transition_days"]) + if transition.get("storage_class"): + t_out["StorageClass"] = transition["storage_class"].upper() + rule["Transitions"].append(t_out) if noncurrent_version_transition_days is not None: rule['NoncurrentVersionTransitions'] = [dict(NoncurrentDays=noncurrent_version_transition_days, @@ -485,9 +485,10 @@ def create_lifecycle_rule(client, module): client.put_bucket_lifecycle_configuration( aws_retry=True, Bucket=name, - LifecycleConfiguration=lifecycle_configuration) - except is_boto3_error_message('At least one action needs to be specified in a rule'): - # Amazon interpretted this as not changing anything + LifecycleConfiguration=lifecycle_configuration, + ) + except is_boto3_error_message("At least one action needs to be specified in a rule"): + # Amazon interpreted this as not changing anything changed = False except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules)
diff --git a/tests/integration/targets/s3_lifecycle/tasks/main.yml b/tests/integration/targets/s3_lifecycle/tasks/main.yml --- a/tests/integration/targets/s3_lifecycle/tasks/main.yml +++ b/tests/integration/targets/s3_lifecycle/tasks/main.yml @@ -9,7 +9,7 @@ security_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' s3_lifecycle: - wait: yes + wait: true block: # ============================================================ @@ -33,7 +33,7 @@ prefix: "{{ item }}" status: enabled state: present - wait: yes + wait: true register: output loop: - rule_1 @@ -51,7 +51,7 @@ prefix: "{{ item }}" status: enabled state: absent - wait: yes + wait: true register: output loop: - rule_1 @@ -704,6 +704,6 @@ s3_bucket: name: "{{item}}" state: absent - ignore_errors: yes + ignore_errors: true with_items: - '{{ bucket_name }}'
S3 Lifecycle module is missing a cast to integer for transitions list ### Summary Hi, I have noticed that when using the `community.aws.s3_lifecycle` module, AWS S3 complains that values passed for the `transition_days` of a life cycle transition are of string type instead of integer: ``` "msg": "Parameter validation failed:\nInvalid type for parameter LifecycleConfiguration.Rules[0].Transitions[0].Days, value: 5, type: <class 'str'>, valid types: <class 'int'>", ``` This is what I have for my transition rule: ```yaml - name: Manage S3 bucket lifecycle rules in AWS community.aws.s3_lifecycle: name: "<BUCKET NAME>" state: present status: enabled expiration_days: "{{ s3_bucket.lifecycle.expiration_days | int }}" transitions: - storage_class: "{{ s3_bucket.lifecycle.storage_class }}" transition_days: "{{ s3_bucket.lifecycle.transition_days }}" ``` With `expiration_days` having a value of 30 and `transition_days` having a value of 15. Looking at the code, I _suspect_ that passing the type instructs Ansible to perform an internal conversion: ```python def main(): s3_storage_class = ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive'] argument_spec = dict( ... expiration_days=dict(type='int'), <--- type here in int ``` The conversion isn't performed for parameters in transtions, because this is a list of dicts: ```python transitions=dict(type='list', elements='dict'), ``` When the rule is being built via the [`build_rule` function](https://github.com/ansible-collections/community.aws/blob/3c3698d79ce5f56cefd24efe98cc49e6330f4c29/plugins/modules/s3_lifecycle.py#L273), the value is retrieved "as is", which can be a string if the value has been templated by Jinja2. I suggest to force the cast to integer via the following change. This way, irrespective of the type being "rendered" (either native int or str from Jinja2 templating), we always send the correct type: ```python transition_days = int(module.params.get("transition_days")) ``` I could be missing something, but in my test, forcing the conversion solved the issue. 
### Issue Type Bug Report ### Component Name s3_lifecycle ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.10] config file = None configured module search path = ['/home/rmahroua/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.9/site-packages/ansible ansible collection location = /home/rmahroua/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.9.13 (main, May 18 2022, 00:00:00) [GCC 11.3.1 20220421 (Red Hat 11.3.1-2)] jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /home/rmahroua/.ansible/collections/ansible_collections Collection Version --------------------- ------- amazon.aws 5.1.0 awx.awx 21.7.0 community.aws 5.2.0 community.general 6.0.1 community.hashi_vault 4.0.0 google.cloud 1.0.2 # /usr/local/lib/python3.9/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 2.3.0 ansible.netcommon 2.6.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.10.0 arista.eos 3.1.0 awx.awx 19.4.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.2.0 cisco.aci 2.2.0 cisco.asa 2.1.0 cisco.dnac 6.5.0 cisco.intersight 1.0.19 cisco.ios 2.8.1 cisco.iosxr 2.9.0 cisco.ise 1.2.1 cisco.meraki 2.8.0 cisco.mso 1.4.0 cisco.nso 1.0.3 cisco.nxos 2.9.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 2.6.1 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.3.4 community.digitalocean 1.20.0 community.dns 2.2.0 community.docker 2.6.0 community.fortios 1.0.0 community.general 4.8.3 community.google 1.0.0 community.grafana 1.5.0 community.hashi_vault 2.5.0 community.hrobot 1.4.0 community.kubernetes 2.0.1 community.kubevirt 1.0.0 community.libvirt 1.1.0 community.mongodb 1.4.1 community.mysql 2.3.8 community.network 3.3.0 community.okd 2.2.0 community.postgresql 1.7.4 community.proxysql 1.4.0 community.rabbitmq 1.2.1 community.routeros 2.1.0 community.sap 1.0.0 community.sap_libs 1.1.0 community.skydive 1.0.0 community.sops 1.2.2 community.vmware 1.18.2 community.windows 1.10.0 community.zabbix 1.7.0 containers.podman 1.9.3 cyberark.conjur 1.1.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.1 dellemc.openmanage 4.4.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.17.0 fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.6 frr.frr 1.0.4 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.6.0 hpe.nimble 1.1.4 ibm.qradar 1.0.3 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.2.2 inspur.sm 1.3.0 junipernetworks.junos 2.10.0 kubernetes.core 2.3.2 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.18.0 netapp.elementsw 21.7.0 netapp.ontap 21.20.0 netapp.storagegrid 21.10.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.0 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.8.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 1.6.6 purestorage.flasharray 1.13.0 purestorage.flashblade 1.9.0 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 1.0.2 t_systems_mms.icinga_director 1.30.0 theforeman.foreman 2.2.0 vmware.vmware_rest 2.2.0 vyos.vyos 2.8.0 wti.remote 1.0.4 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Name: boto Version: 2.49.0 Summary: Amazon Web Services Library Home-page: 
https://github.com/boto/boto/ Author: Mitch Garnaat Author-email: [email protected] License: MIT Location: /home/rmahroua/.local/lib/python3.9/site-packages Requires: Required-by: aws-automation --- Name: boto3 Version: 1.24.89 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.9/site-packages Requires: botocore, jmespath, s3transfer Required-by: aws-automation, aws-shell --- Name: botocore Version: 1.27.89 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.9/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: aws-automation, awscli, boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment Fedora release 34 (Thirty Four) ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) object_expire_days: 30 object_storage_class: "glacier" object_transition_days: 15 ### s3_bucket: encryption: "AES256" force: false lifecycle: storage_class: "{{ object_storage_class | default('intelligent_tiering', true) }}" transition_days: "{{ object_transition_days | default(30) }}" expiration_days: "{{ object_expire_days | default(90) }}" name: "test-bucket" ### - name: Manage S3 bucket lifecycle rules in AWS community.aws.s3_lifecycle: name: "{{ s3_bucket.name }}" state: present status: enabled expiration_days: "{{ s3_bucket.lifecycle.expiration_days | int }}" transitions: - storage_class: "{{ s3_bucket.lifecycle.storage_class }}" transition_days: "{{ s3_bucket.lifecycle.transition_days | int }}" ``` ### Expected Results I expect the module to treat my input value as integer instead of string ### Actual Results ```console (paste below) "msg": "Parameter validation failed:\nInvalid type for parameter LifecycleConfiguration.Rules[0].Transitions[0].Days, value: 5, type: <class 'str'>, valid types: <class 'int'>", ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
@Razique thanks for reporting and digging into the code. Are you willing to provide a PR that fixes the issue? * bugfix * changelog fragment * integration test, that covers this case Yup! I might need to update my workstation, haven't pushed changes in a while. I'll try to get to that soon.
2023-05-05T08:28:09
ansible-collections/community.aws
1,821
ansible-collections__community.aws-1821
[ "1819" ]
dbba718f2a216399c103d76bff4c168b5a805118
diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py --- a/plugins/modules/cloudfront_distribution.py +++ b/plugins/modules/cloudfront_distribution.py @@ -1417,6 +1417,7 @@ from collections import OrderedDict import datetime +import re try: import botocore @@ -1676,7 +1677,7 @@ def __init__(self, module): "http2and3", ] ) - self.__s3_bucket_domain_identifier = ".s3.amazonaws.com" + self.__s3_bucket_domain_regex = re.compile(r"\.s3(?:\.[^.]+)?\.amazonaws\.com$") def add_missing_key(self, dict_object, key_to_set, value_to_set): if key_to_set not in dict_object and value_to_set is not None: @@ -1818,7 +1819,7 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): ) else: origin_shield_region = origin_shield_region.lower() - if self.__s3_bucket_domain_identifier in origin.get("domain_name").lower(): + if self.__s3_bucket_domain_regex.search(origin.get("domain_name").lower()): if origin.get("s3_origin_access_identity_enabled") is not None: if origin["s3_origin_access_identity_enabled"]: s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) @@ -1834,10 +1835,10 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): origin["s3_origin_config"] = dict(origin_access_identity=oai) - if "custom_origin_config" in origin: - self.module.fail_json( - msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive" - ) + if "custom_origin_config" in origin: + self.module.fail_json( + msg="s3 origin domains and custom_origin_config are mutually exclusive" + ) else: origin = self.add_missing_key( origin, "custom_origin_config", existing_config.get("custom_origin_config", {})
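For reference, a quick standalone check of the domain pattern this patch introduces (`__s3_bucket_domain_regex`); the bucket names below are made up, but the regular expression is the one added to the module.

```python
import re

s3_domain = re.compile(r"\.s3(?:\.[^.]+)?\.amazonaws\.com$")

print(bool(s3_domain.search("my-bucket.s3.amazonaws.com")))            # True  - form without a region segment
print(bool(s3_domain.search("my-bucket.s3.eu-west-1.amazonaws.com")))  # True  - form with a region segment (the report's case)
print(bool(s3_domain.search("www.example.com")))                       # False - falls through to custom origin handling
```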
diff --git a/tests/integration/targets/cloudfront_distribution/tasks/main.yml b/tests/integration/targets/cloudfront_distribution/tasks/main.yml --- a/tests/integration/targets/cloudfront_distribution/tasks/main.yml +++ b/tests/integration/targets/cloudfront_distribution/tasks/main.yml @@ -568,6 +568,43 @@ ignore_errors: true - name: check that custom origin with origin access identity fails + # "s3 origin domains and custom_origin_config are mutually exclusive" + assert: + that: + - update_origin_to_s3_with_origin_access_and_with_custom_origin_config.failed + + - name: check that custom_origin_config can't be used with an region-agnostic S3 domain + cloudfront_distribution: + distribution_id: "{{ distribution_id }}" + origins: + - domain_name: "{{ resource_prefix }}-bucket.s3.{{ aws_region }}.amazonaws.com" + id: "{{ resource_prefix }}3.example.com" + custom_origin_config: + http_port: 8080 + state: present + register: update_origin_to_s3_with_origin_access_and_with_custom_origin_config + ignore_errors: true + + - name: check that custom origin with region-agnostic S3 domain fails + # "s3 origin domains and custom_origin_config are mutually exclusive" + assert: + that: + - update_origin_to_s3_with_origin_access_and_with_custom_origin_config.failed + + - name: check that custom_origin_config can't be used with an region-aware S3 domain + cloudfront_distribution: + distribution_id: "{{ distribution_id }}" + origins: + - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com" + id: "{{ resource_prefix }}3.example.com" + custom_origin_config: + http_port: 8080 + state: present + register: update_origin_to_s3_with_origin_access_and_with_custom_origin_config + ignore_errors: true + + - name: check that custom origin with region-aware S3 domain fails + # "s3 origin domains and custom_origin_config are mutually exclusive" assert: that: - update_origin_to_s3_with_origin_access_and_with_custom_origin_config.failed
cloudfront_distribution doesn't recognise S3 origin ### Summary When I refer to an S3 bucket domain in the form `{bucket_name}.s3.{region}.amazonaws.com`, as per [Origin Domain spec](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesDomainName), it isn't recognised as an S3 domain [here](https://github.com/ansible-collections/community.aws/blob/e80bf933412ea5c7ab2a94af945170cb2ebd900f/plugins/modules/cloudfront_distribution.py#L1821), so a `custom_origin_config` entry is added automatically [here](https://github.com/ansible-collections/community.aws/blob/e80bf933412ea5c7ab2a94af945170cb2ebd900f/plugins/modules/cloudfront_distribution.py#L1842), which results in an error: "botocore.errorfactory.InvalidOrigin: An error occurred (InvalidOrigin) when calling the CreateDistribution operation: You must specify either a CustomOrigin or an S3Origin. You cannot specify both." The problem is in the method used for recognising S3 domains: whether it contains `.s3.amazonaws.com` or not (note the missing region part). ### Issue Type Bug Report ### Component Name cloudfront_distribution ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.14.5] config file = /etc/ansible/ansible.cfg configured module search path = ['/home/fules/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python3/dist-packages/ansible ansible collection location = /home/fules/.ansible/collections:/usr/share/ansible/collections executable location = /usr/bin/ansible python version = 3.10.6 (main, Mar 10 2023, 10:55:28) [GCC 11.3.0] (/usr/bin/python3) jinja version = 3.0.3 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /home/fules/.ansible/collections/ansible_collections Collection Version ------------- ------- amazon.aws 6.0.1 community.aws 6.0.0 # /usr/lib/python3/dist-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 5.4.0 ansible.netcommon 4.1.0 ansible.posix 1.5.2 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.1 awx.awx 21.14.0 azure.azcollection 1.15.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.6.0 cisco.asa 4.0.0 cisco.dnac 6.7.1 cisco.intersight 1.0.27 cisco.ios 4.5.0 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.1 cisco.mso 2.4.0 cisco.nso 1.0.3 cisco.nxos 4.3.0 cisco.ucs 1.8.0 cloud.common 2.1.3 cloudscale_ch.cloud 2.2.4 community.aws 5.4.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.12.0 community.digitalocean 1.23.0 community.dns 2.5.3 community.docker 3.4.3 community.fortios 1.0.0 community.general 6.6.0 community.google 1.0.0 community.grafana 1.5.4 community.hashi_vault 4.2.0 community.hrobot 1.8.0 community.libvirt 1.2.0 community.mongodb 1.5.2 community.mysql 3.6.0 community.network 5.0.0 community.okd 2.3.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.8.0 community.sap 1.0.0 community.sap_libs 1.4.1 community.skydive 1.0.0 community.sops 1.6.1 community.vmware 3.5.0 community.windows 1.12.0 community.zabbix 1.9.3 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.6.0 dellemc.unity 1.6.0 f5networks.f5_modules 1.23.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.3 frr.frr 2.0.2 gluster.gluster 1.0.2 
google.cloud 1.1.3 grafana.grafana 1.1.1 hetzner.hcloud 1.11.0 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.3.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.4.0 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 microsoft.ad 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.5.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.12.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.17.2 purestorage.flashblade 1.11.0 purestorage.fusion 1.4.2 sensu.sensu_go 1.13.2 splunk.es 2.1.0 t_systems_mms.icinga_director 1.32.2 theforeman.foreman 3.10.0 vmware.vmware_rest 2.3.1 vultr.cloud 1.7.0 vyos.vyos 4.0.2 wti.remote 1.0.4 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.26.137 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.29.137 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed CONFIG_FILE() = /etc/ansible/ansible.cfg ``` ### OS / Environment $ lsb_release -a No LSB modules are available. 
Distributor ID: Ubuntu Description: Ubuntu 22.04.1 LTS Release: 22.04 Codename: jammy ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - hosts: localhost collections: - community.aws - amazon.aws vars_files: - external_vars.yaml tasks: - name: Reading landing page domain from certificate acm_certificate_info: profile: "{{ aws_profile }}" region: "us-east-1" certificate_arn: "{{ landing_page_certificate_arn }}" register: landing_page_certificates - set_fact: landing_page_domain: "{{ landing_page_certificates.certificates[0].domain_name }}" - name: Create the S3 bucket s3_bucket: profile: "{{ aws_profile }}" state: present name: "{{ landing_page_domain }}" object_ownership: "BucketOwnerEnforced" encryption: "AES256" versioning: false public_access: block_public_acls: false block_public_policy: false ignore_public_acls: false restrict_public_buckets: false register: landing_page_bucket - name: Create CloudFront Access Identity cloudfront_origin_access_identity: state: present caller_reference: "LandingPageAccessIdentity" comment: "no comment" register: landing_page_access_identity - name: Create CloudFront Distribution cloudfront_distribution: profile: "{{ aws_profile }}" state: present http_version: "http2" caller_reference: "LandingPageDistribution" comment: "no comment" alias: "{{ landing_page_domain }}" viewer_certificate: acm_certificate_arn: "{{ landing_page_certificate_arn }}" ssl_support_method: "sni-only" minimum_protocol_version: "TLSv1.2_2021" origins: - id: "{{ landing_page_domain }}" domain_name: "{{ landing_page_bucket.name }}.s3.{{ aws_region }}.amazonaws.com" s3_origin_config: origin_access_identity: "origin-access-identity/cloudfront/{{ landing_page_access_identity.cloud_front_origin_access_identity.id }}" origin_shield: enabled: false # default_origin_domain_name: "{{ landing_page_bucket.name }}" default_root_object: "index.html" price_class: "PriceClass_200" wait: true register: landing_page_distribution ``` ### Expected Results I expected that only the `s3_origin_config` is generated in the origin, and the `custom_origin_config` isn't. ### Actual Results ```console (paste below) botocore.errorfactory.InvalidOrigin: An error occurred (InvalidOrigin) when calling the CreateDistribution operation: You must specify either a CustomOrigin or an S3Origin. You cannot specify both. ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
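To make the behaviour of the fix concrete: the regular expression introduced in the patch above accepts both the region-agnostic and the region-aware bucket domain, and, unlike the old substring test, it is anchored to the end of the domain name. A small standalone check (domain names are made up):

```python
import re

# The pattern from the patch above, exercised against a few sample domains.
s3_domain = re.compile(r"\.s3(?:\.[^.]+)?\.amazonaws\.com$")

for domain in (
    "my-bucket.s3.amazonaws.com",            # region-agnostic S3 domain
    "my-bucket.s3.us-east-2.amazonaws.com",  # region-aware S3 domain from the report
    "www.example.com",                       # custom origin, must not match
):
    print(domain, bool(s3_domain.search(domain.lower())))
# my-bucket.s3.amazonaws.com True
# my-bucket.s3.us-east-2.amazonaws.com True
# www.example.com False
```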
2023-05-23T18:32:08
ansible-collections/community.aws
1,822
ansible-collections__community.aws-1822
[ "255" ]
e80bf933412ea5c7ab2a94af945170cb2ebd900f
diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py --- a/plugins/modules/cloudfront_distribution.py +++ b/plugins/modules/cloudfront_distribution.py @@ -2316,7 +2316,8 @@ def validate_distribution_id_from_alias(self, aliases): def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference): if distribution_id is None: - distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)["Id"] + distribution = self.validate_distribution_from_caller_reference(caller_reference=caller_reference) + distribution_id = distribution["Distribution"]["Id"] try: waiter = client.get_waiter("distribution_deployed")
cloudfront_distribution.py - wait_until_processed appears broken <!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> A cloudfront_distribution which performs its operation and the cloudfront distribution is created successfully. However, when `wait: yes` is enabled the error is detailed below. Digging around the source I've been unable to find anything on my own where the key error could be originating from. This behavior only occurs on creation with `state: present`, not on deletion with `state: absent` ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> cloudfront_distribution ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 13:18 $ ansible --version ansible 2.10.1 config file = /Users/redacted/redacted/ansible-roles/ansible.role-cloudfront-stream-distribution/ansible.cfg configured module search path = ['/Users/redacted/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.7/site-packages/ansible executable location = /usr/local/bin/ansible python version = 3.7.7 (default, Mar 10 2020, 15:43:27) [Clang 10.0.0 (clang-1000.11.45.5)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below 13:43 $ ansible-config dump --only-changed INTERPRETER_PYTHON(/Users/redacted/redacted/ansible-roles/ansible.role-cloudfront-stream-distribution/ansible.cfg) = auto (END) ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ``` Darwin 17.7.0 Darwin Kernel Version 17.7.0: Wed Apr 24 21:17:24 PDT 2019; root:xnu-4570.71.45~1/RELEASE_X86_64 x86_64 MacOS 10.13.6 ``` ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> I haven't been able to fully deconstruct it yet to a minimal case, I will try to update with one if I can. Right now I can reproduce by using `wait: yes`, and it goes away with `wait: no` <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> The ansible routine to wait until the distribution is made and reports healthy and then exit with my registered variable populated. ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below TASK [/Users/redacted/redacted/ansible-roles/ansible.role-cloudfront-stream-distribution : Create / Manage / Delete distribution (This may take some time)] *** An exception occurred during task execution. To see the full traceback, use -vvv. The error was: KeyError: 'Id' fatal: [localhost]: FAILED! 
=> {"changed": false, "module_stderr": "Traceback (most recent call last):\n File \"/Users/redacted/.ansible/tmp/ansible-tmp-1602015472.329021-59538-203288307626216/AnsiballZ_cloudfront_distribution.py\", line 102, in <module>\n _ansiballz_main()\n File \"/Users/redacted/.ansible/tmp/ansible-tmp-1602015472.329021-59538-203288307626216/AnsiballZ_cloudfront_distribution.py\", line 94, in _ansiballz_main\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\n File \"/Users/redacted/.ansible/tmp/ansible-tmp-1602015472.329021-59538-203288307626216/AnsiballZ_cloudfront_distribution.py\", line 40, in invoke_module\n runpy.run_module(mod_name='ansible_collections.community.aws.plugins.modules.cloudfront_distribution', init_globals=None, run_name='__main__', alter_sys=True)\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py\", line 205, in run_module\n return _run_module_code(code, init_globals, run_name, mod_spec)\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py\", line 96, in _run_module_code\n mod_name, mod_spec, pkg_name, script_name)\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/var/folders/w_/1n5tpdtx3h1f5k3skn3d6d9r0000gn/T/ansible_community.aws.cloudfront_distribution_payload_lk_w9xob/ansible_community.aws.cloudfront_distribution_payload.zip/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py\", line 2252, in <module>\n File \"/var/folders/w_/1n5tpdtx3h1f5k3skn3d6d9r0000gn/T/ansible_community.aws.cloudfront_distribution_payload_lk_w9xob/ansible_community.aws.cloudfront_distribution_payload.zip/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py\", line 2242, in main\n File \"/var/folders/w_/1n5tpdtx3h1f5k3skn3d6d9r0000gn/T/ansible_community.aws.cloudfront_distribution_payload_lk_w9xob/ansible_community.aws.cloudfront_distribution_payload.zip/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py\", line 2072, in wait_until_processed\nKeyError: 'Id'\n", "module_stdout": "", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1} ```
Files identified in the description: * [`plugins/modules/cloudfront_distribution.py`](https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/cloudfront_distribution.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @s-hertel @tremble @willthames @wilvk @wimnat [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2023-05-23T18:42:01
ansible-collections/community.aws
1,835
ansible-collections__community.aws-1835
[ "255" ]
818324aeb0afa4f2ea9c2db0a39c8f16546e036a
diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py --- a/plugins/modules/cloudfront_distribution.py +++ b/plugins/modules/cloudfront_distribution.py @@ -2316,7 +2316,8 @@ def validate_distribution_id_from_alias(self, aliases): def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference): if distribution_id is None: - distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)["Id"] + distribution = self.validate_distribution_from_caller_reference(caller_reference=caller_reference) + distribution_id = distribution["Distribution"]["Id"] try: waiter = client.get_waiter("distribution_deployed")
cloudfront_distribution.py - wait_until_processed appears broken <!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> A cloudfront_distribution which performs its operation and the cloudfront distribution is created successfully. However, when `wait: yes` is enabled the error is detailed below. Digging around the source I've been unable to find anything on my own where the key error could be originating from. This behavior only occurs on creation with `state: present`, not on deletion with `state: absent` ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> cloudfront_distribution ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 13:18 $ ansible --version ansible 2.10.1 config file = /Users/redacted/redacted/ansible-roles/ansible.role-cloudfront-stream-distribution/ansible.cfg configured module search path = ['/Users/redacted/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.7/site-packages/ansible executable location = /usr/local/bin/ansible python version = 3.7.7 (default, Mar 10 2020, 15:43:27) [Clang 10.0.0 (clang-1000.11.45.5)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below 13:43 $ ansible-config dump --only-changed INTERPRETER_PYTHON(/Users/redacted/redacted/ansible-roles/ansible.role-cloudfront-stream-distribution/ansible.cfg) = auto (END) ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ``` Darwin 17.7.0 Darwin Kernel Version 17.7.0: Wed Apr 24 21:17:24 PDT 2019; root:xnu-4570.71.45~1/RELEASE_X86_64 x86_64 MacOS 10.13.6 ``` ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> I haven't been able to fully deconstruct it yet to a minimal case, I will try to update with one if I can. Right now I can reproduce by using `wait: yes`, and it goes away with `wait: no` <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> The ansible routine to wait until the distribution is made and reports healthy and then exit with my registered variable populated. ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below TASK [/Users/redacted/redacted/ansible-roles/ansible.role-cloudfront-stream-distribution : Create / Manage / Delete distribution (This may take some time)] *** An exception occurred during task execution. To see the full traceback, use -vvv. The error was: KeyError: 'Id' fatal: [localhost]: FAILED! 
=> {"changed": false, "module_stderr": "Traceback (most recent call last):\n File \"/Users/redacted/.ansible/tmp/ansible-tmp-1602015472.329021-59538-203288307626216/AnsiballZ_cloudfront_distribution.py\", line 102, in <module>\n _ansiballz_main()\n File \"/Users/redacted/.ansible/tmp/ansible-tmp-1602015472.329021-59538-203288307626216/AnsiballZ_cloudfront_distribution.py\", line 94, in _ansiballz_main\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\n File \"/Users/redacted/.ansible/tmp/ansible-tmp-1602015472.329021-59538-203288307626216/AnsiballZ_cloudfront_distribution.py\", line 40, in invoke_module\n runpy.run_module(mod_name='ansible_collections.community.aws.plugins.modules.cloudfront_distribution', init_globals=None, run_name='__main__', alter_sys=True)\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py\", line 205, in run_module\n return _run_module_code(code, init_globals, run_name, mod_spec)\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py\", line 96, in _run_module_code\n mod_name, mod_spec, pkg_name, script_name)\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/var/folders/w_/1n5tpdtx3h1f5k3skn3d6d9r0000gn/T/ansible_community.aws.cloudfront_distribution_payload_lk_w9xob/ansible_community.aws.cloudfront_distribution_payload.zip/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py\", line 2252, in <module>\n File \"/var/folders/w_/1n5tpdtx3h1f5k3skn3d6d9r0000gn/T/ansible_community.aws.cloudfront_distribution_payload_lk_w9xob/ansible_community.aws.cloudfront_distribution_payload.zip/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py\", line 2242, in main\n File \"/var/folders/w_/1n5tpdtx3h1f5k3skn3d6d9r0000gn/T/ansible_community.aws.cloudfront_distribution_payload_lk_w9xob/ansible_community.aws.cloudfront_distribution_payload.zip/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py\", line 2072, in wait_until_processed\nKeyError: 'Id'\n", "module_stdout": "", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1} ```
Files identified in the description: * [`plugins/modules/cloudfront_distribution.py`](https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/cloudfront_distribution.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @s-hertel @tremble @willthames @wilvk @wimnat [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2023-06-01T10:49:10
ansible-collections/community.aws
1,838
ansible-collections__community.aws-1838
[ "255" ]
48edfac4c4d6358f964fe5707f2ba40d9c5d7669
diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py --- a/plugins/modules/cloudfront_distribution.py +++ b/plugins/modules/cloudfront_distribution.py @@ -2089,7 +2089,8 @@ def validate_distribution_id_from_alias(self, aliases): def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference): if distribution_id is None: - distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)['Id'] + distribution = self.validate_distribution_from_caller_reference(caller_reference=caller_reference) + distribution_id = distribution["Distribution"]["Id"] try: waiter = client.get_waiter('distribution_deployed')
cloudfront_distribution.py - wait_until_processed appears broken <!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> A cloudfront_distribution which performs its operation and the cloudfront distribution is created successfully. However, when `wait: yes` is enabled the error is detailed below. Digging around the source I've been unable to find anything on my own where the key error could be originating from. This behavior only occurs on creation with `state: present`, not on deletion with `state: absent` ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> cloudfront_distribution ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 13:18 $ ansible --version ansible 2.10.1 config file = /Users/redacted/redacted/ansible-roles/ansible.role-cloudfront-stream-distribution/ansible.cfg configured module search path = ['/Users/redacted/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.7/site-packages/ansible executable location = /usr/local/bin/ansible python version = 3.7.7 (default, Mar 10 2020, 15:43:27) [Clang 10.0.0 (clang-1000.11.45.5)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below 13:43 $ ansible-config dump --only-changed INTERPRETER_PYTHON(/Users/redacted/redacted/ansible-roles/ansible.role-cloudfront-stream-distribution/ansible.cfg) = auto (END) ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ``` Darwin 17.7.0 Darwin Kernel Version 17.7.0: Wed Apr 24 21:17:24 PDT 2019; root:xnu-4570.71.45~1/RELEASE_X86_64 x86_64 MacOS 10.13.6 ``` ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> I haven't been able to fully deconstruct it yet to a minimal case, I will try to update with one if I can. Right now I can reproduce by using `wait: yes`, and it goes away with `wait: no` <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> The ansible routine to wait until the distribution is made and reports healthy and then exit with my registered variable populated. ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below TASK [/Users/redacted/redacted/ansible-roles/ansible.role-cloudfront-stream-distribution : Create / Manage / Delete distribution (This may take some time)] *** An exception occurred during task execution. To see the full traceback, use -vvv. The error was: KeyError: 'Id' fatal: [localhost]: FAILED! 
=> {"changed": false, "module_stderr": "Traceback (most recent call last):\n File \"/Users/redacted/.ansible/tmp/ansible-tmp-1602015472.329021-59538-203288307626216/AnsiballZ_cloudfront_distribution.py\", line 102, in <module>\n _ansiballz_main()\n File \"/Users/redacted/.ansible/tmp/ansible-tmp-1602015472.329021-59538-203288307626216/AnsiballZ_cloudfront_distribution.py\", line 94, in _ansiballz_main\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\n File \"/Users/redacted/.ansible/tmp/ansible-tmp-1602015472.329021-59538-203288307626216/AnsiballZ_cloudfront_distribution.py\", line 40, in invoke_module\n runpy.run_module(mod_name='ansible_collections.community.aws.plugins.modules.cloudfront_distribution', init_globals=None, run_name='__main__', alter_sys=True)\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py\", line 205, in run_module\n return _run_module_code(code, init_globals, run_name, mod_spec)\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py\", line 96, in _run_module_code\n mod_name, mod_spec, pkg_name, script_name)\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/var/folders/w_/1n5tpdtx3h1f5k3skn3d6d9r0000gn/T/ansible_community.aws.cloudfront_distribution_payload_lk_w9xob/ansible_community.aws.cloudfront_distribution_payload.zip/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py\", line 2252, in <module>\n File \"/var/folders/w_/1n5tpdtx3h1f5k3skn3d6d9r0000gn/T/ansible_community.aws.cloudfront_distribution_payload_lk_w9xob/ansible_community.aws.cloudfront_distribution_payload.zip/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py\", line 2242, in main\n File \"/var/folders/w_/1n5tpdtx3h1f5k3skn3d6d9r0000gn/T/ansible_community.aws.cloudfront_distribution_payload_lk_w9xob/ansible_community.aws.cloudfront_distribution_payload.zip/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py\", line 2072, in wait_until_processed\nKeyError: 'Id'\n", "module_stdout": "", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1} ```
Files identified in the description: * [`plugins/modules/cloudfront_distribution.py`](https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/cloudfront_distribution.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @s-hertel @tremble @willthames @wilvk @wimnat [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2023-06-01T10:56:29
ansible-collections/community.aws
1,839
ansible-collections__community.aws-1839
[ "1756" ]
3a801c0ac90bad029eb87aeac9c37306760ae6cb
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -627,7 +627,7 @@ def _prepare_terminal(self): disable_prompt_complete = None end_mark = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)]) disable_prompt_cmd = to_bytes( - "PS1='' ; printf '\\n%s\\n' '" + end_mark + "'\n", + "PS1='' ; bind 'set enable-bracketed-paste off'; printf '\\n%s\\n' '" + end_mark + "'\n", errors="surrogate_or_strict", ) disable_prompt_reply = re.compile(r"\r\r\n" + re.escape(end_mark) + r"\r\r\n", re.MULTILINE)
unexpected output from Python interpreter discovery with aws_ssm connection plugin ### Summary I had good luck with the aws_ssm plugin until attempting to use it against the latest Amazon Linux AMI. Simple commands that work well with a CentOS 7 host fails when trying to run them against AMI. It appears to be something to do with interpreting shell output. A 'raw' command like this works fine: ``` ansible -i inventory.aws_ec2.yaml -m 'raw' -a 'whoami' tag_role_FAKEMX ``` However when running the equivalent 'command' module it fails for Amazon Linux, but works on CentOS 7. ``` ❯ ansible -i inventory.aws_ec2.yaml -m 'command' -a 'whoami' tag_role_FAKEMX -l ec2-13-58-203-89.us-east-2.compute.amazonaws.com [WARNING]: Unhandled error in Python interpreter discovery for host ec2-13-58-203-89.us-east-2.compute.amazonaws.com: unexpected output from Python interpreter discovery ec2-13-58-203-89.us-east-2.compute.amazonaws.com | FAILED | rc=-1 >> failed to transfer file to /Users/username/.ansible/tmp/ansible-local-78807y4966nip/tmpqn_vbdbk /AnsiballZ_command.py: % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0Warning: Failed to open the file 2004h2004l/AnsiballZ_command.py: No such file Warning: or directory 1 129k 1 1531 0 0 19698 0 0:00:06 --:--:-- 0:00:06 19883 curl: (23) Failure writing output to destination ``` Versus CentOS 7: ``` ❯ ansible -i inventory.aws_ec2.yaml -m 'command' -a 'whoami' tag_role_FAKEMX -l ip-10-240-172-59.us-east-2.compute.internal [WARNING]: Platform linux on host ip-10-240-172-59.us-east-2.compute.internal is using the discovered Python interpreter at /usr/libexec/platform-python, but future installation of another Python interpreter could change the meaning of that path. See https://docs.ansible.com/ansible- core/2.14/reference_appendices/interpreter_discovery.html for more information. ip-10-240-172-59.us-east-2.compute.internal | CHANGED | rc=0 >> root ``` From '-vvvv' output I see things like this: ``` <i-0cc859c89f4aaf5f4> ssm_retry: (success) (0, '\x1b[?2004h\x1b[?2004l\r\r\r\nPLATFORM\r\r\nLinux\r\r\nFOUND\r\r\n/usr/bin/python3.9\r\r\n/usr/bin/python3\r\r\nENDFOUND\r\r\n\x1b[?2004h\x1b[?2004l\r\r\r', '') [WARNING]: Unhandled error in Python interpreter discovery for host ec2-13-58-203-89.us-east-2.compute.amazonaws.com: unexpected output from Python interpreter discovery Using module file /Users/username/.asdf/installs/python/3.11.2/lib/python3.11/site-packages/ansible/modules/command.py ``` That's a failure to find the python version. Versus: ``` <i-03c9cbe64572b3eb0> ssm_retry: (success) (0, 'PLATFORM\r\r\nLinux\r\r\nFOUND\r\r\n/usr/libexec/platform-python\r\r\n/usr/bin/python2.7\r\r\n/usr/bin/python\r\r\n/usr/bin/python\r\r\nENDFOUND\r\r', '') <ip-10-240-172-59.us-east-2.compute.internal> Python interpreter discovery fallback (pipelining support required for extended interpreter discovery)` ``` Where it didn't complain and seems to be successful. Notice the additional "x1b[?2004h\x1b[?2004" strings in the output. I see the similar string in other problems, like "Warning: Failed to open the file 2004h2004l/AnsiballZ_command.py" Note that if I set the python interpreter it will remove the warning, but it will still error out on the "Failed to open the file 2004h2004l/AnsiballZ_command.py: No such file Warning: or directory" Tried this with community.aws collection versions 4.5.0, 5.2.0, and 5.3.0 and the errors is the same every time. 
Also tried a few different amazon.aws collection versions and had the same error. These examples are taken from a Mac running python 3.11.2 and ansible 2.14.2, but the same errors occured in a Linux-based Execution Environment for AWX running in EKS. ### Issue Type Bug Report ### Component Name aws_ssm connection ### Ansible Version ``` ansible [core 2.14.2] config file = None configured module search path = ['/Users/username/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /Users/username/.asdf/installs/python/3.11.2/lib/python3.11/site-packages/ansible ansible collection location = /Users/username/.ansible/collections:/usr/share/ansible/collections executable location = /Users/username/.asdf/installs/python/3.11.2/bin/ansible python version = 3.11.2 (main, Feb 21 2023, 11:07:56) [Clang 13.1.6 (clang-1316.0.21.2.5)] (/Users/username/.asdf/installs/python/3.11.2/bin/python3.11) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```# /Users/username/.asdf/installs/python/3.11.2/lib/python3.11/site-packages/ansible_collections Collection Version ----------------------------- ------- ansible.netcommon 4.1.0 ansible.posix 1.5.1 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.0 awx.awx 21.11.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.3 cisco.intersight 1.0.23 cisco.ios 4.3.1 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.0 cisco.mso 2.2.1 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.4 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.10.0 community.digitalocean 1.23.0 community.dns 2.5.0 community.docker 3.4.0 community.fortios 1.0.0 community.general 6.3.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.1.0 community.hrobot 1.7.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.7.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.6.0 community.vmware 3.3.0 community.windows 1.12.0 community.zabbix 1.9.1 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.5.0 dellemc.unity 1.5.0 f5networks.f5_modules 1.22.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.2 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.1.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.1 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.2.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.10.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.16.2 purestorage.flashblade 1.10.0 purestorage.fusion 1.3.0 sensu.sensu_go 1.13.2 splunk.es 2.1.0 t_systems_mms.icinga_director 1.32.0 theforeman.foreman 3.8.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.7.0 vyos.vyos 4.0.0 wti.remote 1.0.4 # 
/Users/username/.ansible/collections/ansible_collections Collection Version ------------- ------- amazon.aws 4.3.0 community.aws 5.2.0 ``` ### AWS SDK versions ``` ❯ pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.26.76 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/username/.asdf/installs/python/3.11.2/lib/python3.11/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.29.76 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/username/.asdf/installs/python/3.11.2/lib/python3.11/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ``` ❯ ansible-config dump --only-changed CONFIG_FILE() = None ``` ### OS / Environment Darwin ENG-NMOSEMAN-MB 22.3.0 Darwin Kernel Version 22.3.0: Mon Jan 30 20:42:11 PST 2023; root:xnu-8792.81.3~2/RELEASE_X86_64 x86_64 i386 Darwin and modified version of quay.io/ansible/awx-ee:latest to include community.aws, and ssm stuff. ### Steps to Reproduce ``` ❯ ansible -i inventory.aws_ec2.yaml -m 'command' -a 'whoami' tag_role_FAKEMX -l ec2-13-58-203-89.us-east-2.compute.amazonaws.com [WARNING]: Unhandled error in Python interpreter discovery for host ec2-13-58-203-89.us-east-2.compute.amazonaws.com: unexpected output from Python interpreter discovery ec2-13-58-203-89.us-east-2.compute.amazonaws.com | FAILED | rc=-1 >> failed to transfer file to /Users/username/.ansible/tmp/ansible-local-78807y4966nip/tmpqn_vbdbk /AnsiballZ_command.py: % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0Warning: Failed to open the file 2004h2004l/AnsiballZ_command.py: No such file Warning: or directory 1 129k 1 1531 0 0 19698 0 0:00:06 --:--:-- 0:00:06 19883 curl: (23) Failure writing output to destination ``` ### Expected Results ``` ❯ ansible -i inventory.aws_ec2.yaml -m 'command' -a 'whoami' tag_role_FAKEMX -l ip-10-240-172-59.us-east-2.compute.internal ip-10-240-172-59.us-east-2.compute.internal | CHANGED | rc=0 >> root ``` ### Actual Results ```failed to transfer file to /Users/username/.ansible/tmp/ansible-local-78807y4966nip/tmpqn_vbdbk /AnsiballZ_command.py: % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0Warning: Failed to open the file 2004h2004l/AnsiballZ_command.py: No such file Warning: or directory 1 129k 1 1531 0 0 19698 0 0:00:06 --:--:-- 0:00:06 19883 curl: (23) Failure writing output to destination ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
We just ran into this issue here. It seems that Amazon Linux outputs colorized text when Ansible runs any remote shell commands which causes parsing of the result to fail. Our solution was to not use aws_ssm connection - instead we setup SSH to make connections through Session Manager: https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started-enable-ssh-connections.html I was facing this same issue with hosts running both Ubuntu 22.04 and Amazon Linux 2023, and I was finally able to trace the extra output to a root cause. This is due to newer versions of Bash/readline turning on the option `enable-bracketed-paste` by default (more details [here](https://utcc.utoronto.ca/~cks/space/blog/unix/BashBracketedPasteChange)). I have a patch that will disable this option (will send a PR later today), which allows `ansible -m ping` to work on several hosts I have access to, including Ubuntu 18.04, Amazon Linux 2023, and Ubuntu 22.04.
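To see why interpreter discovery chokes, it helps to look at what the extra bytes actually are: they are the bracketed-paste toggles \x1b[?2004h and \x1b[?2004l that newer readline/bash versions emit, and the discovery parser does not expect them. The merged patch disables the feature on the remote shell; the snippet below only strips the sequences from a sample captured in the report, to make the noise visible (illustrative, not part of the fix):

```python
import re

# Matches the bracketed-paste mode toggles ESC[?2004h / ESC[?2004l.
BRACKETED_PASTE = re.compile(r"\x1b\[\?2004[hl]")

# Sample taken from the -vvvv output in the report above.
raw = "\x1b[?2004h\x1b[?2004l\r\r\r\nPLATFORM\r\r\nLinux\r\r\nFOUND\r\r\n/usr/bin/python3.9\r\r\n"
print(repr(BRACKETED_PASTE.sub("", raw)))
# '\r\r\r\nPLATFORM\r\r\nLinux\r\r\nFOUND\r\r\n/usr/bin/python3.9\r\r\n'
```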
2023-06-01T21:08:37
ansible-collections/community.aws
1,849
ansible-collections__community.aws-1849
[ "1819" ]
ef9bd1829d224604cea563d86db3564cce8fcdfb
diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py --- a/plugins/modules/cloudfront_distribution.py +++ b/plugins/modules/cloudfront_distribution.py @@ -1417,6 +1417,7 @@ from collections import OrderedDict import datetime +import re try: import botocore @@ -1676,7 +1677,7 @@ def __init__(self, module): "http2and3", ] ) - self.__s3_bucket_domain_identifier = ".s3.amazonaws.com" + self.__s3_bucket_domain_regex = re.compile(r"\.s3(?:\.[^.]+)?\.amazonaws\.com$") def add_missing_key(self, dict_object, key_to_set, value_to_set): if key_to_set not in dict_object and value_to_set is not None: @@ -1818,7 +1819,7 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): ) else: origin_shield_region = origin_shield_region.lower() - if self.__s3_bucket_domain_identifier in origin.get("domain_name").lower(): + if self.__s3_bucket_domain_regex.search(origin.get("domain_name").lower()): if origin.get("s3_origin_access_identity_enabled") is not None: if origin["s3_origin_access_identity_enabled"]: s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) @@ -1834,10 +1835,10 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): origin["s3_origin_config"] = dict(origin_access_identity=oai) - if "custom_origin_config" in origin: - self.module.fail_json( - msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive" - ) + if "custom_origin_config" in origin: + self.module.fail_json( + msg="s3 origin domains and custom_origin_config are mutually exclusive", + ) else: origin = self.add_missing_key( origin, "custom_origin_config", existing_config.get("custom_origin_config", {})
diff --git a/tests/integration/targets/cloudfront_distribution/tasks/main.yml b/tests/integration/targets/cloudfront_distribution/tasks/main.yml --- a/tests/integration/targets/cloudfront_distribution/tasks/main.yml +++ b/tests/integration/targets/cloudfront_distribution/tasks/main.yml @@ -568,6 +568,43 @@ ignore_errors: true - name: check that custom origin with origin access identity fails + # "s3 origin domains and custom_origin_config are mutually exclusive" + assert: + that: + - update_origin_to_s3_with_origin_access_and_with_custom_origin_config.failed + + - name: check that custom_origin_config can't be used with an region-agnostic S3 domain + cloudfront_distribution: + distribution_id: "{{ distribution_id }}" + origins: + - domain_name: "{{ resource_prefix }}-bucket.s3.{{ aws_region }}.amazonaws.com" + id: "{{ resource_prefix }}3.example.com" + custom_origin_config: + http_port: 8080 + state: present + register: update_origin_to_s3_with_origin_access_and_with_custom_origin_config + ignore_errors: true + + - name: check that custom origin with region-agnostic S3 domain fails + # "s3 origin domains and custom_origin_config are mutually exclusive" + assert: + that: + - update_origin_to_s3_with_origin_access_and_with_custom_origin_config.failed + + - name: check that custom_origin_config can't be used with an region-aware S3 domain + cloudfront_distribution: + distribution_id: "{{ distribution_id }}" + origins: + - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com" + id: "{{ resource_prefix }}3.example.com" + custom_origin_config: + http_port: 8080 + state: present + register: update_origin_to_s3_with_origin_access_and_with_custom_origin_config + ignore_errors: true + + - name: check that custom origin with region-aware S3 domain fails + # "s3 origin domains and custom_origin_config are mutually exclusive" assert: that: - update_origin_to_s3_with_origin_access_and_with_custom_origin_config.failed
cloudfront_distribution doesn't recognise S3 origin ### Summary When I refer to an S3 bucket domain in the form `{bucket_name}.s3.{region}.amazonaws.com`, as per [Origin Domain spec](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesDomainName), it isn't recognised as an S3 domain [here](https://github.com/ansible-collections/community.aws/blob/e80bf933412ea5c7ab2a94af945170cb2ebd900f/plugins/modules/cloudfront_distribution.py#L1821), so a `custom_origin_config` entry is added automatically [here](https://github.com/ansible-collections/community.aws/blob/e80bf933412ea5c7ab2a94af945170cb2ebd900f/plugins/modules/cloudfront_distribution.py#L1842), which results in an error: "botocore.errorfactory.InvalidOrigin: An error occurred (InvalidOrigin) when calling the CreateDistribution operation: You must specify either a CustomOrigin or an S3Origin. You cannot specify both." The problem is in the method used for recognising S3 domains: whether it contains `.s3.amazonaws.com` or not (note the missing region part). ### Issue Type Bug Report ### Component Name cloudfront_distribution ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.14.5] config file = /etc/ansible/ansible.cfg configured module search path = ['/home/fules/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python3/dist-packages/ansible ansible collection location = /home/fules/.ansible/collections:/usr/share/ansible/collections executable location = /usr/bin/ansible python version = 3.10.6 (main, Mar 10 2023, 10:55:28) [GCC 11.3.0] (/usr/bin/python3) jinja version = 3.0.3 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /home/fules/.ansible/collections/ansible_collections Collection Version ------------- ------- amazon.aws 6.0.1 community.aws 6.0.0 # /usr/lib/python3/dist-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 5.4.0 ansible.netcommon 4.1.0 ansible.posix 1.5.2 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.1 awx.awx 21.14.0 azure.azcollection 1.15.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.6.0 cisco.asa 4.0.0 cisco.dnac 6.7.1 cisco.intersight 1.0.27 cisco.ios 4.5.0 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.1 cisco.mso 2.4.0 cisco.nso 1.0.3 cisco.nxos 4.3.0 cisco.ucs 1.8.0 cloud.common 2.1.3 cloudscale_ch.cloud 2.2.4 community.aws 5.4.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.12.0 community.digitalocean 1.23.0 community.dns 2.5.3 community.docker 3.4.3 community.fortios 1.0.0 community.general 6.6.0 community.google 1.0.0 community.grafana 1.5.4 community.hashi_vault 4.2.0 community.hrobot 1.8.0 community.libvirt 1.2.0 community.mongodb 1.5.2 community.mysql 3.6.0 community.network 5.0.0 community.okd 2.3.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.8.0 community.sap 1.0.0 community.sap_libs 1.4.1 community.skydive 1.0.0 community.sops 1.6.1 community.vmware 3.5.0 community.windows 1.12.0 community.zabbix 1.9.3 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.6.0 dellemc.unity 1.6.0 f5networks.f5_modules 1.23.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.3 frr.frr 2.0.2 gluster.gluster 1.0.2 
google.cloud 1.1.3 grafana.grafana 1.1.1 hetzner.hcloud 1.11.0 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.3.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.4.0 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 microsoft.ad 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.5.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.12.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.17.2 purestorage.flashblade 1.11.0 purestorage.fusion 1.4.2 sensu.sensu_go 1.13.2 splunk.es 2.1.0 t_systems_mms.icinga_director 1.32.2 theforeman.foreman 3.10.0 vmware.vmware_rest 2.3.1 vultr.cloud 1.7.0 vyos.vyos 4.0.2 wti.remote 1.0.4 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.26.137 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.29.137 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /usr/local/lib/python3.10/dist-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed CONFIG_FILE() = /etc/ansible/ansible.cfg ``` ### OS / Environment $ lsb_release -a No LSB modules are available. 
Distributor ID: Ubuntu Description: Ubuntu 22.04.1 LTS Release: 22.04 Codename: jammy ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - hosts: localhost collections: - community.aws - amazon.aws vars_files: - external_vars.yaml tasks: - name: Reading landing page domain from certificate acm_certificate_info: profile: "{{ aws_profile }}" region: "us-east-1" certificate_arn: "{{ landing_page_certificate_arn }}" register: landing_page_certificates - set_fact: landing_page_domain: "{{ landing_page_certificates.certificates[0].domain_name }}" - name: Create the S3 bucket s3_bucket: profile: "{{ aws_profile }}" state: present name: "{{ landing_page_domain }}" object_ownership: "BucketOwnerEnforced" encryption: "AES256" versioning: false public_access: block_public_acls: false block_public_policy: false ignore_public_acls: false restrict_public_buckets: false register: landing_page_bucket - name: Create CloudFront Access Identity cloudfront_origin_access_identity: state: present caller_reference: "LandingPageAccessIdentity" comment: "no comment" register: landing_page_access_identity - name: Create CloudFront Distribution cloudfront_distribution: profile: "{{ aws_profile }}" state: present http_version: "http2" caller_reference: "LandingPageDistribution" comment: "no comment" alias: "{{ landing_page_domain }}" viewer_certificate: acm_certificate_arn: "{{ landing_page_certificate_arn }}" ssl_support_method: "sni-only" minimum_protocol_version: "TLSv1.2_2021" origins: - id: "{{ landing_page_domain }}" domain_name: "{{ landing_page_bucket.name }}.s3.{{ aws_region }}.amazonaws.com" s3_origin_config: origin_access_identity: "origin-access-identity/cloudfront/{{ landing_page_access_identity.cloud_front_origin_access_identity.id }}" origin_shield: enabled: false # default_origin_domain_name: "{{ landing_page_bucket.name }}" default_root_object: "index.html" price_class: "PriceClass_200" wait: true register: landing_page_distribution ``` ### Expected Results I expected that only the `s3_origin_config` is generated in the origin, and the `custom_origin_config` isn't. ### Actual Results ```console (paste below) botocore.errorfactory.InvalidOrigin: An error occurred (InvalidOrigin) when calling the CreateDistribution operation: You must specify either a CustomOrigin or an S3Origin. You cannot specify both. ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
2023-06-22T18:32:39
ansible-collections/community.aws
1,850
ansible-collections__community.aws-1850
[ "1775" ]
509ccad9fdf8961d2c1e7fde672f45979de6f1eb
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -20,12 +20,26 @@ ``ansible_user`` variables to configure the remote user. The ``become_user`` parameter should be used to configure which user to run commands as. Remote commands will often default to running as the ``ssm-agent`` user, however this will also depend on how SSM has been configured. + - This plugin requires an S3 bucket to send files to/from the remote instance. This is required even for modules + which do not explicitly send files (such as the C(shell) or C(command) modules), because Ansible sends over the C(.py) files of the module itself, via S3. + - Files sent via S3 will be named in S3 with the EC2 host ID (e.g. C(i-123abc/)) as the prefix. + - The files in S3 will be deleted by the end of the playbook run. If the play is terminated ungracefully, the files may remain in the bucket. + If the bucket has versioning enabled, the files will remain in version history. If your tasks involve sending secrets to/from the remote instance + (e.g. within a C(shell) command, or a SQL password in the C(community.postgresql.postgresql_query) module) then those passwords will be included in plaintext in those files in S3 indefinitely, + visible to anyone with access to that bucket. Therefore it is recommended to use a bucket with versioning disabled/suspended. + - The files in S3 will be deleted even if the C(keep_remote_files) setting is C(true). + requirements: - The remote EC2 instance must be running the AWS Systems Manager Agent (SSM Agent). U(https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) - The control machine must have the AWS session manager plugin installed. U(https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) - The remote EC2 Linux instance must have curl installed. + - The remote EC2 Linux instance and the controller both need network connectivity to S3. + - The remote instance does not require IAM credentials for S3. This module will generate a presigned URL for S3 from the controller, + and then will pass that URL to the target over SSM, telling the target to download/upload from S3 with C(curl). + - The controller requires IAM permissions to upload, download and delete files from the specified S3 bucket. This includes + `s3:GetObject`, `s3:PutObject`, `s3:ListBucket`, `s3:DeleteObject` and `s3:GetBucketLocation`. options: access_key_id:
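As a rough illustration of the transfer flow the new documentation text describes (this is not the plugin's actual code, and the bucket name is made up): the controller presigns an S3 URL with its own credentials, and the target only needs curl plus network access to S3.

```python
import boto3

# Assumes AWS credentials are configured on the controller; the remote instance
# itself needs no S3 credentials because it only fetches the presigned URL.
s3 = boto3.client("s3")

download_url = s3.generate_presigned_url(
    "get_object",
    Params={"Bucket": "my-ssm-transfer-bucket", "Key": "i-123abc/AnsiballZ_command.py"},
    ExpiresIn=3600,
)
# The connection plugin would then have the target run something like:
#   curl -o <remote tmp path>/AnsiballZ_command.py '<download_url>'
print(download_url)
```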
SSM connector docs should explain the S3 part ### Summary The [SSM connector docs](https://docs.ansible.com/ansible/latest/collections/community/aws/aws_ssm_connection.html#ansible-collections-community-aws-aws-ssm-connection) don't mention S3 up the top. They only mention it in the details of the arguments, which is a bit unclear for someone completely new to this. In the "Requirements" section, it should say * that you need to have already created an S3 bucket. * that the target and controller both need network connectivity to S3 * why a bucket is required, even if you're not running any `copy` commands. (One sentence explanation is probably fine.) * which IAM permissions are required on the target (e.g. `s3:GetObject`, or `s3:GetObjectVersion`, etc, or also ListBucket?) * which IAM permissions are required on the controller (s3:PutObject, s3:DeleteObject. Anything else? e.g. presigned URLs?) * which prefix within S3 the objects are saved to * whether the files in S3 are deleted when done. * whether the files in S3 are deleted if the general Ansible setting [`keep_remote_files=True`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-keep-remote-files). ### Issue Type Documentation Report ### Component Name community.aws.aws_ssm connection ### Ansible Version ```console (paste below) ansible [core 2.13.5] config file = /Users/matthew/Documents/mms/new-repo/ansible.cfg configured module search path = ['/Users/matthew/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /Users/matthew/.pyenv/versions/3.10.0/lib/python3.10/site-packages/ansible ansible collection location = /Users/matthew/.ansible/collections:/usr/share/ansible/collections executable location = /Users/matthew/.pyenv/versions/3.10.0/bin/ansible python version = 3.10.0 (default, Nov 12 2021, 11:20:43) [Clang 12.0.5 (clang-1205.0.22.11)] jinja version = 3.1.2 libyaml = False ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /Users/matthew/.pyenv/versions/3.10.0/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 3.4.0 ansible.netcommon 3.1.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.11.1 arista.eos 5.0.1 awx.awx 21.5.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.0 cisco.aci 2.2.0 cisco.asa 3.1.0 cisco.dnac 6.6.0 cisco.intersight 1.0.19 cisco.ios 3.3.1 cisco.iosxr 3.3.1 cisco.ise 2.5.3 cisco.meraki 2.11.0 cisco.mso 2.0.0 cisco.nso 1.0.3 cisco.nxos 3.1.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.5.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.5.0 community.digitalocean 1.21.0 community.dns 2.3.2 community.docker 2.7.1 community.fortios 1.0.0 community.general 5.6.0 community.google 1.0.0 community.grafana 1.5.2 community.hashi_vault 3.2.0 community.hrobot 1.5.2 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.2.0 community.proxysql 1.4.0 community.rabbitmq 1.2.2 community.routeros 2.3.0 community.sap 1.0.0 community.sap_libs 1.3.0 community.skydive 1.0.0 community.sops 1.4.0 community.vmware 2.9.1 community.windows 1.11.0 community.zabbix 1.8.0 containers.podman 1.9.4 cyberark.conjur 1.2.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.2 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.19.0 
fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.7 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.2 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.9.0 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.3.0 inspur.ispim 1.0.1 inspur.sm 2.0.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.19.0 netapp.elementsw 21.7.0 netapp.ontap 21.23.0 netapp.storagegrid 21.11.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.1 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.9.1 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.2.3 purestorage.flasharray 1.13.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.1.0 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 2.1.0 t_systems_mms.icinga_director 1.31.0 theforeman.foreman 3.6.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.1.0 vyos.vyos 3.0.1 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ANY_ERRORS_FATAL(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = True DEFAULT_KEEP_REMOTE_FILES(env: ANSIBLE_KEEP_REMOTE_FILES) = False DEFAULT_STDOUT_CALLBACK(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = yaml INVENTORY_UNPARSED_IS_FAILED(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = True LOCALHOST_WARNING(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = False ``` ### OS / Environment Mac OS ### Additional Information _No response_ ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
@mdavis-xyz looks like you're more familiar with ssm connections. Are you willing to provide a PR that improves the documentation? I need confirmation of the answers, e.g. I'm not sure what permissions are required for the controller, especially for [presigned URLs](https://github.com/boto/boto3/issues/3670). Also, what's the right way to put hyperlinks in the docs? e.g. [keep_remote_files=True](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-keep-remote-files)? I can put in that absolute hyperlink, but I assume we'd want to keep the links within each particular version of the docs?

Q: Why is a bucket required, even if you're not running any copy commands? (One sentence explanation is probably fine.)
A: Ansible is designed to not require anything (except Python) to be installed on the target. For each Ansible module, Ansible copies a Python script to the target, and then executes it. This is true for all modules, not just the file copying ones like `copy`. It is possible to send files directly over SSM, however that is slow compared to using S3. That is, the controller uploads files to S3, and then sends a shell command to the target, telling it to download the file from S3.

Q: Which IAM permissions are required on the target (e.g. s3:GetObject, or s3:GetObjectVersion, etc, or also ListBucket)?
A: No S3 IAM permissions are required on the target. To simplify IAM permissions and reduce dependency requirements, the controller generates a pre-signed URL for each file, and then tells the target to run `curl https://...`.

Q: Which IAM permissions are required on the controller (s3:PutObject, s3:DeleteObject. Anything else? e.g. presigned URLs)?
A: I'm not sure.

Q: Which prefix within S3 are the objects saved to?
A: The file `/path/to/something.txt` for EC2 instance `i-123` will be saved at `s3://bucket/i-123//path/to/something.txt`.

Q: Are the files in S3 deleted when done?
A: Yes, they are deleted.

Q: Are the files in S3 deleted if the general Ansible setting [keep_remote_files=True](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-keep-remote-files) is used?
A: That setting is ignored for files in S3.

One other reason for S3 is that if you send files directly over SSM (e.g. `echo blah | base64 -d > file`), the contents will be visible persistently in .bash_history, and perhaps in SSM execution history. For some files that might be a security risk. (Just a guess)
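To make the S3 requirement described above concrete, here is a minimal host-variables sketch for using the connection plugin with a pre-existing bucket. The bucket name, region and instance ID are hypothetical, and the variable names are assumed to match the plugin's documented options (worth double-checking against the installed version).

```yaml
# Hypothetical host_vars for a target reached via SSM.
# An existing S3 bucket is required even for modules like shell/command,
# because the module payloads themselves transit S3.
ansible_connection: community.aws.aws_ssm
ansible_aws_ssm_instance_id: i-0123456789abcdef0        # hypothetical instance ID
ansible_aws_ssm_region: us-east-2
ansible_aws_ssm_bucket_name: my-ansible-ssm-transfer-bucket  # ideally with versioning suspended
```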
2023-06-23T00:55:29
ansible-collections/community.aws
1,886
ansible-collections__community.aws-1886
[ "1832", "1832" ]
3a801c0ac90bad029eb87aeac9c37306760ae6cb
diff --git a/plugins/modules/mq_broker.py b/plugins/modules/mq_broker.py --- a/plugins/modules/mq_broker.py +++ b/plugins/modules/mq_broker.py @@ -237,6 +237,7 @@ "storage_type": "StorageType", "subnet_ids": "SubnetIds", "users": "Users", + "tags": "Tags", }
diff --git a/tests/integration/targets/mq/defaults/main.yml b/tests/integration/targets/mq/defaults/main.yml --- a/tests/integration/targets/mq/defaults/main.yml +++ b/tests/integration/targets/mq/defaults/main.yml @@ -5,3 +5,5 @@ vpc_name: "{{ resource_prefix }}-vpc" vpc_cidr: "10.0.0.0/16" subnet_cidr: "10.0.1.0/24" sg_name: "{{resource_prefix}}-sg" +tags: + workload_type: other \ No newline at end of file diff --git a/tests/integration/targets/mq/tasks/broker_tests.yml b/tests/integration/targets/mq/tasks/broker_tests.yml --- a/tests/integration/targets/mq/tasks/broker_tests.yml +++ b/tests/integration/targets/mq/tasks/broker_tests.yml @@ -3,6 +3,7 @@ broker_name: "{{ broker_name }}" security_groups: "{{ broker_sg_ids.split(',') }}" subnet_ids: "{{ broker_subnet_ids.split(',') }}" + tags: "{{ tags }}" register: result - set_fact: broker_id: "{{ result.broker['broker_id'] }}" @@ -20,6 +21,7 @@ - result_c1.broker['broker_name'] == broker_name - result_c1.broker['broker_state'] == 'CREATION_IN_PROGRESS' - ( result_c1.broker['storage_type'] | upper ) == 'EFS' + - result_c1.broker['tags'] == tags when: not ansible_check_mode - debug: msg: "Wait until broker {{ broker_name }} ({{ broker_id }}) enters running state. This may take several minutes" diff --git a/tests/integration/targets/mq/tasks/main.yml b/tests/integration/targets/mq/tasks/main.yml --- a/tests/integration/targets/mq/tasks/main.yml +++ b/tests/integration/targets/mq/tasks/main.yml @@ -32,4 +32,4 @@ - name: cleanup broker include_tasks: broker_cleanup.yml - - include_tasks: env_cleanup.yml \ No newline at end of file + - include_tasks: env_cleanup.yml
mq_broker: Tagging a broker on creation does not work ### Summary When creating a new MQ broker using the following task, the broker does not get tagged. ``` - name: create broker with minimal parameters mq_broker: broker_name: "{{ broker_name }}" security_groups: "{{ broker_sg_ids.split(',') }}" subnet_ids: "{{ broker_subnet_ids.split(',') }}" tags: "Foo": "Bar" "FooBar": "foobar" ``` Actual result: ``` changed: [testhost] => { "broker": { "broker_arn": "arn:aws:mq:us-east-1:123456789100:broker:ansible-test-52903175--mq:b-70e0807b-102d-42ae-8805-94ec6395436c", "broker_id": "b-70e0807b-102d-42ae-8805-94ec6395436c", "response_metadata": { "http_headers": { "access-control-allow-origin": "*", "access-control-expose-headers": "x-amzn-errortype,x-amzn-requestid,x-amzn-errormessage,x-amzn-trace-id,x-amz-apigw-id,date", "cache-control": "no-cache; no-store, must-revalidate, private", "connection": "keep-alive", "content-length": "191", "content-type": "application/json", "date": "Wed, 31 May 2023 13:25:16 GMT", "expires": "0", "pragma": "no-cache", "x-amz-apigw-id": "FyidUFppIAMF1zw=", "x-amzn-requestid": "12345bcb-5678-890d-972c-26a92712aaeb", "x-amzn-trace-id": "Root=1-64774abb-2b3bf58a2b0cbf7800afdef6" }, "http_status_code": 200, "request_id": "59392bcb-5406-460d-972c-26a92712aaeb", "retry_attempts": 0 } }, ``` ### Issue Type Bug Report ### Component Name mq_broker ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.14.3] ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list Collection Version ----------------------------- ------- amazon.aws 6.0.0 community.aws 6.0.0 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Name: boto3 Version: 1.22.0 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/alinabuzachis/anaconda3/envs/py310/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: gouttelette --- Name: botocore Version: 1.25.13 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/alinabuzachis/anaconda3/envs/py310/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: aiobotocore, awscli, boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) ``` ### Expected Results Create an MQ broker using the task I pasted before. ### Actual Results ```console (paste below) ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct mq_broker: Tagging a broker on creation does not work ### Summary When creating a new MQ broker using the following task, the broker does not get tagged. 
``` - name: create broker with minimal parameters mq_broker: broker_name: "{{ broker_name }}" security_groups: "{{ broker_sg_ids.split(',') }}" subnet_ids: "{{ broker_subnet_ids.split(',') }}" tags: "Foo": "Bar" "FooBar": "foobar" ``` Actual result: ``` changed: [testhost] => { "broker": { "broker_arn": "arn:aws:mq:us-east-1:123456789100:broker:ansible-test-52903175--mq:b-70e0807b-102d-42ae-8805-94ec6395436c", "broker_id": "b-70e0807b-102d-42ae-8805-94ec6395436c", "response_metadata": { "http_headers": { "access-control-allow-origin": "*", "access-control-expose-headers": "x-amzn-errortype,x-amzn-requestid,x-amzn-errormessage,x-amzn-trace-id,x-amz-apigw-id,date", "cache-control": "no-cache; no-store, must-revalidate, private", "connection": "keep-alive", "content-length": "191", "content-type": "application/json", "date": "Wed, 31 May 2023 13:25:16 GMT", "expires": "0", "pragma": "no-cache", "x-amz-apigw-id": "FyidUFppIAMF1zw=", "x-amzn-requestid": "12345bcb-5678-890d-972c-26a92712aaeb", "x-amzn-trace-id": "Root=1-64774abb-2b3bf58a2b0cbf7800afdef6" }, "http_status_code": 200, "request_id": "59392bcb-5406-460d-972c-26a92712aaeb", "retry_attempts": 0 } }, ``` ### Issue Type Bug Report ### Component Name mq_broker ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.14.3] ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list Collection Version ----------------------------- ------- amazon.aws 6.0.0 community.aws 6.0.0 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Name: boto3 Version: 1.22.0 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/alinabuzachis/anaconda3/envs/py310/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: gouttelette --- Name: botocore Version: 1.25.13 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/alinabuzachis/anaconda3/envs/py310/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: aiobotocore, awscli, boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) ``` ### Expected Results Create an MQ broker using the task I pasted before. ### Actual Results ```console (paste below) ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
2023-07-12T10:06:57
ansible-collections/community.aws
1,888
ansible-collections__community.aws-1888
[ "1879" ]
f4b0ba5fb17ed7f2e2d3ac955a0b87cdf423169a
diff --git a/plugins/modules/mq_broker.py b/plugins/modules/mq_broker.py --- a/plugins/modules/mq_broker.py +++ b/plugins/modules/mq_broker.py @@ -124,6 +124,19 @@ - At least one must be provided during creation. type: list elements: str + wait: + description: + - Specifies whether the module waits for the desired C(state). + - The time to wait can be controlled by setting I(wait_timeout). + type: bool + default: false + version_added: 7.1.0 + wait_timeout: + description: + - How long to wait (in seconds) for the broker to reach the desired state if I(wait=true). + default: 900 + type: int + version_added: 7.1.0 extends_documentation_fragment: - amazon.aws.boto3 @@ -215,6 +228,9 @@ # handled by AnsibleAWSModule pass +from time import sleep +from time import time + from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule @@ -384,22 +400,77 @@ def get_broker_info(conn, module, broker_id): module.fail_json_aws(e, msg="Couldn't get broker details.") +def wait_for_status(conn, module): + interval_secs = 5 + timeout = module.params.get("wait_timeout", 900) + broker_name = module.params.get("broker_name") + desired_state = module.params.get("state") + done = False + + paginator = conn.get_paginator("list_brokers") + page_iterator = paginator.paginate(PaginationConfig={"MaxItems": 100, "PageSize": 100, "StartingToken": ""}) + wait_timeout = time() + timeout + + while wait_timeout > time(): + try: + filtered_iterator = page_iterator.search(f"BrokerSummaries[?BrokerName == `{broker_name}`][]") + broker_list = list(filtered_iterator) + + if module.check_mode: + return + + if len(broker_list) < 1 and desired_state == "absent": + done = True + break + + if desired_state in ["present", "rebooted"] and broker_list[0]["BrokerState"] == "RUNNING": + done = True + break + + if broker_list[0]["BrokerState"] == "CREATION_FAILED": + break + + sleep(interval_secs) + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't paginate brokers.") + + if not done: + module.fail_json(msg="desired state not reached") + + def reboot_broker(conn, module, broker_id): + wait = module.params.get("wait") + try: - return conn.reboot_broker(BrokerId=broker_id) + response = conn.reboot_broker(BrokerId=broker_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't reboot broker.") + if wait: + wait_for_status(conn, module) + + return response + def delete_broker(conn, module, broker_id): + wait = module.params.get("wait") + try: - return conn.delete_broker(BrokerId=broker_id) + response = conn.delete_broker(BrokerId=broker_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete broker.") + if wait: + wait_for_status(conn, module) + + return response + def create_broker(conn, module): kwargs = _fill_kwargs(module) + wait = module.params.get("wait") + if "EngineVersion" in kwargs and kwargs["EngineVersion"] == "latest": kwargs["EngineVersion"] = get_latest_engine_version(conn, module, kwargs["EngineType"]) if kwargs["AuthenticationStrategy"] == "LDAP": @@ -416,11 +487,15 @@ def create_broker(conn, module): changed = True result = conn.create_broker(**kwargs) # + if wait: + wait_for_status(conn, module) + return {"broker": camel_dict_to_snake_dict(result, ignore_list=["Tags"]), "changed": changed} def 
update_broker(conn, module, broker_id): kwargs = _fill_kwargs(module, apply_defaults=False, ignore_create_params=True) + wait = module.params.get("wait") # replace name with id broker_name = kwargs["BrokerName"] del kwargs["BrokerName"] @@ -443,6 +518,9 @@ def update_broker(conn, module, broker_id): api_result = conn.update_broker(**kwargs) # # + if wait: + wait_for_status(conn, module) + return {"broker": result, "changed": changed} @@ -484,6 +562,8 @@ def main(): argument_spec = dict( broker_name=dict(required=True, type="str"), state=dict(default="present", choices=["present", "absent", "restarted"]), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=900, type="int"), # parameters only allowed on create deployment_mode=dict(choices=["SINGLE_INSTANCE", "ACTIVE_STANDBY_MULTI_AZ", "CLUSTER_MULTI_AZ"]), use_aws_owned_key=dict(type="bool"),
diff --git a/tests/integration/targets/mq/tasks/broker_tests.yml b/tests/integration/targets/mq/tasks/broker_tests.yml --- a/tests/integration/targets/mq/tasks/broker_tests.yml +++ b/tests/integration/targets/mq/tasks/broker_tests.yml @@ -4,6 +4,7 @@ security_groups: "{{ broker_sg_ids.split(',') }}" subnet_ids: "{{ broker_subnet_ids.split(',') }}" tags: "{{ tags }}" + wait: true register: result - set_fact: broker_id: "{{ result.broker['broker_id'] }}" @@ -19,20 +20,10 @@ - ( result.changed | bool ) - result_c1.broker['broker_id'] == broker_id - result_c1.broker['broker_name'] == broker_name - - result_c1.broker['broker_state'] == 'CREATION_IN_PROGRESS' + - result_c1.broker['broker_state'] == 'RUNNING' - ( result_c1.broker['storage_type'] | upper ) == 'EFS' - result_c1.broker['tags'] == tags when: not ansible_check_mode -- debug: - msg: "Wait until broker {{ broker_name }} ({{ broker_id }}) enters running state. This may take several minutes" -- name: wait for startup - mq_broker_info: - broker_id: "{{ broker_id }}" - register: result - until: result.broker['broker_state'] == 'RUNNING' - retries: 15 - delay: 60 - when: not ansible_check_mode - name: repeat creation mq_broker: broker_name: "{{ broker_name }}"
mq_broker: add wait and wait_timeout parameters

### Summary

Add wait and wait_timeout parameters to wait for a specific state.

https://github.com/ansible-collections/community.aws/pull/1831#issuecomment-1582566844

### Issue Type

Feature Idea

### Component Name

mq_broker

### Additional Information

```yaml
wait:
  description:
    - Specifies whether the module waits for the desired C(state).
  type: bool
wait_timeout:
  description:
    - How long to wait (in seconds) for the broker to reach the desired state.
  default: 600
  type: int
```

### Code of Conduct

- [X] I agree to follow the Ansible Code of Conduct
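Based on the options added in the patch above (C(wait) defaulting to false and C(wait_timeout) defaulting to 900 seconds), a usage sketch could look like the following; the broker name and the security group / subnet variables are placeholders.

```yaml
- name: create broker and block until it reaches the desired state
  community.aws.mq_broker:
    broker_name: example-broker                          # placeholder name
    security_groups: "{{ broker_sg_ids.split(',') }}"    # placeholder variable
    subnet_ids: "{{ broker_subnet_ids.split(',') }}"     # placeholder variable
    state: present
    wait: true          # defaults to false per the patch above
    wait_timeout: 1200  # seconds; the patch defaults to 900
```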
2023-07-13T20:52:58
ansible-collections/community.aws
1,892
ansible-collections__community.aws-1892
[ "1891" ]
0371c374bd60ad8ae71ba3ec2db873f01515444e
diff --git a/plugins/modules/ecs_taskdefinition.py b/plugins/modules/ecs_taskdefinition.py --- a/plugins/modules/ecs_taskdefinition.py +++ b/plugins/modules/ecs_taskdefinition.py @@ -627,6 +627,27 @@ expression: description: A cluster query language expression to apply to the constraint. type: str + runtime_platform: + version_added: 6.4.0 + description: + - runtime platform configuration for the task + required: false + type: dict + default: { + "operatingSystemFamily": "LINUX", + "cpuArchitecture": "X86_64" + } + suboptions: + cpuArchitecture: + description: The CPU Architecture type to be used by the task + type: str + required: false + choices: ['X86_64', 'ARM64'] + operatingSystemFamily: + description: OS type to be used by the task + type: str + required: false + choices: ['LINUX', 'WINDOWS_SERVER_2019_FULL', 'WINDOWS_SERVER_2019_CORE', 'WINDOWS_SERVER_2022_FULL', 'WINDOWS_SERVER_2022_CORE'] extends_documentation_fragment: - amazon.aws.common.modules - amazon.aws.region.modules @@ -813,6 +834,7 @@ def register_task( cpu, memory, placement_constraints, + runtime_platform, ): validated_containers = [] @@ -873,6 +895,8 @@ def register_task( params["executionRoleArn"] = execution_role_arn if placement_constraints: params["placementConstraints"] = placement_constraints + if runtime_platform: + params["runtimePlatform"] = runtime_platform try: response = self.ecs.register_task_definition(aws_retry=True, **params) @@ -939,6 +963,24 @@ def main(): elements="dict", options=dict(type=dict(type="str"), expression=dict(type="str")), ), + runtime_platform=dict( + required=False, + default={"operatingSystemFamily": "LINUX", "cpuArchitecture": "X86_64"}, + type="dict", + options=dict( + cpuArchitecture=dict(required=False, choices=["X86_64", "ARM64"]), + operatingSystemFamily=dict( + required=False, + choices=[ + "LINUX", + "WINDOWS_SERVER_2019_FULL", + "WINDOWS_SERVER_2019_CORE", + "WINDOWS_SERVER_2022_FULL", + "WINDOWS_SERVER_2022_CORE", + ], + ), + ), + ), ) module = AnsibleAWSModule( @@ -1157,6 +1199,7 @@ def _task_definition_matches( module.params["cpu"], module.params["memory"], module.params["placement_constraints"], + module.params["runtime_platform"], ) results["changed"] = True
diff --git a/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml --- a/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml +++ b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml @@ -908,6 +908,65 @@ started_by: ansible_user register: fargate_run_task_output_with_assign_ip +- name: create task definition for ARM + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-arm" + network_mode: awsvpc + launch_type: FARGATE + cpu: 512 + memory: 1024 + execution_role_arn: "{{ iam_execution_role.arn }}" + state: present + runtime_platform: + cpuArchitecture: "ARM64" + operatingSystemFamily: "LINUX" + vars: + ecs_task_host_port: 8080 + register: fargate_arm_task_definition + +- name: check that initial task definition for ARM changes + assert: + that: + - fargate_arm_task_definition.changed + +- name: recreate task definition for ARM + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-arm" + network_mode: awsvpc + launch_type: FARGATE + cpu: 512 + memory: 1024 + execution_role_arn: "{{ iam_execution_role.arn }}" + state: present + runtime_platform: + cpuArchitecture: "ARM64" + operatingSystemFamily: "LINUX" + vars: + ecs_task_host_port: 8080 + register: fargate_arm_task_definition_again + +- name: check that task definition for ARM does not change + assert: + that: + - not fargate_arm_task_definition_again.changed + +- name: delete task definition for ARM + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-arm" + network_mode: awsvpc + launch_type: FARGATE + cpu: 512 + memory: 1024 + execution_role_arn: "{{ iam_execution_role.arn }}" + state: present + runtime_platform: + cpuArchitecture: "ARM64" + operatingSystemFamily: "LINUX" + vars: + ecs_task_host_port: 8080 # ============================================================ # End tests for Fargate
Unable to configure runtimePlatform in ECS Task Definition to use ARM

### Summary

I am trying to create task definitions via Ansible, however runtimePlatform cannot be configured, which means it is always the default:

```
"runtimePlatform": {
    "cpuArchitecture": "X86_64",
    "operatingSystemFamily": "LINUX"
},
```

Runtime Platform should be configurable to make use of ARM instances on AWS Fargate. I am pretty sure this would also allow Windows systems on Fargate to be supported, however I have no idea if Fargate supports Windows currently, Windows is not my area :-D

Currently I am writing a patch to support this, the issue is just to link my PR :)

### Issue Type

Feature Idea

### Component Name

ecs_task_definition

### Additional Information

<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```

### Code of Conduct

- [X] I agree to follow the Ansible Code of Conduct
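A usage sketch based on the runtime_platform option introduced by the patch above (and mirrored in the integration tests); the family, container variable and execution role ARN are placeholders.

```yaml
- name: register an ARM64 Fargate task definition
  community.aws.ecs_taskdefinition:
    family: example-task-arm                          # placeholder family name
    containers: "{{ task_containers }}"               # placeholder container definitions
    network_mode: awsvpc
    launch_type: FARGATE
    cpu: 512
    memory: 1024
    execution_role_arn: "{{ execution_role_arn }}"    # placeholder role ARN
    state: present
    runtime_platform:
      cpuArchitecture: ARM64
      operatingSystemFamily: LINUX
```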
2023-07-21T09:15:43
ansible-collections/community.aws
1,935
ansible-collections__community.aws-1935
[ "1891" ]
ca773ed0194dc90a399f5b4680838bd865a4877b
diff --git a/plugins/modules/ecs_taskdefinition.py b/plugins/modules/ecs_taskdefinition.py --- a/plugins/modules/ecs_taskdefinition.py +++ b/plugins/modules/ecs_taskdefinition.py @@ -627,6 +627,27 @@ expression: description: A cluster query language expression to apply to the constraint. type: str + runtime_platform: + version_added: 6.4.0 + description: + - runtime platform configuration for the task + required: false + type: dict + default: { + "operatingSystemFamily": "LINUX", + "cpuArchitecture": "X86_64" + } + suboptions: + cpuArchitecture: + description: The CPU Architecture type to be used by the task + type: str + required: false + choices: ['X86_64', 'ARM64'] + operatingSystemFamily: + description: OS type to be used by the task + type: str + required: false + choices: ['LINUX', 'WINDOWS_SERVER_2019_FULL', 'WINDOWS_SERVER_2019_CORE', 'WINDOWS_SERVER_2022_FULL', 'WINDOWS_SERVER_2022_CORE'] extends_documentation_fragment: - amazon.aws.common.modules - amazon.aws.region.modules @@ -813,6 +834,7 @@ def register_task( cpu, memory, placement_constraints, + runtime_platform, ): validated_containers = [] @@ -873,6 +895,8 @@ def register_task( params["executionRoleArn"] = execution_role_arn if placement_constraints: params["placementConstraints"] = placement_constraints + if runtime_platform: + params["runtimePlatform"] = runtime_platform try: response = self.ecs.register_task_definition(aws_retry=True, **params) @@ -939,6 +963,24 @@ def main(): elements="dict", options=dict(type=dict(type="str"), expression=dict(type="str")), ), + runtime_platform=dict( + required=False, + default={"operatingSystemFamily": "LINUX", "cpuArchitecture": "X86_64"}, + type="dict", + options=dict( + cpuArchitecture=dict(required=False, choices=["X86_64", "ARM64"]), + operatingSystemFamily=dict( + required=False, + choices=[ + "LINUX", + "WINDOWS_SERVER_2019_FULL", + "WINDOWS_SERVER_2019_CORE", + "WINDOWS_SERVER_2022_FULL", + "WINDOWS_SERVER_2022_CORE", + ], + ), + ), + ), ) module = AnsibleAWSModule( @@ -1157,6 +1199,7 @@ def _task_definition_matches( module.params["cpu"], module.params["memory"], module.params["placement_constraints"], + module.params["runtime_platform"], ) results["changed"] = True
diff --git a/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml --- a/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml +++ b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml @@ -908,6 +908,65 @@ started_by: ansible_user register: fargate_run_task_output_with_assign_ip +- name: create task definition for ARM + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-arm" + network_mode: awsvpc + launch_type: FARGATE + cpu: 512 + memory: 1024 + execution_role_arn: "{{ iam_execution_role.arn }}" + state: present + runtime_platform: + cpuArchitecture: "ARM64" + operatingSystemFamily: "LINUX" + vars: + ecs_task_host_port: 8080 + register: fargate_arm_task_definition + +- name: check that initial task definition for ARM changes + assert: + that: + - fargate_arm_task_definition.changed + +- name: recreate task definition for ARM + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-arm" + network_mode: awsvpc + launch_type: FARGATE + cpu: 512 + memory: 1024 + execution_role_arn: "{{ iam_execution_role.arn }}" + state: present + runtime_platform: + cpuArchitecture: "ARM64" + operatingSystemFamily: "LINUX" + vars: + ecs_task_host_port: 8080 + register: fargate_arm_task_definition_again + +- name: check that task definition for ARM does not change + assert: + that: + - not fargate_arm_task_definition_again.changed + +- name: delete task definition for ARM + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-arm" + network_mode: awsvpc + launch_type: FARGATE + cpu: 512 + memory: 1024 + execution_role_arn: "{{ iam_execution_role.arn }}" + state: present + runtime_platform: + cpuArchitecture: "ARM64" + operatingSystemFamily: "LINUX" + vars: + ecs_task_host_port: 8080 # ============================================================ # End tests for Fargate
Unable to configure runtimePlatform in ECS Task Definition to use ARM

### Summary

I am trying to create task definitions via Ansible, however runtimePlatform cannot be configured, which means it is always the default:

```
"runtimePlatform": {
    "cpuArchitecture": "X86_64",
    "operatingSystemFamily": "LINUX"
},
```

Runtime Platform should be configurable to make use of ARM instances on AWS Fargate. I am pretty sure this would also allow Windows systems on Fargate to be supported, however I have no idea if Fargate supports Windows currently, Windows is not my area :-D

Currently I am writing a patch to support this, the issue is just to link my PR :)

### Issue Type

Feature Idea

### Component Name

ecs_task_definition

### Additional Information

<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```

### Code of Conduct

- [X] I agree to follow the Ansible Code of Conduct
2023-09-07T10:56:36
ansible-collections/community.aws
1,947
ansible-collections__community.aws-1947
[ "1946" ]
4c5cb406e191e773216893cad3f86a4e46acbaa3
diff --git a/plugins/modules/msk_cluster.py b/plugins/modules/msk_cluster.py --- a/plugins/modules/msk_cluster.py +++ b/plugins/modules/msk_cluster.py @@ -54,6 +54,17 @@ - kafka.m5.xlarge - kafka.m5.2xlarge - kafka.m5.4xlarge + - kafka.m5.8xlarge + - kafka.m5.12xlarge + - kafka.m5.16xlarge + - kafka.m5.24xlarge + - kafka.m7g.large + - kafka.m7g.xlarge + - kafka.m7g.2xlarge + - kafka.m7g.4xlarge + - kafka.m7g.8xlarge + - kafka.m7g.12xlarge + - kafka.m7g.16xlarge default: kafka.t3.small type: str ebs_volume_size: @@ -662,6 +673,17 @@ def main(): "kafka.m5.xlarge", "kafka.m5.2xlarge", "kafka.m5.4xlarge", + "kafka.m5.8xlarge", + "kafka.m5.12xlarge", + "kafka.m5.16xlarge", + "kafka.m5.24xlarge", + "kafka.m7g.large", + "kafka.m7g.xlarge", + "kafka.m7g.2xlarge", + "kafka.m7g.4xlarge", + "kafka.m7g.8xlarge", + "kafka.m7g.12xlarge", + "kafka.m7g.16xlarge", ], default="kafka.t3.small", ),
add an MSK broker instance type

### Summary

Broker instances of MSK with the current provisioning type can be selected up to m5.24xlarge, but only up to m5.4xlarge is available for the ansible module.

### Issue Type

Feature Idea

### Component Name

msk_cluster

### Additional Information

<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
- name: create msk cluster
  msk_cluster:
    name: test-msk-cluster
    state: present
    version: "2.8.1"
    nodes: 2
    ebs_volume_size: 10
    subnets: subnet-temp
    wait: true
    instance_type: "kafka.m5.8xlarge"
```

Received the error message `FAILED! => {"changed": false, "msg": "value of instance_type must be one of: kafka.t3.small, kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge, kafka.m5.4xlarge, got: kafka.m5.8xlarge"}`

- https://docs.aws.amazon.com/msk/latest/developerguide/msk-create-cluster.html#broker-instance-types

### Code of Conduct

- [X] I agree to follow the Ansible Code of Conduct
2023-09-15T05:37:05
ansible-collections/community.aws
1,971
ansible-collections__community.aws-1971
[ "1832", "1832" ]
68d7bea81d34de8e1c663675de3c57abaef62125
diff --git a/plugins/modules/mq_broker.py b/plugins/modules/mq_broker.py --- a/plugins/modules/mq_broker.py +++ b/plugins/modules/mq_broker.py @@ -237,6 +237,7 @@ "storage_type": "StorageType", "subnet_ids": "SubnetIds", "users": "Users", + "tags": "Tags", }
diff --git a/tests/integration/targets/mq/defaults/main.yml b/tests/integration/targets/mq/defaults/main.yml --- a/tests/integration/targets/mq/defaults/main.yml +++ b/tests/integration/targets/mq/defaults/main.yml @@ -5,3 +5,5 @@ vpc_name: "{{ resource_prefix }}-vpc" vpc_cidr: "10.0.0.0/16" subnet_cidr: "10.0.1.0/24" sg_name: "{{resource_prefix}}-sg" +tags: + workload_type: other \ No newline at end of file diff --git a/tests/integration/targets/mq/tasks/broker_tests.yml b/tests/integration/targets/mq/tasks/broker_tests.yml --- a/tests/integration/targets/mq/tasks/broker_tests.yml +++ b/tests/integration/targets/mq/tasks/broker_tests.yml @@ -3,6 +3,7 @@ broker_name: "{{ broker_name }}" security_groups: "{{ broker_sg_ids.split(',') }}" subnet_ids: "{{ broker_subnet_ids.split(',') }}" + tags: "{{ tags }}" register: result - set_fact: broker_id: "{{ result.broker['broker_id'] }}" @@ -20,6 +21,7 @@ - result_c1.broker['broker_name'] == broker_name - result_c1.broker['broker_state'] == 'CREATION_IN_PROGRESS' - ( result_c1.broker['storage_type'] | upper ) == 'EFS' + - result_c1.broker['tags'] == tags when: not ansible_check_mode - debug: msg: "Wait until broker {{ broker_name }} ({{ broker_id }}) enters running state. This may take several minutes" diff --git a/tests/integration/targets/mq/tasks/main.yml b/tests/integration/targets/mq/tasks/main.yml --- a/tests/integration/targets/mq/tasks/main.yml +++ b/tests/integration/targets/mq/tasks/main.yml @@ -32,4 +32,4 @@ - name: cleanup broker include_tasks: broker_cleanup.yml - - include_tasks: env_cleanup.yml \ No newline at end of file + - include_tasks: env_cleanup.yml
mq_broker: Tagging a broker on creation does not work ### Summary When creating a new MQ broker using the following task, the broker does not get tagged. ``` - name: create broker with minimal parameters mq_broker: broker_name: "{{ broker_name }}" security_groups: "{{ broker_sg_ids.split(',') }}" subnet_ids: "{{ broker_subnet_ids.split(',') }}" tags: "Foo": "Bar" "FooBar": "foobar" ``` Actual result: ``` changed: [testhost] => { "broker": { "broker_arn": "arn:aws:mq:us-east-1:123456789100:broker:ansible-test-52903175--mq:b-70e0807b-102d-42ae-8805-94ec6395436c", "broker_id": "b-70e0807b-102d-42ae-8805-94ec6395436c", "response_metadata": { "http_headers": { "access-control-allow-origin": "*", "access-control-expose-headers": "x-amzn-errortype,x-amzn-requestid,x-amzn-errormessage,x-amzn-trace-id,x-amz-apigw-id,date", "cache-control": "no-cache; no-store, must-revalidate, private", "connection": "keep-alive", "content-length": "191", "content-type": "application/json", "date": "Wed, 31 May 2023 13:25:16 GMT", "expires": "0", "pragma": "no-cache", "x-amz-apigw-id": "FyidUFppIAMF1zw=", "x-amzn-requestid": "12345bcb-5678-890d-972c-26a92712aaeb", "x-amzn-trace-id": "Root=1-64774abb-2b3bf58a2b0cbf7800afdef6" }, "http_status_code": 200, "request_id": "59392bcb-5406-460d-972c-26a92712aaeb", "retry_attempts": 0 } }, ``` ### Issue Type Bug Report ### Component Name mq_broker ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.14.3] ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list Collection Version ----------------------------- ------- amazon.aws 6.0.0 community.aws 6.0.0 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Name: boto3 Version: 1.22.0 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/alinabuzachis/anaconda3/envs/py310/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: gouttelette --- Name: botocore Version: 1.25.13 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/alinabuzachis/anaconda3/envs/py310/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: aiobotocore, awscli, boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) ``` ### Expected Results Create an MQ broker using the task I pasted before. ### Actual Results ```console (paste below) ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct mq_broker: Tagging a broker on creation does not work ### Summary When creating a new MQ broker using the following task, the broker does not get tagged. 
``` - name: create broker with minimal parameters mq_broker: broker_name: "{{ broker_name }}" security_groups: "{{ broker_sg_ids.split(',') }}" subnet_ids: "{{ broker_subnet_ids.split(',') }}" tags: "Foo": "Bar" "FooBar": "foobar" ``` Actual result: ``` changed: [testhost] => { "broker": { "broker_arn": "arn:aws:mq:us-east-1:123456789100:broker:ansible-test-52903175--mq:b-70e0807b-102d-42ae-8805-94ec6395436c", "broker_id": "b-70e0807b-102d-42ae-8805-94ec6395436c", "response_metadata": { "http_headers": { "access-control-allow-origin": "*", "access-control-expose-headers": "x-amzn-errortype,x-amzn-requestid,x-amzn-errormessage,x-amzn-trace-id,x-amz-apigw-id,date", "cache-control": "no-cache; no-store, must-revalidate, private", "connection": "keep-alive", "content-length": "191", "content-type": "application/json", "date": "Wed, 31 May 2023 13:25:16 GMT", "expires": "0", "pragma": "no-cache", "x-amz-apigw-id": "FyidUFppIAMF1zw=", "x-amzn-requestid": "12345bcb-5678-890d-972c-26a92712aaeb", "x-amzn-trace-id": "Root=1-64774abb-2b3bf58a2b0cbf7800afdef6" }, "http_status_code": 200, "request_id": "59392bcb-5406-460d-972c-26a92712aaeb", "retry_attempts": 0 } }, ``` ### Issue Type Bug Report ### Component Name mq_broker ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.14.3] ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list Collection Version ----------------------------- ------- amazon.aws 6.0.0 community.aws 6.0.0 ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Name: boto3 Version: 1.22.0 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/alinabuzachis/anaconda3/envs/py310/lib/python3.10/site-packages Requires: botocore, jmespath, s3transfer Required-by: gouttelette --- Name: botocore Version: 1.25.13 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/alinabuzachis/anaconda3/envs/py310/lib/python3.10/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: aiobotocore, awscli, boto3, s3transfer ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment _No response_ ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) ``` ### Expected Results Create an MQ broker using the task I pasted before. ### Actual Results ```console (paste below) ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
2023-10-20T12:52:03
ansible-collections/community.aws
2,010
ansible-collections__community.aws-2010
[ "1566" ]
4bdcecda3d37e1ccd4d568ec641b59d0a745bbca
diff --git a/plugins/modules/elb_network_lb.py b/plugins/modules/elb_network_lb.py --- a/plugins/modules/elb_network_lb.py +++ b/plugins/modules/elb_network_lb.py @@ -69,6 +69,17 @@ description: - The name of the target group. - Mutually exclusive with I(TargetGroupArn). + AlpnPolicy: + description: + - The name of the Application-Layer Protocol Negotiation (ALPN) policy. + type: str + choices: + - HTTP1Only + - HTTP2Only + - HTTP2Optional + - HTTP2Preferred + - None + version_added: 7.1.0 name: description: - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric @@ -283,6 +294,13 @@ returned: when state is present type: str sample: "" + alpn_policy: + description: The name of the Application-Layer Protocol Negotiation (ALPN) policy. + returned: when state is present + type: list + elements: str + version_added: 7.1.0 + sample: ["HTTP1Only", "HTTP2Only"] load_balancer_arn: description: The Amazon Resource Name (ARN) of the load balancer. returned: when state is present @@ -449,6 +467,10 @@ def main(): SslPolicy=dict(type="str"), Certificates=dict(type="list", elements="dict"), DefaultActions=dict(type="list", required=True, elements="dict"), + AlpnPolicy=dict( + type="str", + choices=["HTTP1Only", "HTTP2Only", "HTTP2Optional", "HTTP2Preferred", "None"], + ), ), ), name=dict(required=True, type="str"),
diff --git a/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml b/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml --- a/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml +++ b/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml @@ -73,3 +73,83 @@ that: - nlb.changed - not nlb.listeners + +# TLS listeners +- name: Add a TLS listener + elb_network_lb: + name: "{{ nlb_name }}" + subnets: "{{ nlb_subnets }}" + state: present + listeners: + - Protocol: TLS + Port: 443 + Certificates: + - CertificateArn: "{{ cert.arn }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + SslPolicy: ELBSecurityPolicy-TLS-1-0-2015-04 + AlpnPolicy: HTTP2Optional + register: _add + +- assert: + that: + - _add.listeners[0].alpn_policy == ["HTTP2Optional"] + - _add.listeners[0].ssl_policy == "ELBSecurityPolicy-TLS-1-0-2015-04" + +- name: Add a TLS listener (idempotency) + elb_network_lb: + name: "{{ nlb_name }}" + subnets: "{{ nlb_subnets }}" + listeners: + - Protocol: TLS + Port: 443 + Certificates: + - CertificateArn: "{{ cert.arn }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + SslPolicy: ELBSecurityPolicy-TLS-1-0-2015-04 + AlpnPolicy: HTTP2Optional + register: _idempotency + +- assert: + that: + - _idempotency is not changed + - _idempotency.listeners[0].alpn_policy == ["HTTP2Optional"] + - _idempotency.listeners[0].ssl_policy == "ELBSecurityPolicy-TLS-1-0-2015-04" + +- name: Update TLS listener of NLB + elb_network_lb: + name: "{{ nlb_name }}" + subnets: "{{ nlb_subnets }}" + listeners: + - Protocol: TLS + Port: 443 + Certificates: + - CertificateArn: "{{ cert.arn }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + SslPolicy: ELBSecurityPolicy-TLS13-1-2-FIPS-2023-04 + AlpnPolicy: HTTP1Only + register: _update + +- assert: + that: + - _update is changed + - _update.listeners[0].alpn_policy == ["HTTP1Only"] + - _update.listeners[0].ssl_policy == "ELBSecurityPolicy-TLS13-1-2-FIPS-2023-04" + +- name: remove listener from NLB + elb_network_lb: + name: "{{ nlb_name }}" + subnets: "{{ nlb_subnets }}" + state: present + listeners: [] + register: nlb + +- assert: + that: + - nlb.changed + - not nlb.listeners
elb_network_lb - Add support for ALPN policies

### Summary

Add support for specifying an Application-Layer Protocol Negotiation (ALPN) policy when creating a TLS listener on a Network Load Balancer.

**References**

* https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#alpn-policies
* https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_listener

> **AlpnPolicy (list)**
> [TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) policy. You can specify one policy name. The following are the possible values:
>
> * HTTP1Only
> * HTTP2Only
> * HTTP2Optional
> * HTTP2Preferred
> * None

### Issue Type

Feature Idea

### Component Name

elb_network_lb

### Additional Information

**Example Input**

<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
AlpnPolicy:
  - HTTP2Optional
```

**Example Code Change**

Add

```
AlpnPolicy=dict(type='list', elements='str')
```

to https://github.com/ansible-collections/community.aws/blob/5.0.0/plugins/modules/elb_network_lb.py#L442-L447

### Code of Conduct

- [X] I agree to follow the Ansible Code of Conduct
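A listener sketch using the AlpnPolicy option as merged in the patch above (note the merged change models it as a single string choice rather than the list suggested in the issue); the load balancer name, subnets, certificate ARN and target group name are placeholders.

```yaml
- name: NLB with a TLS listener negotiating HTTP/2 via ALPN
  community.aws.elb_network_lb:
    name: example-nlb                          # placeholder name
    subnets: "{{ nlb_subnets }}"               # placeholder subnet list
    state: present
    listeners:
      - Protocol: TLS
        Port: 443
        Certificates:
          - CertificateArn: "{{ cert_arn }}"   # placeholder certificate ARN
        SslPolicy: ELBSecurityPolicy-TLS-1-0-2015-04
        AlpnPolicy: HTTP2Optional
        DefaultActions:
          - Type: forward
            TargetGroupName: "{{ tg_name }}"   # placeholder target group
```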
Files identified in the description: * [`plugins/modules/elb_network_lb.py`](https://github.com/['ansible-collections/amazon.aws', 'ansible-collections/community.aws', 'ansible-collections/community.vmware']/blob/main/plugins/modules/elb_network_lb.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @markuman @s-hertel @tremble [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify --->
2023-11-28T18:45:40
ansible-collections/community.aws
2,030
ansible-collections__community.aws-2030
[ "1756" ]
f96afea912582ad4911592fb760b12a21ab4b18e
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -627,7 +627,7 @@ def _prepare_terminal(self): disable_prompt_complete = None end_mark = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)]) disable_prompt_cmd = to_bytes( - "PS1='' ; printf '\\n%s\\n' '" + end_mark + "'\n", + "PS1='' ; bind 'set enable-bracketed-paste off'; printf '\\n%s\\n' '" + end_mark + "'\n", errors="surrogate_or_strict", ) disable_prompt_reply = re.compile(r"\r\r\n" + re.escape(end_mark) + r"\r\r\n", re.MULTILINE)
unexpected output from Python interpreter discovery with aws_ssm connection plugin ### Summary I had good luck with the aws_ssm plugin until attempting to use it against the latest Amazon Linux AMI. Simple commands that work well with a CentOS 7 host fails when trying to run them against AMI. It appears to be something to do with interpreting shell output. A 'raw' command like this works fine: ``` ansible -i inventory.aws_ec2.yaml -m 'raw' -a 'whoami' tag_role_FAKEMX ``` However when running the equivalent 'command' module it fails for Amazon Linux, but works on CentOS 7. ``` ❯ ansible -i inventory.aws_ec2.yaml -m 'command' -a 'whoami' tag_role_FAKEMX -l ec2-13-58-203-89.us-east-2.compute.amazonaws.com [WARNING]: Unhandled error in Python interpreter discovery for host ec2-13-58-203-89.us-east-2.compute.amazonaws.com: unexpected output from Python interpreter discovery ec2-13-58-203-89.us-east-2.compute.amazonaws.com | FAILED | rc=-1 >> failed to transfer file to /Users/username/.ansible/tmp/ansible-local-78807y4966nip/tmpqn_vbdbk /AnsiballZ_command.py: % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0Warning: Failed to open the file 2004h2004l/AnsiballZ_command.py: No such file Warning: or directory 1 129k 1 1531 0 0 19698 0 0:00:06 --:--:-- 0:00:06 19883 curl: (23) Failure writing output to destination ``` Versus CentOS 7: ``` ❯ ansible -i inventory.aws_ec2.yaml -m 'command' -a 'whoami' tag_role_FAKEMX -l ip-10-240-172-59.us-east-2.compute.internal [WARNING]: Platform linux on host ip-10-240-172-59.us-east-2.compute.internal is using the discovered Python interpreter at /usr/libexec/platform-python, but future installation of another Python interpreter could change the meaning of that path. See https://docs.ansible.com/ansible- core/2.14/reference_appendices/interpreter_discovery.html for more information. ip-10-240-172-59.us-east-2.compute.internal | CHANGED | rc=0 >> root ``` From '-vvvv' output I see things like this: ``` <i-0cc859c89f4aaf5f4> ssm_retry: (success) (0, '\x1b[?2004h\x1b[?2004l\r\r\r\nPLATFORM\r\r\nLinux\r\r\nFOUND\r\r\n/usr/bin/python3.9\r\r\n/usr/bin/python3\r\r\nENDFOUND\r\r\n\x1b[?2004h\x1b[?2004l\r\r\r', '') [WARNING]: Unhandled error in Python interpreter discovery for host ec2-13-58-203-89.us-east-2.compute.amazonaws.com: unexpected output from Python interpreter discovery Using module file /Users/username/.asdf/installs/python/3.11.2/lib/python3.11/site-packages/ansible/modules/command.py ``` That's a failure to find the python version. Versus: ``` <i-03c9cbe64572b3eb0> ssm_retry: (success) (0, 'PLATFORM\r\r\nLinux\r\r\nFOUND\r\r\n/usr/libexec/platform-python\r\r\n/usr/bin/python2.7\r\r\n/usr/bin/python\r\r\n/usr/bin/python\r\r\nENDFOUND\r\r', '') <ip-10-240-172-59.us-east-2.compute.internal> Python interpreter discovery fallback (pipelining support required for extended interpreter discovery)` ``` Where it didn't complain and seems to be successful. Notice the additional "x1b[?2004h\x1b[?2004" strings in the output. I see the similar string in other problems, like "Warning: Failed to open the file 2004h2004l/AnsiballZ_command.py" Note that if I set the python interpreter it will remove the warning, but it will still error out on the "Failed to open the file 2004h2004l/AnsiballZ_command.py: No such file Warning: or directory" Tried this with community.aws collection versions 4.5.0, 5.2.0, and 5.3.0 and the errors is the same every time. 
Also tried a few different amazon.aws collection versions and had the same error. These examples are taken from a Mac running python 3.11.2 and ansible 2.14.2, but the same errors occured in a Linux-based Execution Environment for AWX running in EKS. ### Issue Type Bug Report ### Component Name aws_ssm connection ### Ansible Version ``` ansible [core 2.14.2] config file = None configured module search path = ['/Users/username/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /Users/username/.asdf/installs/python/3.11.2/lib/python3.11/site-packages/ansible ansible collection location = /Users/username/.ansible/collections:/usr/share/ansible/collections executable location = /Users/username/.asdf/installs/python/3.11.2/bin/ansible python version = 3.11.2 (main, Feb 21 2023, 11:07:56) [Clang 13.1.6 (clang-1316.0.21.2.5)] (/Users/username/.asdf/installs/python/3.11.2/bin/python3.11) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```# /Users/username/.asdf/installs/python/3.11.2/lib/python3.11/site-packages/ansible_collections Collection Version ----------------------------- ------- ansible.netcommon 4.1.0 ansible.posix 1.5.1 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.0 awx.awx 21.11.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.3 cisco.intersight 1.0.23 cisco.ios 4.3.1 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.0 cisco.mso 2.2.1 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.4 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.10.0 community.digitalocean 1.23.0 community.dns 2.5.0 community.docker 3.4.0 community.fortios 1.0.0 community.general 6.3.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.1.0 community.hrobot 1.7.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.7.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.6.0 community.vmware 3.3.0 community.windows 1.12.0 community.zabbix 1.9.1 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.5.0 dellemc.unity 1.5.0 f5networks.f5_modules 1.22.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.2 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.1.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.1 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.2.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.10.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.16.2 purestorage.flashblade 1.10.0 purestorage.fusion 1.3.0 sensu.sensu_go 1.13.2 splunk.es 2.1.0 t_systems_mms.icinga_director 1.32.0 theforeman.foreman 3.8.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.7.0 vyos.vyos 4.0.0 wti.remote 1.0.4 # 
/Users/username/.ansible/collections/ansible_collections Collection Version ------------- ------- amazon.aws 4.3.0 community.aws 5.2.0 ``` ### AWS SDK versions ``` ❯ pip show boto boto3 botocore WARNING: Package(s) not found: boto Name: boto3 Version: 1.26.76 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/username/.asdf/installs/python/3.11.2/lib/python3.11/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.29.76 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /Users/username/.asdf/installs/python/3.11.2/lib/python3.11/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: boto3, s3transfer ``` ### Configuration ``` ❯ ansible-config dump --only-changed CONFIG_FILE() = None ``` ### OS / Environment Darwin ENG-NMOSEMAN-MB 22.3.0 Darwin Kernel Version 22.3.0: Mon Jan 30 20:42:11 PST 2023; root:xnu-8792.81.3~2/RELEASE_X86_64 x86_64 i386 Darwin and modified version of quay.io/ansible/awx-ee:latest to include community.aws, and ssm stuff. ### Steps to Reproduce ``` ❯ ansible -i inventory.aws_ec2.yaml -m 'command' -a 'whoami' tag_role_FAKEMX -l ec2-13-58-203-89.us-east-2.compute.amazonaws.com [WARNING]: Unhandled error in Python interpreter discovery for host ec2-13-58-203-89.us-east-2.compute.amazonaws.com: unexpected output from Python interpreter discovery ec2-13-58-203-89.us-east-2.compute.amazonaws.com | FAILED | rc=-1 >> failed to transfer file to /Users/username/.ansible/tmp/ansible-local-78807y4966nip/tmpqn_vbdbk /AnsiballZ_command.py: % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0Warning: Failed to open the file 2004h2004l/AnsiballZ_command.py: No such file Warning: or directory 1 129k 1 1531 0 0 19698 0 0:00:06 --:--:-- 0:00:06 19883 curl: (23) Failure writing output to destination ``` ### Expected Results ``` ❯ ansible -i inventory.aws_ec2.yaml -m 'command' -a 'whoami' tag_role_FAKEMX -l ip-10-240-172-59.us-east-2.compute.internal ip-10-240-172-59.us-east-2.compute.internal | CHANGED | rc=0 >> root ``` ### Actual Results ```failed to transfer file to /Users/username/.ansible/tmp/ansible-local-78807y4966nip/tmpqn_vbdbk /AnsiballZ_command.py: % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0Warning: Failed to open the file 2004h2004l/AnsiballZ_command.py: No such file Warning: or directory 1 129k 1 1531 0 0 19698 0 0:00:06 --:--:-- 0:00:06 19883 curl: (23) Failure writing output to destination ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
We just ran into this issue here. It seems that Amazon Linux outputs colorized text when Ansible runs any remote shell commands which causes parsing of the result to fail. Our solution was to not use aws_ssm connection - instead we setup SSH to make connections through Session Manager: https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started-enable-ssh-connections.html I was facing this same issue with hosts running both Ubuntu 22.04 and Amazon Linux 2023, and I was finally able to trace the extra output to a root cause. This is due to newer versions of Bash/readline turning on the option `enable-bracketed-paste` by default (more details [here](https://utcc.utoronto.ca/~cks/space/blog/unix/BashBracketedPasteChange)). I have a patch that will disable this option (will send a PR later today), which allows `ansible -m ping` to work on several hosts I have access to, including Ubuntu 18.04, Amazon Linux 2023, and Ubuntu 22.04. > https://github.com/ansible-collections/community.aws/pull/1839 Confirmed it's working with Amazon Linux I've ended up having the same issue switching from amazon Linux 2 to amazon Linux 2023, my error was: ``` service-use1-bh | UNREACHABLE! => { "changed": false, "msg": "Failed to create temporary directory. In some cases, you may have been able to authenticate and did not have permissions on the target directory. Consider changing the remote tmp path in ansible.cfg to a path rooted in \"/tmp\", for more error information use -vvv. Failed command was: ( umask 77 && mkdir -p \"` echo \u001b[?2004h\u001b[?2004l/.ansible/tmp `\"&& mkdir \"` echo \u001b[?2004h\u001b[?2004l/.ansible/tmp/ansible-tmp-1694091988.5261781-5952-41494157774678 `\" && echo ansible-tmp-1694091988.5261781-5952-41494157774678=\"` echo \u001b[?2004h\u001b[?2004l/.ansible/tmp/ansible-tmp-1694091988.5261781-5952-41494157774678 `\" ), exited with result 1, stdout output: \u001b[?2004h\u001b[?2004l\r\r\r\nmkdir: cannot create directory ‘2004h2004l’: Permission denied\r\r\n\u001b[?2004h\u001b[?2004l\r\r\r", "unreachable": true } ``` https://github.com/ansible-collections/community.aws/pull/1839 fixes it, but it's taking ages to be merged for unknown reasons. In the meantime I've set `set enable-bracketed-paste off` into `/etc/inputrc` which is, needless to say, not a fix at all since you need to configure all servers this way, which is exactly what ansible is meant to do. In my case was just one so _for now_ it's sorted, thanks to @dennisjlee !
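As a rough illustration of what goes wrong (this is not the fix merged in PR #1839, which disables the option on the remote shell): newer Bash/readline versions wrap interactive output in bracketed-paste toggle sequences (`ESC[?2004h` / `ESC[?2004l`), and those bytes end up in the output Ansible parses. A minimal Python sketch of stripping those sequences from captured output:

```python
import re

# Bracketed-paste toggles emitted by newer Bash/readline versions:
# ESC [ ? 2004 h (enable) and ESC [ ? 2004 l (disable).
BRACKETED_PASTE_RE = re.compile(r"\x1b\[\?2004[hl]")

def strip_bracketed_paste(output: str) -> str:
    """Remove bracketed-paste control sequences from captured shell output."""
    return BRACKETED_PASTE_RE.sub("", output)

# The garbled "2004h2004l" paths in the errors above come from these sequences
# being interpolated into paths instead of being discarded.
raw = "\x1b[?2004h\x1b[?2004l/home/user/.ansible/tmp"
print(strip_bracketed_paste(raw))  # -> /home/user/.ansible/tmp
```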
2024-01-03T12:36:36
ansible-collections/community.aws
2031
ansible-collections__community.aws-2031
[ "1775" ]
815789a1984d45fe2a9232af0ae54b118088e92f
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -20,12 +20,27 @@ ``ansible_user`` variables to configure the remote user. The ``become_user`` parameter should be used to configure which user to run commands as. Remote commands will often default to running as the ``ssm-agent`` user, however this will also depend on how SSM has been configured. + - This plugin requires an S3 bucket to send files to/from the remote instance. This is required even for modules + which do not explicitly send files (such as the C(shell) or C(command) modules), because Ansible sends over the C(.py) files of the module itself, via S3. + - Files sent via S3 will be named in S3 with the EC2 host ID (e.g. C(i-123abc/)) as the prefix. + - The files in S3 will be deleted by the end of the playbook run. If the play is terminated ungracefully, the files may remain in the bucket. + If the bucket has versioning enabled, the files will remain in version history. If your tasks involve sending secrets to/from the remote instance + (e.g. within a C(shell) command, or a SQL password in the C(community.postgresql.postgresql_query) module) then those passwords will be included in + plaintext in those files in S3 indefinitely, visible to anyone with access to that bucket. Therefore it is recommended to use a bucket with versioning + disabled/suspended. + - The files in S3 will be deleted even if the C(keep_remote_files) setting is C(true). + requirements: - The remote EC2 instance must be running the AWS Systems Manager Agent (SSM Agent). U(https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) - The control machine must have the AWS session manager plugin installed. U(https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) - The remote EC2 Linux instance must have curl installed. + - The remote EC2 Linux instance and the controller both need network connectivity to S3. + - The remote instance does not require IAM credentials for S3. This module will generate a presigned URL for S3 from the controller, + and then will pass that URL to the target over SSM, telling the target to download/upload from S3 with C(curl). + - The controller requires IAM permissions to upload, download and delete files from the specified S3 bucket. This includes + `s3:GetObject`, `s3:PutObject`, `s3:ListBucket`, `s3:DeleteObject` and `s3:GetBucketLocation`. options: access_key_id:
SSM connector docs should explain the S3 part ### Summary The [SSM connector docs](https://docs.ansible.com/ansible/latest/collections/community/aws/aws_ssm_connection.html#ansible-collections-community-aws-aws-ssm-connection) don't mention S3 up the top. They only mention it in the details of the arguments, which is a bit unclear for someone completely new to this. In the "Requirements" section, it should say * that you need to have already created an S3 bucket. * that the target and controller both need network connectivity to S3 * why a bucket is required, even if you're not running any `copy` commands. (One sentence explanation is probably fine.) * which IAM permissions are required on the target (e.g. `s3:GetObject`, or `s3:GetObjectVersion`, etc, or also ListBucket?) * which IAM permissions are required on the controller (s3:PutObject, s3:DeleteObject. Anything else? e.g. presigned URLs?) * which prefix within S3 the objects are saved to * whether the files in S3 are deleted when done. * whether the files in S3 are deleted if the general Ansible setting [`keep_remote_files=True`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-keep-remote-files). ### Issue Type Documentation Report ### Component Name community.aws.aws_ssm connection ### Ansible Version ```console (paste below) ansible [core 2.13.5] config file = /Users/matthew/Documents/mms/new-repo/ansible.cfg configured module search path = ['/Users/matthew/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /Users/matthew/.pyenv/versions/3.10.0/lib/python3.10/site-packages/ansible ansible collection location = /Users/matthew/.ansible/collections:/usr/share/ansible/collections executable location = /Users/matthew/.pyenv/versions/3.10.0/bin/ansible python version = 3.10.0 (default, Nov 12 2021, 11:20:43) [Clang 12.0.5 (clang-1205.0.22.11)] jinja version = 3.1.2 libyaml = False ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /Users/matthew/.pyenv/versions/3.10.0/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 3.4.0 ansible.netcommon 3.1.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.11.1 arista.eos 5.0.1 awx.awx 21.5.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.0 cisco.aci 2.2.0 cisco.asa 3.1.0 cisco.dnac 6.6.0 cisco.intersight 1.0.19 cisco.ios 3.3.1 cisco.iosxr 3.3.1 cisco.ise 2.5.3 cisco.meraki 2.11.0 cisco.mso 2.0.0 cisco.nso 1.0.3 cisco.nxos 3.1.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.5.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.5.0 community.digitalocean 1.21.0 community.dns 2.3.2 community.docker 2.7.1 community.fortios 1.0.0 community.general 5.6.0 community.google 1.0.0 community.grafana 1.5.2 community.hashi_vault 3.2.0 community.hrobot 1.5.2 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.2.0 community.proxysql 1.4.0 community.rabbitmq 1.2.2 community.routeros 2.3.0 community.sap 1.0.0 community.sap_libs 1.3.0 community.skydive 1.0.0 community.sops 1.4.0 community.vmware 2.9.1 community.windows 1.11.0 community.zabbix 1.8.0 containers.podman 1.9.4 cyberark.conjur 1.2.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.2 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.19.0 
fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.7 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.2 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.9.0 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.3.0 inspur.ispim 1.0.1 inspur.sm 2.0.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.19.0 netapp.elementsw 21.7.0 netapp.ontap 21.23.0 netapp.storagegrid 21.11.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.1 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.9.1 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.2.3 purestorage.flasharray 1.13.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.1.0 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 2.1.0 t_systems_mms.icinga_director 1.31.0 theforeman.foreman 3.6.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.1.0 vyos.vyos 3.0.1 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ANY_ERRORS_FATAL(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = True DEFAULT_KEEP_REMOTE_FILES(env: ANSIBLE_KEEP_REMOTE_FILES) = False DEFAULT_STDOUT_CALLBACK(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = yaml INVENTORY_UNPARSED_IS_FAILED(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = True LOCALHOST_WARNING(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = False ``` ### OS / Environment Mac OS ### Additional Information _No response_ ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
@mdavis-xyz looks like you're more familar with ssm connections. Are you willing to provide a PR that improves the documentation? I need confirmation of the answers. e.g. I'm not sure what permissions are required for the controller. especially for [presigned URLs](https://github.com/boto/boto3/issues/3670). Also, what's the right way to put hyperlinks in the docs? e.g. [keep_remote_files=True](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-keep-remote-files)? I can put in that absolute hyperlink, but I assume we'd want to keep the links within each particular version of the docs? Q: Why a bucket is required, even if you're not running any copy commands. (One sentence explanation is probably fine.): A: Ansible is to designed to not require anything (except Python) to be installed on the target. For each Ansible module, Ansible copies a python script to the target, and then executes it. This is true for all modules, not just the file copying ones like `copy`. It is possible to send files directly over SSM, however that is slow compared to using S3. That is, the controller uploads files to S3, and then sends a shell command to the target, telling it to download the file from S3. Q: Which IAM permissions are required on the target (e.g. s3:GetObject, or s3:GetObjectVersion, etc, or also ListBucket?) A: No S3 IAM permissions are required on the target. To simplify IAM permissions and reduce dependency requirements, the controller generates a pre-signed URL for each file, and then tells the target to run `curl https://...`. Q: which IAM permissions are required on the controller (s3:PutObject, s3:DeleteObject. Anything else? e.g. presigned URLs?) A: I'm not sure Q: which prefix within S3 the objects are saved to A: The file `/path/to/something.txt` For EC2 instance `i-123` will be saved at `s3://bucket/i-123//path/to/something.txt`. Q: whether the files in S3 are deleted when done. A: yes it does A: whether the files in S3 are deleted if the general Ansible setting [keep_remote_files=True](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-keep-remote-files). Q: That setting is ignored for files in S3. One other reason for S3 is that if you send files directly over SSM (e.g. `echo blah | base64 -d > file`), the contents will be visible persistently in .bash_history, and perhaps in SSM execution history. For some files that might be a security risk. (Just a guess) I was struggling with the S3 permissions as well due to missing documentary. Finally I found out that the Ansible host as the target need these actions allowed: `s3:GetObject`, `s3:PutObject`, `s3:ListBucket`, `s3:DeleteObject` and `s3:GetBucketLocation`. I did this using a bucket policy. A short documentation would be helpful, thank you! Edit: Update required actions
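A hedged sketch (not the plugin's exact code) of the mechanism described above, using a hypothetical bucket and instance ID: the controller presigns an S3 GET with boto3 and hands the resulting URL to the target as a `curl` command, so the target itself needs no S3 credentials.

```python
import boto3

s3 = boto3.client("s3")

def presigned_get(bucket: str, instance_id: str, remote_path: str, expires: int = 3600) -> str:
    # Objects are keyed under the EC2 host ID, e.g. "i-123abc//path/to/file".
    key = f"{instance_id}/{remote_path}"
    return s3.generate_presigned_url(
        ClientMethod="get_object",
        Params={"Bucket": bucket, "Key": key},
        ExpiresIn=expires,
    )

# Hypothetical bucket/instance names, for illustration only.
url = presigned_get("my-ansible-transfer-bucket", "i-0123456789abcdef0", "/tmp/AnsiballZ_command.py")
print(f"curl -o /tmp/AnsiballZ_command.py '{url}'")  # command the target runs over SSM
```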
2024-01-03T12:38:15
ansible-collections/community.aws
2032
ansible-collections__community.aws-2032
[ "1775" ]
f96afea912582ad4911592fb760b12a21ab4b18e
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -20,12 +20,26 @@ ``ansible_user`` variables to configure the remote user. The ``become_user`` parameter should be used to configure which user to run commands as. Remote commands will often default to running as the ``ssm-agent`` user, however this will also depend on how SSM has been configured. + - This plugin requires an S3 bucket to send files to/from the remote instance. This is required even for modules + which do not explicitly send files (such as the C(shell) or C(command) modules), because Ansible sends over the C(.py) files of the module itself, via S3. + - Files sent via S3 will be named in S3 with the EC2 host ID (e.g. C(i-123abc/)) as the prefix. + - The files in S3 will be deleted by the end of the playbook run. If the play is terminated ungracefully, the files may remain in the bucket. + If the bucket has versioning enabled, the files will remain in version history. If your tasks involve sending secrets to/from the remote instance + (e.g. within a C(shell) command, or a SQL password in the C(community.postgresql.postgresql_query) module) then those passwords will be included in plaintext in those files in S3 indefinitely, + visible to anyone with access to that bucket. Therefore it is recommended to use a bucket with versioning disabled/suspended. + - The files in S3 will be deleted even if the C(keep_remote_files) setting is C(true). + requirements: - The remote EC2 instance must be running the AWS Systems Manager Agent (SSM Agent). U(https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) - The control machine must have the AWS session manager plugin installed. U(https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) - The remote EC2 Linux instance must have curl installed. + - The remote EC2 Linux instance and the controller both need network connectivity to S3. + - The remote instance does not require IAM credentials for S3. This module will generate a presigned URL for S3 from the controller, + and then will pass that URL to the target over SSM, telling the target to download/upload from S3 with C(curl). + - The controller requires IAM permissions to upload, download and delete files from the specified S3 bucket. This includes + `s3:GetObject`, `s3:PutObject`, `s3:ListBucket`, `s3:DeleteObject` and `s3:GetBucketLocation`. options: access_key_id:
SSM connector docs should explain the S3 part ### Summary The [SSM connector docs](https://docs.ansible.com/ansible/latest/collections/community/aws/aws_ssm_connection.html#ansible-collections-community-aws-aws-ssm-connection) don't mention S3 up the top. They only mention it in the details of the arguments, which is a bit unclear for someone completely new to this. In the "Requirements" section, it should say * that you need to have already created an S3 bucket. * that the target and controller both need network connectivity to S3 * why a bucket is required, even if you're not running any `copy` commands. (One sentence explanation is probably fine.) * which IAM permissions are required on the target (e.g. `s3:GetObject`, or `s3:GetObjectVersion`, etc, or also ListBucket?) * which IAM permissions are required on the controller (s3:PutObject, s3:DeleteObject. Anything else? e.g. presigned URLs?) * which prefix within S3 the objects are saved to * whether the files in S3 are deleted when done. * whether the files in S3 are deleted if the general Ansible setting [`keep_remote_files=True`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-keep-remote-files). ### Issue Type Documentation Report ### Component Name community.aws.aws_ssm connection ### Ansible Version ```console (paste below) ansible [core 2.13.5] config file = /Users/matthew/Documents/mms/new-repo/ansible.cfg configured module search path = ['/Users/matthew/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /Users/matthew/.pyenv/versions/3.10.0/lib/python3.10/site-packages/ansible ansible collection location = /Users/matthew/.ansible/collections:/usr/share/ansible/collections executable location = /Users/matthew/.pyenv/versions/3.10.0/bin/ansible python version = 3.10.0 (default, Nov 12 2021, 11:20:43) [Clang 12.0.5 (clang-1205.0.22.11)] jinja version = 3.1.2 libyaml = False ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /Users/matthew/.pyenv/versions/3.10.0/lib/python3.10/site-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 3.4.0 ansible.netcommon 3.1.1 ansible.posix 1.4.0 ansible.utils 2.6.1 ansible.windows 1.11.1 arista.eos 5.0.1 awx.awx 21.5.0 azure.azcollection 1.13.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.0 cisco.aci 2.2.0 cisco.asa 3.1.0 cisco.dnac 6.6.0 cisco.intersight 1.0.19 cisco.ios 3.3.1 cisco.iosxr 3.3.1 cisco.ise 2.5.3 cisco.meraki 2.11.0 cisco.mso 2.0.0 cisco.nso 1.0.3 cisco.nxos 3.1.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.2 community.aws 3.5.0 community.azure 1.1.0 community.ciscosmb 1.0.5 community.crypto 2.5.0 community.digitalocean 1.21.0 community.dns 2.3.2 community.docker 2.7.1 community.fortios 1.0.0 community.general 5.6.0 community.google 1.0.0 community.grafana 1.5.2 community.hashi_vault 3.2.0 community.hrobot 1.5.2 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 4.0.1 community.okd 2.2.0 community.postgresql 2.2.0 community.proxysql 1.4.0 community.rabbitmq 1.2.2 community.routeros 2.3.0 community.sap 1.0.0 community.sap_libs 1.3.0 community.skydive 1.0.0 community.sops 1.4.0 community.vmware 2.9.1 community.windows 1.11.0 community.zabbix 1.8.0 containers.podman 1.9.4 cyberark.conjur 1.2.0 cyberark.pas 1.0.14 dellemc.enterprise_sonic 1.1.2 dellemc.openmanage 5.5.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 f5networks.f5_modules 1.19.0 
fortinet.fortimanager 2.1.5 fortinet.fortios 2.1.7 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.0.2 hetzner.hcloud 1.8.2 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.9.0 infinidat.infinibox 1.3.3 infoblox.nios_modules 1.3.0 inspur.ispim 1.0.1 inspur.sm 2.0.0 junipernetworks.junos 3.1.0 kubernetes.core 2.3.2 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.19.0 netapp.elementsw 21.7.0 netapp.ontap 21.23.0 netapp.storagegrid 21.11.0 netapp.um_info 21.8.0 netapp_eseries.santricity 1.3.1 netbox.netbox 3.7.1 ngine_io.cloudstack 2.2.4 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.2 openstack.cloud 1.9.1 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.2.3 purestorage.flasharray 1.13.0 purestorage.flashblade 1.10.0 purestorage.fusion 1.1.0 sensu.sensu_go 1.13.1 servicenow.servicenow 1.0.6 splunk.es 2.1.0 t_systems_mms.icinga_director 1.31.0 theforeman.foreman 3.6.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.1.0 vyos.vyos 3.0.1 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ANY_ERRORS_FATAL(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = True DEFAULT_KEEP_REMOTE_FILES(env: ANSIBLE_KEEP_REMOTE_FILES) = False DEFAULT_STDOUT_CALLBACK(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = yaml INVENTORY_UNPARSED_IS_FAILED(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = True LOCALHOST_WARNING(/Users/matthew/Documents/mms/new-repo/ansible.cfg) = False ``` ### OS / Environment Mac OS ### Additional Information _No response_ ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
@mdavis-xyz looks like you're more familar with ssm connections. Are you willing to provide a PR that improves the documentation? I need confirmation of the answers. e.g. I'm not sure what permissions are required for the controller. especially for [presigned URLs](https://github.com/boto/boto3/issues/3670). Also, what's the right way to put hyperlinks in the docs? e.g. [keep_remote_files=True](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-keep-remote-files)? I can put in that absolute hyperlink, but I assume we'd want to keep the links within each particular version of the docs? Q: Why a bucket is required, even if you're not running any copy commands. (One sentence explanation is probably fine.): A: Ansible is to designed to not require anything (except Python) to be installed on the target. For each Ansible module, Ansible copies a python script to the target, and then executes it. This is true for all modules, not just the file copying ones like `copy`. It is possible to send files directly over SSM, however that is slow compared to using S3. That is, the controller uploads files to S3, and then sends a shell command to the target, telling it to download the file from S3. Q: Which IAM permissions are required on the target (e.g. s3:GetObject, or s3:GetObjectVersion, etc, or also ListBucket?) A: No S3 IAM permissions are required on the target. To simplify IAM permissions and reduce dependency requirements, the controller generates a pre-signed URL for each file, and then tells the target to run `curl https://...`. Q: which IAM permissions are required on the controller (s3:PutObject, s3:DeleteObject. Anything else? e.g. presigned URLs?) A: I'm not sure Q: which prefix within S3 the objects are saved to A: The file `/path/to/something.txt` For EC2 instance `i-123` will be saved at `s3://bucket/i-123//path/to/something.txt`. Q: whether the files in S3 are deleted when done. A: yes it does A: whether the files in S3 are deleted if the general Ansible setting [keep_remote_files=True](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-keep-remote-files). Q: That setting is ignored for files in S3. One other reason for S3 is that if you send files directly over SSM (e.g. `echo blah | base64 -d > file`), the contents will be visible persistently in .bash_history, and perhaps in SSM execution history. For some files that might be a security risk. (Just a guess) I was struggling with the S3 permissions as well due to missing documentary. Finally I found out that the Ansible host as the target need these actions allowed: `s3:GetObject`, `s3:PutObject`, `s3:ListBucket`, `s3:DeleteObject` and `s3:GetBucketLocation`. I did this using a bucket policy. A short documentation would be helpful, thank you! Edit: Update required actions
2024-01-03T12:38:17
ansible-collections/community.aws
2038
ansible-collections__community.aws-2038
[ "290" ]
441c78841085fdbc39b6f58169e33fc404ba090c
diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py --- a/plugins/modules/cloudfront_distribution.py +++ b/plugins/modules/cloudfront_distribution.py @@ -205,9 +205,25 @@ description: - The ID of the header policy that CloudFront adds to responses that it sends to viewers. type: str + cache_policy_id: + version_added: 7.1.0 + description: + - The ID of the cache policy for CloudFront to use for the default cache behavior. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html) + type: str + origin_request_policy_id: + version_added: 7.1.0 + description: + - The ID of the origin request policy for CloudFront to use for the default cache behavior. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html) + type: str forwarded_values: description: - A dict that specifies how CloudFront handles query strings and cookies. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. type: dict suboptions: query_string: @@ -326,9 +342,25 @@ description: - The ID of the header policy that CloudFront adds to responses that it sends to viewers. type: str + cache_policy_id: + version_added: 7.1.0 + description: + - The ID of the cache policy for CloudFront to use for the cache behavior. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html) + type: str + origin_request_policy_id: + version_added: 7.1.0 + description: + - The ID of the origin request policy for CloudFront to use for the cache behavior. + - For more information see the CloudFront documentation + at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html) + type: str forwarded_values: description: - A dict that specifies how CloudFront handles query strings and cookies. + - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option. 
type: dict suboptions: query_string: @@ -1914,7 +1946,10 @@ def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_defa cache_behavior = self.validate_cache_behavior_first_level_keys( config, cache_behavior, valid_origins, is_default_cache ) - cache_behavior = self.validate_forwarded_values(config, cache_behavior.get("forwarded_values"), cache_behavior) + if cache_behavior.get("cache_policy_id") is None: + cache_behavior = self.validate_forwarded_values( + config, cache_behavior.get("forwarded_values"), cache_behavior + ) cache_behavior = self.validate_allowed_methods(config, cache_behavior.get("allowed_methods"), cache_behavior) cache_behavior = self.validate_lambda_function_associations( config, cache_behavior.get("lambda_function_associations"), cache_behavior @@ -1926,19 +1961,34 @@ def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_defa return cache_behavior def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache): - try: - cache_behavior = self.add_key_else_change_dict_key( - cache_behavior, "min_ttl", "min_t_t_l", config.get("min_t_t_l", self.__default_cache_behavior_min_ttl) - ) - cache_behavior = self.add_key_else_change_dict_key( - cache_behavior, "max_ttl", "max_t_t_l", config.get("max_t_t_l", self.__default_cache_behavior_max_ttl) - ) - cache_behavior = self.add_key_else_change_dict_key( - cache_behavior, - "default_ttl", - "default_t_t_l", - config.get("default_t_t_l", self.__default_cache_behavior_default_ttl), + if cache_behavior.get("cache_policy_id") is not None and cache_behavior.get("forwarded_values") is not None: + if is_default_cache: + cache_behavior_name = "Default cache behavior" + else: + cache_behavior_name = f"Cache behavior for path {cache_behavior['path_pattern']}" + self.module.fail_json( + msg=f"{cache_behavior_name} cannot have both a cache_policy_id and a forwarded_values option." ) + try: + if cache_behavior.get("cache_policy_id") is None: + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, + "min_ttl", + "min_t_t_l", + config.get("min_t_t_l", self.__default_cache_behavior_min_ttl), + ) + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, + "max_ttl", + "max_t_t_l", + config.get("max_t_t_l", self.__default_cache_behavior_max_ttl), + ) + cache_behavior = self.add_key_else_change_dict_key( + cache_behavior, + "default_ttl", + "default_t_t_l", + config.get("default_t_t_l", self.__default_cache_behavior_default_ttl), + ) cache_behavior = self.add_missing_key( cache_behavior, "compress", config.get("compress", self.__default_cache_behavior_compress) )
diff --git a/tests/integration/targets/cloudfront_distribution/tasks/main.yml b/tests/integration/targets/cloudfront_distribution/tasks/main.yml --- a/tests/integration/targets/cloudfront_distribution/tasks/main.yml +++ b/tests/integration/targets/cloudfront_distribution/tasks/main.yml @@ -632,6 +632,22 @@ - result.origins['quantity'] > 0 - result.origins['items'] | selectattr('s3_origin_config', 'defined') | map(attribute='s3_origin_config') | selectattr('origin_access_identity', 'eq', origin_access_identity) | list | length == 1 + - name: update distribution to use cache_policy_id and origin_request_policy_id + cloudfront_distribution: + distribution_id: "{{ distribution_id }}" + default_cache_behavior: + cache_policy_id: "658327ea-f89d-4fab-a63d-7e88639e58f6" + origin_request_policy_id: "88a5eaf4-2fd4-4709-b370-b4c650ea3fcf" + state: present + register: update_distribution_with_cache_policies + + - name: ensure that the cache_policy_id and origin_request_policy_id was set + assert: + that: + - update_distribution_with_cache_policies.changed + - update_distribution_with_cache_policies.default_cache_behavior.cache_policy_id == '658327ea-f89d-4fab-a63d-7e88639e58f6' + - update_distribution_with_cache_policies.default_cache_behavior.origin_request_policy_id == '88a5eaf4-2fd4-4709-b370-b4c650ea3fcf' + always: # TEARDOWN STARTS HERE - name: delete the s3 bucket
cloudfront_distribution - Add support for setting cache and origin request policy ids in cache behavior ##### SUMMARY Add support for setting cache and origin request policy ids in the cache behaviors block when creating a distribution in cloudfront_distribution. ##### ISSUE TYPE - Feature Idea ##### COMPONENT NAME cloudfront_distribution ##### ADDITIONAL INFORMATION Currently we are unable to set cache policy or origin request policy ids in the cache behaviors section so that they are added to a distribution. <!--- Paste example playbooks or commands between quotes below --> Example playbook code: ```yaml cache_behaviors: - path_pattern: "*/" target_origin_id: "" field_level_encryption_id: "" cache_policy_id: "{{ cache_policy_id }}" origin_request_policy_id: "{{ origin_request_policy_id }}" ``` Example python code: ```python cache_behavior['cache_policy_id'] = cache_behavior.get('cache_policy_id', config.get('cache_policy_id')) cache_behavior['origin_request_policy_id'] = cache_behavior.get('origin_request_policy_id', config.get('origin_request_policy_id')) ``` <!--- HINT: You can also paste gist.github.com links for larger files -->
Files identified in the description: * [`plugins/modules/cloudfront_distribution.py`](https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/cloudfront_distribution.py) If these files are inaccurate, please update the `component name` section of the description or use the `!component` bot command. [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: components_banner ---> cc @jillr @s-hertel @tremble @willthames @wilvk @wimnat [click here for bot help](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) <!--- boilerplate: notify ---> Thank you for raising this idea @sethernet. Would you like to raise a PR for this issue? Theoretically the parameter `cache_policy_id: 658327ea-f89d-4fab-a63d-7e88639e58f6` works already. But the default values for min/max/default_ttl that are added, are incompatible together.
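A standalone sketch (a hypothetical helper, not the module's exact code) of the rule the patch above enforces: a cache behavior may carry either a managed `cache_policy_id` or the legacy `forwarded_values` block, but not both, since the TTL defaults injected for `forwarded_values` conflict with a cache policy.

```python
def validate_cache_behavior(behavior: dict, is_default: bool = False) -> dict:
    """Reject cache behaviors that mix a cache policy with forwarded_values."""
    name = "Default cache behavior" if is_default else f"Cache behavior for path {behavior.get('path_pattern')}"
    if behavior.get("cache_policy_id") and behavior.get("forwarded_values"):
        raise ValueError(f"{name} cannot have both a cache_policy_id and a forwarded_values option.")
    return behavior

# Managed policies only: passes validation, no per-behavior TTL defaults are needed.
validate_cache_behavior(
    {
        "cache_policy_id": "658327ea-f89d-4fab-a63d-7e88639e58f6",
        "origin_request_policy_id": "88a5eaf4-2fd4-4709-b370-b4c650ea3fcf",
    },
    is_default=True,
)
```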
2024-01-09T11:05:21
ansible-collections/community.aws
2039
ansible-collections__community.aws-2039
[ "1879" ]
441c78841085fdbc39b6f58169e33fc404ba090c
diff --git a/plugins/modules/mq_broker.py b/plugins/modules/mq_broker.py --- a/plugins/modules/mq_broker.py +++ b/plugins/modules/mq_broker.py @@ -124,6 +124,19 @@ - At least one must be provided during creation. type: list elements: str + wait: + description: + - Specifies whether the module waits for the desired C(state). + - The time to wait can be controlled by setting I(wait_timeout). + type: bool + default: false + version_added: 7.1.0 + wait_timeout: + description: + - How long to wait (in seconds) for the broker to reach the desired state if I(wait=true). + default: 900 + type: int + version_added: 7.1.0 extends_documentation_fragment: - amazon.aws.boto3 @@ -215,6 +228,9 @@ # handled by AnsibleAWSModule pass +from time import sleep +from time import time + from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule @@ -384,22 +400,77 @@ def get_broker_info(conn, module, broker_id): module.fail_json_aws(e, msg="Couldn't get broker details.") +def wait_for_status(conn, module): + interval_secs = 5 + timeout = module.params.get("wait_timeout", 900) + broker_name = module.params.get("broker_name") + desired_state = module.params.get("state") + done = False + + paginator = conn.get_paginator("list_brokers") + page_iterator = paginator.paginate(PaginationConfig={"MaxItems": 100, "PageSize": 100, "StartingToken": ""}) + wait_timeout = time() + timeout + + while wait_timeout > time(): + try: + filtered_iterator = page_iterator.search(f"BrokerSummaries[?BrokerName == `{broker_name}`][]") + broker_list = list(filtered_iterator) + + if module.check_mode: + return + + if len(broker_list) < 1 and desired_state == "absent": + done = True + break + + if desired_state in ["present", "rebooted"] and broker_list[0]["BrokerState"] == "RUNNING": + done = True + break + + if broker_list[0]["BrokerState"] == "CREATION_FAILED": + break + + sleep(interval_secs) + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't paginate brokers.") + + if not done: + module.fail_json(msg="desired state not reached") + + def reboot_broker(conn, module, broker_id): + wait = module.params.get("wait") + try: - return conn.reboot_broker(BrokerId=broker_id) + response = conn.reboot_broker(BrokerId=broker_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't reboot broker.") + if wait: + wait_for_status(conn, module) + + return response + def delete_broker(conn, module, broker_id): + wait = module.params.get("wait") + try: - return conn.delete_broker(BrokerId=broker_id) + response = conn.delete_broker(BrokerId=broker_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete broker.") + if wait: + wait_for_status(conn, module) + + return response + def create_broker(conn, module): kwargs = _fill_kwargs(module) + wait = module.params.get("wait") + if "EngineVersion" in kwargs and kwargs["EngineVersion"] == "latest": kwargs["EngineVersion"] = get_latest_engine_version(conn, module, kwargs["EngineType"]) if kwargs["AuthenticationStrategy"] == "LDAP": @@ -416,11 +487,15 @@ def create_broker(conn, module): changed = True result = conn.create_broker(**kwargs) # + if wait: + wait_for_status(conn, module) + return {"broker": camel_dict_to_snake_dict(result, ignore_list=["Tags"]), "changed": changed} def 
update_broker(conn, module, broker_id): kwargs = _fill_kwargs(module, apply_defaults=False, ignore_create_params=True) + wait = module.params.get("wait") # replace name with id broker_name = kwargs["BrokerName"] del kwargs["BrokerName"] @@ -443,6 +518,9 @@ def update_broker(conn, module, broker_id): api_result = conn.update_broker(**kwargs) # # + if wait: + wait_for_status(conn, module) + return {"broker": result, "changed": changed} @@ -484,6 +562,8 @@ def main(): argument_spec = dict( broker_name=dict(required=True, type="str"), state=dict(default="present", choices=["present", "absent", "restarted"]), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=900, type="int"), # parameters only allowed on create deployment_mode=dict(choices=["SINGLE_INSTANCE", "ACTIVE_STANDBY_MULTI_AZ", "CLUSTER_MULTI_AZ"]), use_aws_owned_key=dict(type="bool"),
diff --git a/tests/integration/targets/mq/tasks/broker_tests.yml b/tests/integration/targets/mq/tasks/broker_tests.yml --- a/tests/integration/targets/mq/tasks/broker_tests.yml +++ b/tests/integration/targets/mq/tasks/broker_tests.yml @@ -4,6 +4,7 @@ security_groups: "{{ broker_sg_ids.split(',') }}" subnet_ids: "{{ broker_subnet_ids.split(',') }}" tags: "{{ tags }}" + wait: true register: result - set_fact: broker_id: "{{ result.broker['broker_id'] }}" @@ -19,20 +20,10 @@ - ( result.changed | bool ) - result_c1.broker['broker_id'] == broker_id - result_c1.broker['broker_name'] == broker_name - - result_c1.broker['broker_state'] == 'CREATION_IN_PROGRESS' + - result_c1.broker['broker_state'] == 'RUNNING' - ( result_c1.broker['storage_type'] | upper ) == 'EFS' - result_c1.broker['tags'] == tags when: not ansible_check_mode -- debug: - msg: "Wait until broker {{ broker_name }} ({{ broker_id }}) enters running state. This may take several minutes" -- name: wait for startup - mq_broker_info: - broker_id: "{{ broker_id }}" - register: result - until: result.broker['broker_state'] == 'RUNNING' - retries: 15 - delay: 60 - when: not ansible_check_mode - name: repeat creation mq_broker: broker_name: "{{ broker_name }}"
mq_broker: add wait and wait_for parameters ### Summary Add wait and wait_timeout parameters to wait for a specific state. https://github.com/ansible-collections/community.aws/pull/1831#issuecomment-1582566844 ### Issue Type Feature Idea ### Component Name mq_broker ### Additional Information ```yaml wait: description: - Specifies whether the module waits for the desired C(state). type: bool wait_timeout: description: - How long to wait (in seconds) for the broker to reach the desired state. default: 600 type: int ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
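A hedged boto3 sketch of what the requested `wait`/`wait_timeout` options have to do (the merged module paginates `list_brokers` by broker name; `describe_broker` by ID is used here only to keep the example short):

```python
import time

import boto3

def wait_for_running(broker_id: str, timeout: int = 900, interval: int = 5) -> None:
    """Poll the broker until it is RUNNING or the timeout expires."""
    mq = boto3.client("mq")
    deadline = time.time() + timeout
    while time.time() < deadline:
        state = mq.describe_broker(BrokerId=broker_id)["BrokerState"]
        if state == "RUNNING":
            return
        if state == "CREATION_FAILED":
            raise RuntimeError("broker creation failed")
        time.sleep(interval)
    raise TimeoutError("desired state not reached")
```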
2024-01-09T11:05:25
ansible-collections/community.aws
2045
ansible-collections__community.aws-2045
[ "1946" ]
9e0829519746dafd56c5da73e012ef759f4a79bb
diff --git a/plugins/modules/msk_cluster.py b/plugins/modules/msk_cluster.py --- a/plugins/modules/msk_cluster.py +++ b/plugins/modules/msk_cluster.py @@ -54,6 +54,17 @@ - kafka.m5.xlarge - kafka.m5.2xlarge - kafka.m5.4xlarge + - kafka.m5.8xlarge + - kafka.m5.12xlarge + - kafka.m5.16xlarge + - kafka.m5.24xlarge + - kafka.m7g.large + - kafka.m7g.xlarge + - kafka.m7g.2xlarge + - kafka.m7g.4xlarge + - kafka.m7g.8xlarge + - kafka.m7g.12xlarge + - kafka.m7g.16xlarge default: kafka.t3.small type: str ebs_volume_size: @@ -662,6 +673,17 @@ def main(): "kafka.m5.xlarge", "kafka.m5.2xlarge", "kafka.m5.4xlarge", + "kafka.m5.8xlarge", + "kafka.m5.12xlarge", + "kafka.m5.16xlarge", + "kafka.m5.24xlarge", + "kafka.m7g.large", + "kafka.m7g.xlarge", + "kafka.m7g.2xlarge", + "kafka.m7g.4xlarge", + "kafka.m7g.8xlarge", + "kafka.m7g.12xlarge", + "kafka.m7g.16xlarge", ], default="kafka.t3.small", ),
add an MSK broker instance type ### Summary Broker instances of MSK with the current provisioning type can be selected up to m5.24xlarge, but only m5.4xlarge is available for the ansible module. ### Issue Type Feature Idea ### Component Name msk_cluster ### Additional Information <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - name: create msk cluster msk_cluster: name: test-msk-cluster state: present version: "2.8.1" nodes: 2 ebs_volume_size: 10 subnets: subnet-temp wait: true instance_type: "kafka.m5.8xlarge" ``` received an error message that `FAILED! => {"changed": false, "msg": "value of instance_type must be one of: kafka.t3.small, kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge, kafka.m5.4xlarge, got: kafka.m5.8xlarge"}` - https://docs.aws.amazon.com/msk/latest/developerguide/msk-create-cluster.html#broker-instance-types ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
2024-01-11T14:21:24
ourownstory/neural_prophet
456
ourownstory__neural_prophet-456
[ "455" ]
bbe0021b17445cae5d5ef71f673e89ed333962ee
diff --git a/neuralprophet/benchmark.py b/neuralprophet/benchmark.py --- a/neuralprophet/benchmark.py +++ b/neuralprophet/benchmark.py @@ -1,14 +1,17 @@ from dataclasses import dataclass, field from typing import List, Optional, Tuple, Type from abc import ABC, abstractmethod +import logging import pandas as pd import numpy as np from neuralprophet import NeuralProphet - NeuralProphetModel = NeuralProphet +log = logging.getLogger("NP.benchmark") +log.warning("Benchmarking Framework is not covered by tests. Please report any bugs you find.") + @dataclass class Dataset: @@ -170,9 +173,9 @@ class SimpleBenchmark: >>> results_train, results_val = benchmark.run() """ - model_classes_and_params: List[tuple[Model, dict]] + model_classes_and_params: List[Tuple[Model, dict]] datasets: List[Dataset] - metrics: list[str] + metrics: List[str] test_percentage: float def setup_experiments(self):
benchmark typing error ### Discussed in https://github.com/ourownstory/neural_prophet/discussions/449 <div type='discussions-op-text'> <sup>Originally posted by **akthammomani** October 23, 2021</sup> @ourownstory, I went a head and installed the Library again using below in a fresh env: ``` git clone <copied link from github> cd neural_prophet pip install . ``` so when trying below: ``` from neuralprophet import NeuralProphet, set_log_level from neuralprophet.benchmark import Dataset, NeuralProphetModel, SimpleExperiment, CrossValidationExperiment ``` I started to see a new error as shown below: ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) ~\AppData\Local\Temp/ipykernel_43524/177920820.py in <module> 1 from neuralprophet import NeuralProphet, set_log_level ----> 2 from neuralprophet.benchmark import Dataset, NeuralProphetModel, SimpleExperiment, CrossValidationExperiment ~\Anaconda3\envs\nprophet_v1\lib\site-packages\neuralprophet\benchmark.py in <module> 158 159 --> 160 @dataclass 161 class SimpleBenchmark: 162 """ ~\Anaconda3\envs\nprophet_v1\lib\site-packages\neuralprophet\benchmark.py in SimpleBenchmark() 171 """ 172 --> 173 model_classes_and_params: List[tuple[Model, dict]] 174 datasets: List[Dataset] 175 metrics: list[str] TypeError: 'type' object is not subscriptable ``` Please let me know if you have any suggestions.... Thank you very much BR//Aktham </div>
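A minimal, hedged illustration of the failure and the fix: built-in generics such as `tuple[Model, dict]` are only subscriptable on Python >= 3.9, and class-body annotations are evaluated at import time, so the dataclass fields must use `typing.Tuple`/`typing.List` to stay importable on older interpreters. The class below is a hypothetical stand-in for the real `SimpleBenchmark`.

```python
from dataclasses import dataclass
from typing import List, Tuple

@dataclass
class SimpleBenchmarkSketch:
    # typing.List/typing.Tuple work on Python 3.7+, unlike list[...]/tuple[...].
    model_classes_and_params: List[Tuple[type, dict]]
    metrics: List[str]

print(SimpleBenchmarkSketch(model_classes_and_params=[(object, {})], metrics=["MAE"]))
```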
2021-10-24T17:04:18
ourownstory/neural_prophet
705
ourownstory__neural_prophet-705
[ "692", "692" ]
02cbe0461453ef5d41d50808747609c892362bac
diff --git a/neuralprophet/df_utils.py b/neuralprophet/df_utils.py --- a/neuralprophet/df_utils.py +++ b/neuralprophet/df_utils.py @@ -1192,12 +1192,12 @@ def _infer_frequency(df, freq, min_freq_percentage=0.7): if frequencies[np.argmax(distribution)] == 2.6784e15 or frequencies[np.argmax(distribution)] == 2.592e15: dominant_freq_percentage = get_dist_considering_two_freqs(distribution) / len(df["ds"]) num_freq = 2.6784e15 - inferred_freq = "MS" if pd.to_datetime(df["ds"][0]).day < 15 else "M" + inferred_freq = "MS" if pd.to_datetime(df["ds"].iloc[0]).day < 15 else "M" # exception - yearly df (365 days freq or 366 days freq) elif frequencies[np.argmax(distribution)] == 3.1536e16 or frequencies[np.argmax(distribution)] == 3.16224e16: dominant_freq_percentage = get_dist_considering_two_freqs(distribution) / len(df["ds"]) num_freq = 3.1536e16 - inferred_freq = "YS" if pd.to_datetime(df["ds"][0]).day < 15 else "Y" + inferred_freq = "YS" if pd.to_datetime(df["ds"].iloc[0]).day < 15 else "Y" # exception - quarterly df (most common == 92 days - 3rd,4th quarters and second most common == 91 days 2nd quarter and 1st quarter in leap year) elif ( frequencies[np.argmax(distribution)] == 7.9488e15 @@ -1205,7 +1205,7 @@ def _infer_frequency(df, freq, min_freq_percentage=0.7): ): dominant_freq_percentage = get_dist_considering_two_freqs(distribution) / len(df["ds"]) num_freq = 7.9488e15 - inferred_freq = "QS" if pd.to_datetime(df["ds"][0]).day < 15 else "Q" + inferred_freq = "QS" if pd.to_datetime(df["ds"].iloc[0]).day < 15 else "Q" # exception - Business day (most common == day delta and second most common == 3 days delta and second most common is at least 12% of the deltas) elif ( frequencies[np.argmax(distribution)] == 8.64e13
KeyError when creating a global model using freq='MS' for fit function **Prerequisites** * [X] Put an X between the brackets on this line if you have done all of the following: * Reproduced the problem in a new virtualenv with only neuralprophet installed, directly from github: ```shell git clone <copied link from github> cd neural_prophet pip install . ``` * Checked the Answered Questions on the Github Disscussion board: https://github.com/ourownstory/neural_prophet/discussions If you have the same question but the Answer does not solve your issue, please continue the conversation there. * Checked that your issue isn't already filed: https://github.com/ourownstory/neural_prophet/issues If you have the same issue but there is a twist to your situation, please add an explanation there. * Considered whether your bug might actually be solveable by getting a question answered: * Please [post a package use question](https://github.com/ourownstory/neural_prophet/discussions/categories/q-a-get-help-using-neuralprophet) * Please [post a forecasting best practice question](https://github.com/ourownstory/neural_prophet/discussions/categories/q-a-forecasting-best-practices) * Please [post an idea or feedback](https://github.com/ourownstory/neural_prophet/discussions/categories/ideas-feedback) **Describe the bug** A clear and concise description of what the bug is. Bug Description: When I run the fit function and use freq='MS' as a parameter I am getting a "KeyError: 0" and I suspect something is wrong with line 1195 in the _infer_frequency function in the df_utils.py file **To Reproduce** Steps to reproduce the behavior: 1. With the data 'the data from the energy_data_example.ipynb file' 2. Setting the model hyperparameters 'use defaults' 3. After running these code lines 'first change the df_ercot data such that it only includes the first day of each month and get rid of the hour stamp to prep data for the fit function. Also, remove rows where there are duplicates (make sure there is only one row for each first day of month date)' 4. When using this function 'run all the lines and change the fit function parameter 'freq' to be 'MS' instead of 'H'' 5. See error **Expected behavior** A clear and concise description of what you expected to happen. I expected the global model to run as normal and fit my data with no errors. **What actually happens** Describe what happens, and how often it happens. I get a Key Error: 0 and It happens everytime I use freq = "MS" in the fit function **Screenshots** Error Messages screenshots: ![image](https://user-images.githubusercontent.com/57101911/180514510-fd474ca1-ef30-44b0-bbba-1649f1cdc618.png) From forecaster.py: ![image](https://user-images.githubusercontent.com/57101911/180514627-8b44dca9-ca7d-4bec-8755-9bbfe964d96e.png) From df_utils.py ![image](https://user-images.githubusercontent.com/57101911/180514751-f86cbadb-ca87-4601-88f3-5d3cb9ecbc1e.png) ![image](https://user-images.githubusercontent.com/57101911/180514835-ab2405d8-f123-4451-b5b5-b663ed4d572a.png) ![image](https://user-images.githubusercontent.com/57101911/180514872-b91e08af-b636-4952-a390-36523bb5dd1e.png) Where I think the bug is: ![image](https://user-images.githubusercontent.com/57101911/180515505-b2f60bf8-fddf-4139-a8d9-7b0d37509f49.png) If applicable, add screenshots and console printouts to help explain your problem. **Environement (please complete the following information):** - Python environment [e.g. 
Python 3.8, in standalone venv with no other packages] - NeuralProphet version and install method [e.g. 2.7, installed from PYPI with `pip install neuralprophet`] I downloaded installed the package in a standalone venv directly from this GitHub **Additional context** Add any other context about the problem here.
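A small, self-contained reproduction of the underlying pandas pitfall fixed above (synthetic data, not the ERCOT set): after filtering or deduplicating a monthly dataframe, the integer index no longer starts at 0, so label-based `df["ds"][0]` raises `KeyError: 0` while positional `df["ds"].iloc[0]` keeps working.

```python
import pandas as pd

df = pd.DataFrame({"ds": pd.date_range("2021-01-01", periods=3, freq="MS")})
df = df[df["ds"] > "2021-01-15"]  # index is now [1, 2], no longer starting at 0

print(df["ds"].iloc[0])  # positional access: first remaining month start
try:
    print(df["ds"][0])   # label-based access: raises KeyError: 0
except KeyError as err:
    print("KeyError:", err)
```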
2022-08-05T20:03:32
ourownstory/neural_prophet
714
ourownstory__neural_prophet-714
[ "49" ]
3081b6ba40e0bf15f57ab3c5e8dbaae7aa875a08
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -2293,7 +2293,12 @@ def _add_batch_regularizations(self, loss, e, iter_progress): reg_events_loss = utils.reg_func_events(self.config_events, self.config_country_holidays, self.model) reg_loss += reg_events_loss - # Regularize regressors: sparsify regressor features coefficients + # Regularize lagged regressors: sparsify covariate features coefficients + if self.config_covar is not None: + reg_covariate_loss = utils.reg_func_covariates(self.config_covar, self.model) + reg_loss += reg_covariate_loss + + # Regularize future regressors: sparsify regressor features coefficients if self.config_regressors is not None: reg_regressor_loss = utils.reg_func_regressors(self.config_regressors, self.model) reg_loss += reg_regressor_loss diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py --- a/neuralprophet/utils.py +++ b/neuralprophet/utils.py @@ -6,6 +6,7 @@ import torch from collections import OrderedDict from neuralprophet import hdays as hdays_part2 +from neuralprophet import utils_torch import holidays as pyholidays import warnings import logging @@ -133,6 +134,33 @@ def reg_func_events(config_events, config_country_holidays, model): return reg_events_loss +def reg_func_covariates(config_covariates, model): + """ + Regularization of lagged covariates to induce sparsity + + Parameters + ---------- + config_covariates : configure.Covar + Configurations for user specified lagged covariates + model : TimeNet + TimeNet model object + + Returns + ------- + scalar + Regularization loss + """ + reg_covariate_loss = 0.0 + for covariate, configs in config_covariates.items(): + reg_lambda = configs.reg_lambda + if reg_lambda is not None: + weights = model.get_covar_weights(covariate) + loss = torch.mean(utils_torch.penalize_nonzero(weights)).squeeze() + reg_covariate_loss += reg_lambda * loss + + return reg_covariate_loss + + def reg_func_regressors(config_regressors, model): """ Regularization of regressors coefficients to induce sparsity
diff --git a/tests/test_regularization.py b/tests/test_regularization.py --- a/tests/test_regularization.py +++ b/tests/test_regularization.py @@ -6,7 +6,7 @@ import pandas as pd import pytest import torch -from utils.dataset_generators import generate_event_dataset, generate_holiday_dataset +from utils.dataset_generators import generate_event_dataset, generate_holiday_dataset, generate_lagged_regressor_dataset from neuralprophet import NeuralProphet, df_utils from neuralprophet.utils import reg_func_abs @@ -116,3 +116,48 @@ def test_regularization_events(): # print(to_preserve) assert np.mean(to_reduce) < 0.1 assert np.mean(to_preserve) > 0.5 + + +def test_regularization_lagged_regressor(): + """ + Test case for regularization feature of lagged regressors. Utlizes a + synthetic dataset with 4 noise-based lagged regressors (a, b, c, d). + The first and last lagged regressors (a, d) are expected to have a weight + close to 1. The middle lagged regressors (b, c) meanwhile are expected to + have a weight close to 0, due to the regularization. All other model + components are turned off to avoid side effects. + """ + df, lagged_regressors = generate_lagged_regressor_dataset(periods=100) + df = df_utils.check_dataframe(df, check_y=False) + + m = NeuralProphet( + epochs=30, + batch_size=8, + learning_rate=0.1, + yearly_seasonality=False, + weekly_seasonality=False, + daily_seasonality=False, + growth="off", + normalize="off", + ) + m = m.add_lagged_regressor( + n_lags=3, + names=[lagged_regressor for lagged_regressor, _ in lagged_regressors], + regularization=0.1, + ) + m.fit(df, freq="D") + + lagged_regressors_config = dict(lagged_regressors) + + for name in m.config_covar.keys(): + weights = m.model.get_covar_weights(name).detach().numpy() + weight_average = np.average(weights) + + lagged_regressor_weight = lagged_regressors_config[name] + + if lagged_regressor_weight > 0.9: + assert weight_average > 0.6 + else: + assert weight_average < 0.1 + + print(name, weight_average, lagged_regressors_config[name]) diff --git a/tests/utils/dataset_generators.py b/tests/utils/dataset_generators.py --- a/tests/utils/dataset_generators.py +++ b/tests/utils/dataset_generators.py @@ -1,4 +1,5 @@ import pandas as pd +import numpy as np from neuralprophet.time_dataset import make_country_specific_holidays_df @@ -34,3 +35,35 @@ def generate_event_dataset( df.loc[event, "y"] = y_events_override.get(event, y_event) return df, events + + +def generate_lagged_regressor_dataset(periods=31): + """ + Generate dataset for tests on lagged regressor. + Columns are: ds, lagged_regressors (one entry each), y + Each lagged regressor is random noise (range 0 to 1). + y is a weighted sum of the the previous 3 lagged regressors. + """ + lagged_regressors = [("a", 1), ("b", 0.1), ("c", 0.1), ("d", 1)] + + dates = pd.date_range("2022-01-01", periods=periods, freq="D") + + df = pd.DataFrame({"ds": dates}, index=dates) + + for lagged_regressor, _ in lagged_regressors: + df[lagged_regressor] = np.random.random(periods) + + df["weighted_sum"] = sum( + df[lagged_regressor] * lagged_regressor_scale for lagged_regressor, lagged_regressor_scale in lagged_regressors + ) + df["y"] = 0 + + overlap = 3 + + for pos, (index, data) in enumerate(df.iterrows()): + if pos >= overlap: + df.loc[index, "y"] = sum([df.iloc[pos - lag - 1]["weighted_sum"] for lag in range(overlap)]) + + df = df.drop(columns=["weighted_sum"]) + + return df, lagged_regressors
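A hedged, free-standing sketch of the idea behind the `reg_func_covariates` helper added above: scale a sparsity penalty on each covariate's lag weights by its `reg_lambda` and add it to the training loss. A plain mean absolute weight stands in here for the library's `penalize_nonzero`.

```python
import torch

def covariate_sparsity_penalty(weights: torch.Tensor, reg_lambda: float) -> torch.Tensor:
    """Simple L1-style stand-in for the library's non-zero penalty."""
    return reg_lambda * weights.abs().mean()

# Lag weights for one covariate: large weights keep contributing to the penalty,
# near-zero weights are cheap, which nudges irrelevant lags toward zero.
lag_weights = torch.tensor([[0.8, 0.05, -0.02, 0.9]])
base_loss = torch.tensor(1.0)
total_loss = base_loss + covariate_sparsity_penalty(lag_weights, reg_lambda=0.1)
print(total_loss)
```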
regularize covariates
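A minimal usage sketch of the lagged-regressor regularization exercised by the patch and test above. The dataframe, the column names ("temp", "humidity"), and the regularization strength are illustrative assumptions, not part of the original change:

```
# Sketch only: synthetic data with two lagged regressors, one informative.
import numpy as np
import pandas as pd
from neuralprophet import NeuralProphet

dates = pd.date_range("2022-01-01", periods=100, freq="D")
df = pd.DataFrame({
    "ds": dates,
    "temp": np.random.random(100),      # informative covariate
    "humidity": np.random.random(100),  # noise covariate
})
df["y"] = df["temp"].shift(1).fillna(0.0)  # y driven by lagged "temp"

m = NeuralProphet(epochs=10)
# regularization > 0 activates the sparsity penalty added in the patch above
m = m.add_lagged_regressor(n_lags=3, names=["temp", "humidity"], regularization=0.1)
m.fit(df, freq="D")
```

With the penalty active, the learned weights of the uninformative covariate should shrink toward zero, which is what the new test asserts.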
@ourownstory this is the issue we were talking about right? feel free to assign to me then
2022-08-15T16:57:27
ourownstory/neural_prophet
718
ourownstory__neural_prophet-718
[ "689" ]
150b2ac9dc0dc5f2a010ccd9f74fb177f697fa6b
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -537,6 +537,9 @@ def add_country_holidays(self, country_name, lower_window=0, upper_window=0, reg and create the corresponding configs such as lower, upper windows and the regularization parameters + Holidays can only be added for a single country. Calling the function + multiple times will override already added country holidays. + Parameters ---------- country_name : string @@ -552,6 +555,10 @@ def add_country_holidays(self, country_name, lower_window=0, upper_window=0, reg """ if self.fitted: raise Exception("Country must be specified prior to model fitting.") + if self.country_holidays_config: + log.warning( + "Country holidays can only be added for a single country. Previous country holidays were overridden." + ) if regularization is not None: if regularization < 0:
diff --git a/tests/test_unit.py b/tests/test_unit.py --- a/tests/test_unit.py +++ b/tests/test_unit.py @@ -896,3 +896,16 @@ def install(package): metadata_version_ = metadata.version("neuralprophet") assert metadata_version_ == init_version assert metadata_version_ == file_version + + +def test_add_country_holiday_multiple_calls_warning(caplog): + error_message = ( + "Country holidays can only be added for a single country. Previous country holidays were overridden." + ) + + m = NeuralProphet() + m.add_country_holidays("US") + assert error_message not in caplog.text + + m.add_country_holidays("Germany") + assert error_message in caplog.text
add_country_holidays replaces prior holiday values, in contrast to its name Calling `add_country_holidays` overrides any previously defined holidays, meaning repeated calls of `add_country_holidays` with different countries only result in the holidays of the final call (see example below). Looking at the implementation/documentation of prophet, their function behaves the same way, so we should either 1) add the same hint to the documentation (see [reference](https://github.com/facebook/prophet/blob/10310ceb2da05837a198db6714d658a1e0a32478/python/prophet/forecaster.py#L714)) and possibly throw an exception/warning in case people call it multiple times, 2) differ from the behavior of prophet and actually allow multiple countries, or 3) rename the function to `set_country_holidays` to clarify its behavior. I would be curious to hear whether I am the only one who got confused, and which direction forward is best. Example from an integration test with multiple `add_country_holidays` calls: https://github.com/ourownstory/neural_prophet/blob/main/tests/test_integration.py#L429-L434
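A short sketch of the overriding behaviour described above; the country names only illustrate the point:

```
from neuralprophet import NeuralProphet

m = NeuralProphet()
m.add_country_holidays("US")
m.add_country_holidays("Germany")  # replaces, rather than extends, the US holidays
# only German holidays end up in the model configuration
```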
Adjust the documentation as discussed
2022-08-16T21:07:43
ourownstory/neural_prophet
812
ourownstory__neural_prophet-812
[ "806" ]
ea717ad31dec216ba670492f9b89fd075621dc3a
diff --git a/neuralprophet/df_utils.py b/neuralprophet/df_utils.py --- a/neuralprophet/df_utils.py +++ b/neuralprophet/df_utils.py @@ -243,7 +243,7 @@ def init_data_params( Parameters ---------- - df : pd.DataFrame, dict (deprecated) + df : pd.DataFrame data to compute normalization parameters from. normalize : str Type of normalization to apply to the time series. @@ -471,7 +471,7 @@ def check_dataframe(df, check_y=True, covariates=None, regressors=None, events=N Parameters ---------- - df : pd.DataFrame, dict (deprecated) + df : pd.DataFrame containing column ``ds`` check_y : bool if df must have series values @@ -663,7 +663,7 @@ def crossvalidation_split_df( Parameters ---------- - df : pd.DataFrame, dict (deprecated) + df : pd.DataFrame data n_lags : int identical to NeuralProphet @@ -676,7 +676,7 @@ def crossvalidation_split_df( fold_overlap_pct : float percentage of overlap between the validation folds (default: 0.0) global_model_cv_type : str - Type of crossvalidation to apply to the dict of time series. + Type of crossvalidation to apply to the time series. options: @@ -892,8 +892,8 @@ def split_df(df, n_lags, n_forecasts, valid_p=0.2, inputs_overbleed=True, local_ Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data n_lags : int identical to NeuralProphet n_forecasts : int @@ -1273,12 +1273,12 @@ def _infer_frequency(df, freq, min_freq_percentage=0.7): def infer_frequency(df, freq, n_lags, min_freq_percentage=0.7): - """Automatically infers frequency of dataframe or dict of dataframes. + """Automatically infers frequency of dataframe. Parameters ---------- - df : pd.DataFrame, dict (deprecated) - Dataframe with columns ``ds`` datestamps and ``y`` time series values + df : pd.DataFrame + Dataframe with columns ``ds`` datestamps and ``y`` time series values, and optionally``ID`` freq : str Data step sizes, i.e. frequency of data recording, diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -615,8 +615,8 @@ def fit(self, df, freq="auto", validation_df=None, progress="bar", minimal=False Parameters ---------- - df : pd.DataFrame, dict (deprecated) - containing column ``ds``, ``y`` with all data + df : pd.DataFrame + containing column ``ds``, ``y``, and optionally``ID`` with all data freq : str Data step sizes. 
Frequency of data recording, @@ -685,8 +685,8 @@ def predict(self, df, decompose=True, raw=False): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with data decompose : bool whether to add individual components of forecast to the dataframe raw : bool @@ -748,8 +748,8 @@ def test(self, df): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with with holdout data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with with holdout data Returns ------- pd.DataFrame @@ -774,8 +774,8 @@ def split_df(self, df, freq="auto", valid_p=0.2, local_split=False): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data freq : str data step sizes. Frequency of data recording, @@ -906,8 +906,8 @@ def crossvalidation_split_df( Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data freq : str data step sizes. Frequency of data recording, @@ -1070,8 +1070,8 @@ def double_crossvalidation_split_df(self, df, freq="auto", k=5, valid_pct=0.10, Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data freq : str data step sizes. Frequency of data recording, @@ -1110,8 +1110,8 @@ def create_df_with_events(self, df, events_df): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data events_df : dict, pd.DataFrame containing column ``ds`` and ``event`` @@ -1156,7 +1156,7 @@ def make_future_dataframe(self, df, events_df=None, regressors_df=None, periods= Parameters ---------- - df: pd.DataFrame, dict (deprecated) + df: pd.DataFrame History to date. DataFrame containing all columns up to present events_df : pd.DataFrame Future event occurrences corresponding to `periods` steps into future. 
@@ -1257,8 +1257,8 @@ def predict_trend(self, df, quantile=0.5): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data quantile : float the quantile in (0, 1) that needs to be predicted @@ -1292,8 +1292,8 @@ def predict_seasonal_components(self, df, quantile=0.5): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing columns ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing columns ``ds``, ``y``, and optionally``ID`` with all data quantile : float the quantile in (0, 1) that needs to be predicted @@ -1392,7 +1392,7 @@ def plot(self, fcst, df_name=None, ax=None, xlabel="ds", ylabel="y", figsize=(10 Parameters ---------- - fcst : pd.DataFrame, dict (deprecated) + fcst : pd.DataFrame output of self.predict. df_name : str ID from time series that should be plotted @@ -1552,7 +1552,7 @@ def plot_last_forecast( Parameters ---------- - fcst : pd.DataFrame, dict (deprecated) + fcst : pd.DataFrame output of self.predict. df_name : str ID from time series that should be plotted @@ -1640,7 +1640,7 @@ def plot_components( Parameters ---------- - fcst : pd.DataFrame, dict (deprecated) + fcst : pd.DataFrame output of self.predict df_name : str ID from time series that should be plotted @@ -1802,8 +1802,8 @@ def _create_dataset(self, df, predict_mode): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` and + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` and normalized columns normalized columns ``ds``, ``y``, ``t``, ``y_scaled`` predict_mode : bool specifies predict mode @@ -1965,8 +1965,8 @@ def _handle_missing_data(self, df, freq, predicting=False): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data freq : str data step sizes. 
Frequency of data recording, @@ -1994,8 +1994,8 @@ def _check_dataframe(self, df, check_y=True, exogenous=True): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data check_y : bool if df must have series values @@ -2078,8 +2078,8 @@ def _normalize(self, df): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data Returns ------- @@ -2100,8 +2100,8 @@ def _init_train_loader(self, df): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data Returns ------- @@ -2151,8 +2151,8 @@ def _init_val_loader(self, df): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data Returns ------- @@ -2307,10 +2307,10 @@ def _train(self, df, df_val=None, progress="bar"): Parameters ---------- - df : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data - df_val : pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with validation data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data + df_val : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with validation data progress : str Method of progress display. @@ -2464,8 +2464,8 @@ def _train_minimal(self, df, progress_bar=False): Parameters ---------- - df: pd.DataFrame, dict (deprecated) - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + df: pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data Returns ------- @@ -2710,7 +2710,7 @@ def _predict_raw(self, df, df_name, include_components=False): Parameters ---------- df : pd.DataFrame - dataframe or dict of dataframes containing column ``ds``, ``y`` with all data + dataframe containing column ``ds``, ``y``, and optionally``ID`` with all data df_name : str name of the data params from which the current dataframe refers to (only in case of local_normalization) include_components : bool diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py --- a/neuralprophet/time_dataset.py +++ b/neuralprophet/time_dataset.py @@ -20,8 +20,9 @@ def __init__(self, df, **kwargs): Parameters ---------- - df_dict : dict - Containing pd.DataFrame time series data + df : pd.DataFrame + dataframe containing column ``ds``, ``y``, and optionally``ID`` and + normalized columns normalized columns ``ds``, ``y``, ``t``, ``y_scaled`` **kwargs : dict Identical to :meth:`tabularize_univariate_datetime` """
[documentation] Correct all docstrings to talk about ID, not df (completely deprecate dicts)
2022-10-13T18:04:41
ourownstory/neural_prophet
832
ourownstory__neural_prophet-832
[ "824" ]
35eea3f6f604c595d0cafe9c285049d919332dc4
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -2567,9 +2567,9 @@ def _make_future_dataframe(self, df, events_df, regressors_df, periods, n_histor if len(df) < self.max_lags: raise ValueError( - "Insufficient input data for a prediction." - "Please supply historic observations (number of rows) of at least max_lags (max of number of n_lags)." - ) + "Insufficient input data for a prediction." + "Please supply historic observations (number of rows) of at least max_lags (max of number of n_lags)." + ) elif len(df) < self.max_lags + n_historic_predictions: log.warning( f"Insufficient data for {n_historic_predictions} historic forecasts, reduced to {len(df) - self.max_lags}." @@ -2687,7 +2687,7 @@ def _prepare_dataframe_to_predict(self, df): # Checks if len(df_i) == 0 or len(df_i) < self.max_lags: raise ValueError( - "Insufficient input data for a prediction." + "Insufficient input data for a prediction." "Please supply historic observations (number of rows) of at least max_lags (max of number of n_lags)." ) if len(df_i.columns) == 1 and "ds" in df_i:
diff --git a/tests/test_regularization.py b/tests/test_regularization.py --- a/tests/test_regularization.py +++ b/tests/test_regularization.py @@ -158,6 +158,6 @@ def test_regularization_lagged_regressor(): if lagged_regressor_weight > 0.9: assert weight_average > 0.5 else: - assert weight_average < 0.35 # Note: this should be < 0.1, but due to fitting issues, relaxed temporarily. + assert weight_average < 0.35 # Note: this should be < 0.1, but due to fitting issues, relaxed temporarily. print(name, weight_average, lagged_regressors_config[name])
Introduce automated black checks via GitHub Actions As I encountered formatting issues across several files during cleanup, I'd propose we add a GitHub Action to ensure all pull requests comply with black.
2022-10-17T14:46:05
ourownstory/neural_prophet
852
ourownstory__neural_prophet-852
[ "843" ]
8d3a1c1f83cdc3c70f407ecaa6a33f28c56781a1
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -82,7 +82,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] +html_static_path = ["_static", "images/np_highres.svg"] # html_sidebars = { '**': [ # "_templates/sidebar/brand.html", diff --git a/neuralprophet/df_utils.py b/neuralprophet/df_utils.py --- a/neuralprophet/df_utils.py +++ b/neuralprophet/df_utils.py @@ -1415,6 +1415,7 @@ def handle_negative_values(df, col, handle_negatives): def drop_missing_from_df(df, drop_missing, predict_steps, n_lags): """Drops windows of missing values in df according to the (lagged) samples that are dropped from TimeDataset. + Parameters ---------- df : pd.DataFrame @@ -1425,6 +1426,7 @@ def drop_missing_from_df(df, drop_missing, predict_steps, n_lags): identical to NeuralProphet n_lags : int identical to NeuralProphet + Returns ------- pd.DataFrame @@ -1456,6 +1458,7 @@ def drop_missing_from_df(df, drop_missing, predict_steps, n_lags): def join_dfs_after_data_drop(predicted, df, merge=False): """Creates the intersection between df and predicted, removing any dates that have been imputed and dropped in NeuralProphet.predict(). + Parameters ---------- df : pd.DataFrame @@ -1467,6 +1470,7 @@ def join_dfs_after_data_drop(predicted, df, merge=False): Options * (default) ``False``: Returns separate dataframes * ``True``: Merges predicted and df into one dataframe + Returns ------- pd.DataFrame
Automatic deployment of docs with GitHub Action > and if we can have different doc pages for different tags, we could automate the deployment via a GitHub Action, which would be ideal.
2022-10-18T19:12:00
ourownstory/neural_prophet
865
ourownstory__neural_prophet-865
[ "807" ]
46ae07a360ae85bed11f8a580c3cc861e71ffafd
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -1705,7 +1705,7 @@ def plot_last_forecast( ) def plot_components( - self, fcst, df_name=None, figsize=None, forecast_in_focus=None, residuals=False, plotting_backend="default" + self, fcst, df_name="__df__", figsize=None, forecast_in_focus=None, residuals=False, plotting_backend="default" ): """Plot the NeuralProphet forecast components. @@ -1779,6 +1779,7 @@ def plot_components( figsize=tuple(x * 70 for x in figsize) if figsize else (700, 210), forecast_in_focus=forecast_in_focus if forecast_in_focus else self.highlight_forecast_step_n, residuals=residuals, + df_name=df_name, ) else: return plot_components( @@ -1788,6 +1789,7 @@ def plot_components( figsize=figsize, forecast_in_focus=forecast_in_focus if forecast_in_focus else self.highlight_forecast_step_n, residuals=residuals, + df_name=df_name, ) def plot_parameters( diff --git a/neuralprophet/plot_forecast.py b/neuralprophet/plot_forecast.py --- a/neuralprophet/plot_forecast.py +++ b/neuralprophet/plot_forecast.py @@ -151,7 +151,14 @@ def plot( def plot_components( - m, fcst, quantile=0.5, forecast_in_focus=None, one_period_per_season=True, residuals=False, figsize=None + m, + fcst, + df_name="__df__", + quantile=0.5, + forecast_in_focus=None, + one_period_per_season=True, + residuals=False, + figsize=None, ): """Plot the NeuralProphet forecast components. @@ -161,6 +168,8 @@ def plot_components( Fitted model fcst : pd.DataFrame Output of m.predict + df_name : str + ID from time series that should be plotted quantile : float Quantile for which the forecast components are to be plotted forecast_in_focus : int @@ -346,13 +355,13 @@ def plot_components( if one_period_per_season: comp_name = comp["comp_name"] if comp_name.lower() == "weekly" or m.config_season.periods[comp_name].period == 7: - plot_weekly(m=m, ax=ax, quantile=quantile, comp_name=comp_name) + plot_weekly(m=m, ax=ax, quantile=quantile, comp_name=comp_name, df_name=df_name) elif comp_name.lower() == "yearly" or m.config_season.periods[comp_name].period == 365.25: - plot_yearly(m=m, ax=ax, quantile=quantile, comp_name=comp_name) + plot_yearly(m=m, ax=ax, quantile=quantile, comp_name=comp_name, df_name=df_name) elif comp_name.lower() == "daily" or m.config_season.periods[comp_name].period == 1: - plot_daily(m=m, ax=ax, quantile=quantile, comp_name=comp_name) + plot_daily(m=m, ax=ax, quantile=quantile, comp_name=comp_name, df_name=df_name) else: - plot_custom_season(m=m, ax=ax, quantile=quantile, comp_name=comp_name) + plot_custom_season(m=m, ax=ax, quantile=quantile, comp_name=comp_name, df_name=df_name) else: comp_name = f"season_{comp['comp_name']}" plot_forecast_component(fcst=fcst, ax=ax, comp_name=comp_name, plot_name=comp["plot_name"]) diff --git a/neuralprophet/plot_forecast_plotly.py b/neuralprophet/plot_forecast_plotly.py --- a/neuralprophet/plot_forecast_plotly.py +++ b/neuralprophet/plot_forecast_plotly.py @@ -202,7 +202,9 @@ def plot(fcst, quantiles, xlabel="ds", ylabel="y", highlight_forecast=None, line return fig -def plot_components(m, fcst, forecast_in_focus=None, one_period_per_season=True, residuals=False, figsize=(700, 210)): +def plot_components( + m, fcst, df_name="__df__", forecast_in_focus=None, one_period_per_season=True, residuals=False, figsize=(700, 210) +): """ Plot the NeuralProphet forecast components. 
@@ -212,6 +214,8 @@ def plot_components(m, fcst, forecast_in_focus=None, one_period_per_season=True, Fitted model fcst : pd.DataFrame Output of m.predict + df_name : str + ID from time series that should be plotted forecast_in_focus : int n-th step ahead forecast AR-coefficients to plot one_period_per_season : bool @@ -398,7 +402,7 @@ def plot_components(m, fcst, forecast_in_focus=None, one_period_per_season=True, comp.update({"multiplicative": True}) if one_period_per_season: comp_name = comp["comp_name"] - trace_object = get_seasonality_props(m, fcst, **comp) + trace_object = get_seasonality_props(m, fcst, df_name, **comp) else: comp_name = f"season_{comp['comp_name']}" trace_object = get_forecast_component_props(fcst=fcst, comp_name=comp_name, plot_name=comp["plot_name"]) @@ -722,7 +726,7 @@ def get_multiforecast_component_props( return {"traces": traces, "xaxis": xaxis, "yaxis": yaxis} -def get_seasonality_props(m, fcst, comp_name="weekly", multiplicative=False, quick=False, **kwargs): +def get_seasonality_props(m, fcst, df_name="__df__", comp_name="weekly", multiplicative=False, quick=False, **kwargs): """ Prepares a dictionary for plotting the selected seasonality with plotly @@ -732,6 +736,8 @@ def get_seasonality_props(m, fcst, comp_name="weekly", multiplicative=False, qui Fitted NeuralProphet model fcst : pd.DataFrame Output of m.predict + df_name : str + ID from time series that should be plotted comp_name : str Name of the component to plot multiplicative : bool @@ -757,6 +763,7 @@ def get_seasonality_props(m, fcst, comp_name="weekly", multiplicative=False, qui plot_points = np.floor(period * 24 * 60).astype(int) days = pd.to_datetime(np.linspace(start.value, end.value, plot_points, endpoint=False)) df_y = pd.DataFrame({"ds": days}) + df_y["ID"] = df_name if quick: predicted = m.predict_season_from_dates(m, dates=df_y["ds"], name=comp_name)
[enhancement] allow residuals plotting with local normalization Residuals plotting is currently disabled in `plot_components` when local normalization is used, but it should not be hard to allow.
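A sketch of the call this enhancement targets; the fitted model `m`, the multi-series dataframe `df`, and the series ID are assumed for illustration:

```
forecast = m.predict(df)
# residuals=True is the combination currently rejected under local normalization;
# df_name selects the single series whose components (and residuals) should be shown
fig = m.plot_components(forecast, df_name="series_1", residuals=True)
```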
2022-10-19T19:07:19
ourownstory/neural_prophet
899
ourownstory__neural_prophet-899
[ "879" ]
580dd69468fb3a652389f124c9bdcdb9013f6ce0
diff --git a/neuralprophet/df_utils.py b/neuralprophet/df_utils.py --- a/neuralprophet/df_utils.py +++ b/neuralprophet/df_utils.py @@ -1027,6 +1027,7 @@ def convert_events_to_features(df, config_events, events_df): dates = None else: dates = events_df[events_df.event == event].ds + df.reset_index(drop=True, inplace=True) event_feature[df.ds.isin(dates)] = 1.0 df[event] = event_feature return df
[bug] index mismatch with events in global setup By @rbeldagarcia in #827 Hello! Thanks for the update, some features are very useful, such as allowing a global model to be created from a single data frame. I am writing this comment to report some observations about the new version. I have tried to migrate my models written in 0.3.2 to 0.4.1 and have found some issues. In particular, these models are global models with events, and the problems appear in: Could you confirm whether requirements.txt is up to date? I would like to validate that my environment is correct before reporting this as a bug. In particular, I have tried with: pandas=1.3.5 neuralprophet=0.4.1 numpy=1.21.6 I have tried to create a new environment with requirements.txt, but it reports an error trying to install torch>=1.8.0. df_utils.py (line 1006)
```
for event in config_events.keys():
    event_feature = pd.Series([0.0] * df.shape[0])
    event_feature.index = df.index  # added to fix the errors; without this it reports NaN when the events are added to the dataframe in a global model.
    # The problem is that if df.index != event_feature.index, then it returns NaN when trying to do: df[event] = event_feature
    # events_df may be None in case ID from original df is not provided in events df
    if events_df is None:
        dates = None
    else:
        dates = events_df[events_df.event == event].ds
    event_feature.iloc[df.ds.isin(dates)] = 1.0  # Added 'iloc' to support events that are only present in some 'ID' groups
    df[event] = event_feature
return df
```
I have attached the code and a screenshot: [Neuralprophet041.zip](https://github.com/ourownstory/neural_prophet/files/9818669/Neuralprophet041.zip) _Originally posted by @rbeldagarcia in https://github.com/ourownstory/neural_prophet/discussions/827#discussioncomment-3913276_
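A small pandas-only illustration (independent of NeuralProphet) of the index-alignment behaviour behind the reported NaNs:

```
import pandas as pd

# DataFrame index does not start at 0, as happens for the second and later IDs
# after concatenating per-series frames in the global setup
df = pd.DataFrame({"ds": pd.date_range("2007-04-30", periods=3)}, index=[10, 11, 12])
event_feature = pd.Series([0.0, 1.0, 0.0])  # default RangeIndex 0..2

df["event"] = event_feature   # aligned on labels, so every row becomes NaN
df = df.reset_index(drop=True)
df["event"] = event_feature   # after the reset (as in the patch above): 0.0, 1.0, 0.0
```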
@rbeldagarcia Thank you for raising this. Would you be able to simplify your example code further to the absolute minimum needed to reproduce the error? That would help us to troubleshoot. Thank you! Oskar Hello! Of course, you can reproduce with the following code:
```
import pandas as pd
from neuralprophet import NeuralProphet

data_location = "https://raw.githubusercontent.com/ourownstory/neuralprophet-data/main/datasets/"
df_ercot = pd.read_csv(data_location + "multivariate/load_ercot_regions.csv")[0:600]

history_events_df = pd.DataFrame()
for zona in list(df_ercot.columns.drop('ds')):
    playoffs_history = pd.DataFrame({
        'ID': zona,
        'event': f'Evento_{zona}',
        'ds': pd.to_datetime(['2007-05-01'])})
    history_events_df = pd.concat((history_events_df, playoffs_history))

df_ercot.ds = pd.date_range(end='1/2/2008', periods=600)
df_ercot = df_ercot.melt(id_vars=['ds'], value_vars=list(df_ercot.columns.drop('ds')), var_name='ID', value_name='y')

m = NeuralProphet()
m = m.add_events(list(history_events_df.event.unique()))
history_df = m.create_df_with_events(df_ercot, history_events_df)
```
Thank you,
2022-10-25T00:13:13
ourownstory/neural_prophet
909
ourownstory__neural_prophet-909
[ "813" ]
59230b673b8326ea77c246d8b186b82a9c0eb0b6
diff --git a/neuralprophet/df_utils.py b/neuralprophet/df_utils.py --- a/neuralprophet/df_utils.py +++ b/neuralprophet/df_utils.py @@ -333,7 +333,6 @@ def init_data_params( def auto_normalization_setting(array): if len(np.unique(array)) < 2: - log.error("Encountered variable with singular value in training set. Please remove variable.") raise ValueError("Encountered variable with singular value in training set. Please remove variable.") # elif set(series.unique()) in ({True, False}, {1, 0}, {1.0, 0.0}, {-1, 1}, {-1.0, 1.0}): elif len(np.unique(array)) == 2: @@ -444,6 +443,14 @@ def check_single_dataframe(df, check_y, covariates, regressors, events): raise ValueError("Column ds has timezone specified, which is not supported. Remove timezone.") if len(df.ds.unique()) != len(df.ds): raise ValueError("Column ds has duplicate values. Please remove duplicates.") + regressors_to_remove = [] + if regressors is not None: + for reg in regressors: + if len(df[reg].unique()) < 2: + log.warning( + "Encountered future regressor with only unique values in training set. Automatically removed variable." + ) + regressors_to_remove.append(reg) columns = [] if check_y: @@ -479,7 +486,7 @@ def check_single_dataframe(df, check_y, covariates, regressors, events): df.index.name = None df = df.sort_values("ds") df = df.reset_index(drop=True) - return df + return df, regressors_to_remove def check_dataframe(df, check_y=True, covariates=None, regressors=None, events=None): @@ -507,11 +514,18 @@ def check_dataframe(df, check_y=True, covariates=None, regressors=None, events=N """ df, _, _, _, _ = prep_or_copy_df(df) checked_df = pd.DataFrame() + regressors_to_remove = [] for df_name, df_i in df.groupby("ID"): - df_aux = check_single_dataframe(df_i, check_y, covariates, regressors, events).copy(deep=True) + df_aux, reg = check_single_dataframe(df_i, check_y, covariates, regressors, events) + df_aux = df_aux.copy(deep=True) + if len(reg) > 0: + regressors_to_remove.append(*reg) df_aux["ID"] = df_name checked_df = pd.concat((checked_df, df_aux), ignore_index=True) - return checked_df + if len(regressors_to_remove) > 0: + regressors_to_remove = list(set(regressors_to_remove)) + checked_df = checked_df.drop(*regressors_to_remove, axis=1) + return checked_df, regressors_to_remove def _crossvalidation_split_df(df, n_lags, n_forecasts, k, fold_pct, fold_overlap_pct=0.0): diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -2152,13 +2152,17 @@ def _check_dataframe(self, df, check_y=True, exogenous=True): checked dataframe """ df, _, _, _, _ = df_utils.prep_or_copy_df(df) - return df_utils.check_dataframe( + df, regressors_to_remove = df_utils.check_dataframe( df=df, check_y=check_y, covariates=self.config_lagged_regressors if exogenous else None, regressors=self.config_regressors if exogenous else None, events=self.config_events if exogenous else None, ) + for reg in regressors_to_remove: + log.warning(f"Removing regressor {reg} because it is not present in the data.") + self.config_regressors.pop(reg) + return df def _validate_column_name(self, name, events=True, seasons=True, regressors=True, covariates=True): """Validates the name of a seasonality, event, or regressor.
diff --git a/tests/test_integration.py b/tests/test_integration.py --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -47,7 +47,7 @@ def test_train_eval_test(): learning_rate=LR, ) df = pd.read_csv(PEYTON_FILE, nrows=95) - df = df_utils.check_dataframe(df, check_y=False) + df, _ = df_utils.check_dataframe(df, check_y=False) df = m._handle_missing_data(df, freq="D", predicting=False) df_train, df_test = m.split_df(df, freq="D", valid_p=0.1) metrics = m.fit(df_train, freq="D", validation_df=df_test) @@ -59,7 +59,7 @@ def test_train_eval_test(): def test_df_utils_func(): log.info("testing: df_utils Test") df = pd.read_csv(PEYTON_FILE, nrows=95) - df = df_utils.check_dataframe(df, check_y=False) + df, _ = df_utils.check_dataframe(df, check_y=False) # test find_time_threshold df, _, _, _, _ = df_utils.prep_or_copy_df(df) @@ -1335,6 +1335,28 @@ def test_global_modeling_with_events_and_future_regressors(): fig3 = m.plot_parameters() +def test_auto_normalization(): + length = 100 + days = pd.date_range(start="2017-01-01", periods=length) + y = np.ones(length) + y[1] = 0 + y[2] = 2 + y[3] = 3.3 + df = pd.DataFrame({"ds": days, "y": y}) + df["future_constant"] = 1.0 + df["future_dynamic"] = df["y"] * 2 + m = NeuralProphet( + epochs=EPOCHS, + batch_size=BATCH_SIZE, + learning_rate=LR, + n_forecasts=5, + normalize="auto", + ) + m = m.add_future_regressor("future_constant") + m = m.add_future_regressor("future_dynamic") + _ = m.fit(df, freq="D") + + def test_minimal(): log.info("testing: Plotting") df = pd.read_csv(PEYTON_FILE, nrows=NROWS) diff --git a/tests/test_regularization.py b/tests/test_regularization.py --- a/tests/test_regularization.py +++ b/tests/test_regularization.py @@ -50,7 +50,7 @@ def test_reg_func_abs(): def test_regularization_holidays(): df = generate_holiday_dataset(y_holidays_override=Y_HOLIDAYS_OVERRIDE) - df = df_utils.check_dataframe(df, check_y=False) + df, _ = df_utils.check_dataframe(df, check_y=False) m = NeuralProphet( epochs=20, @@ -81,7 +81,7 @@ def test_regularization_holidays(): def test_regularization_events(): df, events = generate_event_dataset(y_events_override=Y_EVENTS_OVERRIDE) - df = df_utils.check_dataframe(df, check_y=False) + df, _ = df_utils.check_dataframe(df, check_y=False) m = NeuralProphet( epochs=50, @@ -132,7 +132,7 @@ def test_regularization_lagged_regressor(): components are turned off to avoid side effects. 
""" df, lagged_regressors = generate_lagged_regressor_dataset(periods=100) - df = df_utils.check_dataframe(df, check_y=False) + df, _ = df_utils.check_dataframe(df, check_y=False) m = NeuralProphet( epochs=30, diff --git a/tests/test_unit.py b/tests/test_unit.py --- a/tests/test_unit.py +++ b/tests/test_unit.py @@ -76,7 +76,7 @@ def test_time_dataset(): config_missing = configure.MissingDataHandling() df_train, df_val = df_utils.split_df(df_in, n_lags, n_forecasts, valid_p) # create a tabularized dataset from time series - df = df_utils.check_dataframe(df_train) + df, _ = df_utils.check_dataframe(df_train) local_data_params, global_data_params = df_utils.init_data_params(df=df, normalize="minmax") df = df.drop("ID", axis=1) df = df_utils.normalize(df, global_data_params) @@ -224,7 +224,7 @@ def check_split(df_in, df_len_expected, n_lags, n_forecasts, freq, p=0.1): n_lags=n_lags, n_forecasts=n_forecasts, ) - df_in = df_utils.check_dataframe(df_in, check_y=False) + df_in, _ = df_utils.check_dataframe(df_in, check_y=False) df_in = m._handle_missing_data(df_in, freq=freq, predicting=False) assert df_len_expected == len(df_in) total_samples = len(df_in) - n_lags - 2 * n_forecasts + 2 @@ -848,7 +848,7 @@ def test_too_many_NaN(): limit_linear=config_missing.impute_linear, rolling=config_missing.impute_rolling, ) - df = df_utils.check_dataframe(df) + df, _ = df_utils.check_dataframe(df) local_data_params, global_data_params = df_utils.init_data_params(df=df, normalize="minmax") df = df.drop("ID", axis=1) df = df_utils.normalize(df, global_data_params)
[enhancement] remove regressor if training set has one unique value Instead of throwing an error, simply remove the problematic regressor from the model with an error log message. https://github.com/ourownstory/neural_prophet/blob/ea717ad31dec216ba670492f9b89fd075621dc3a/neuralprophet/df_utils.py#L318 This has caused issues for people in the past, e.g. #809. This must also address: - reduce the error to a warning when the df contains unused columns (i.e. training or prediction data has columns that are not in the model definition)
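A sketch of the intended behaviour (column names and hyperparameters are illustrative): a future regressor that is constant over the training data should be dropped with a warning instead of raising during automatic normalization:

```
import numpy as np
import pandas as pd
from neuralprophet import NeuralProphet

df = pd.DataFrame({
    "ds": pd.date_range("2017-01-01", periods=100, freq="D"),
    "y": np.random.random(100),
})
df["future_constant"] = 1.0         # single unique value, so removed with a warning
df["future_dynamic"] = df["y"] * 2  # varying values, so kept

m = NeuralProphet(epochs=5, normalize="auto")
m = m.add_future_regressor("future_constant")
m = m.add_future_regressor("future_dynamic")
m.fit(df, freq="D")  # no longer raises on the constant regressor
```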
Note: we discussed that we would simply remove a regressor whose training values are all identical from the model and throw a warning, instead of manipulating the model's weights.
2022-10-27T00:37:03
ourownstory/neural_prophet
919
ourownstory__neural_prophet-919
[ "906" ]
cce32c0dc4fba7c0e79944342454d2def955e51c
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -1716,6 +1716,25 @@ def plot_latest_forecast( line_per_origin=True, ) + def plot_last_forecast( + self, + fcst, + df_name=None, + ax=None, + xlabel="ds", + ylabel="y", + figsize=(10, 6), + include_previous_forecasts=0, + plot_history_data=None, + plotting_backend="default", + ): + args = locals() + log.warning( + "plot_last_forecast() has been renamed to plot_latest_forecast() and is therefore deprecated. " + "Please use plot_latst_forecast() in the future" + ) + return NeuralProphet.plot_latest_forecast(**args) + def plot_components( self, fcst, df_name="__df__", figsize=None, forecast_in_focus=None, residuals=False, plotting_backend="default" ):
diff --git a/tests/test_integration.py b/tests/test_integration.py --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -485,6 +485,7 @@ def test_plot(): forecast = m.predict(future) m.plot(forecast) m.plot_latest_forecast(forecast, include_previous_forecasts=10) + m.plot_last_forecast(fcst=forecast, include_previous_forecasts=10) # test deprecated alias m.plot_components(forecast) m.plot_parameters() log.info("testing: Plotting with quants")
[UI] add plot_last alias for renamed function (backwards compatibility)
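A tiny sketch of the backwards-compatible alias; the fitted model `m` and `forecast` are assumed to exist:

```
m.plot_latest_forecast(forecast, include_previous_forecasts=10)      # new name
m.plot_last_forecast(fcst=forecast, include_previous_forecasts=10)   # old name: same plot, plus a deprecation warning
```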
2022-10-28T07:40:03