'Retrieve resource object by an id or a reference. Note: This method throws StackStormDBObjectNotFoundError exception if the object is not found in the database.'
def _get_by_ref_or_id(self, ref_or_id, exclude_fields=None):
    if ResourceReference.is_resource_reference(ref_or_id):
        is_reference = True
    else:
        is_reference = False

    if is_reference:
        resource_db = self._get_by_ref(resource_ref=ref_or_id, exclude_fields=exclude_fields)
    else:
        resource_db = self._get_by_id(resource_id=ref_or_id, exclude_fields=exclude_fields)

    if not resource_db:
        msg = 'Resource with a reference or id "%s" not found' % ref_or_id
        raise StackStormDBObjectNotFoundError(msg)

    return resource_db
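A minimal, self-contained sketch of the ref-or-id dispatch pattern used above; the class and helper below are illustrative stand-ins, not the actual st2 implementations.

class ResourceReference(object):
    @staticmethod
    def is_resource_reference(ref_or_id):
        # st2 references are of the form "<pack>.<name>", ids are plain ObjectIds
        return '.' in ref_or_id

def get_by_ref_or_id(ref_or_id):
    if ResourceReference.is_resource_reference(ref_or_id):
        return ('by-ref', ref_or_id)
    return ('by-id', ref_or_id)

print(get_by_ref_or_id('core.local'))                # -> ('by-ref', 'core.local')
print(get_by_ref_or_id('54e657d60640fd16887d6855'))  # -> ('by-id', '54e657d60640fd16887d6855')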
'Select a user which is to be used by the HTTP request following this call.'
def use_user(self, user_db):
    if not user_db:
        raise ValueError('"user_db" is mandatory')

    mock_context = {'user': user_db}
    self.request_context_mock = mock.PropertyMock(return_value=mock_context)
    Router.mock_context = self.request_context_mock
'Cancels an execution twice, to ensure that a full execution object is returned instead of an error message'
def test_post_delete_duplicate(self):
    post_resp = self._do_post(LIVE_ACTION_1)
    self.assertEqual(post_resp.status_int, 201)

    for i in range(2):
        delete_resp = self._do_delete(self._get_actionexecution_id(post_resp))
        self.assertEqual(delete_resp.status_int, 200)
        self.assertEqual(delete_resp.json['status'], 'canceled')
        expected_result = {'message': 'Action canceled by user.', 'user': 'stanley'}
        self.assertDictEqual(delete_resp.json['result'], expected_result)
'Test validation does NOT take place with the default configuration.'
def test_post_invalid_custom_trigger_param_trigger_param_validation_disabled_default_cfg(self):
    post_resp = self.__do_post(TestRuleController.RULE_11)
    self.assertEqual(post_resp.status_int, http_client.CREATED)
'This method should be invoked from run_and_print. The separation of run is to let the core logic be testable.'
@abc.abstractmethod
def run(self, args, **kwargs):
raise NotImplementedError
'This method is invoked when the corresponding command is executed from the command line.'
@abc.abstractmethod
def run_and_print(self, args, **kwargs):
raise NotImplementedError
'This function walks up the component causal chain. It only returns properties in the causal chain and nothing else.'
@staticmethod
def _filter_trace_components(trace, args):
    if not (args.execution or args.rule or args.trigger_instance):
        return trace

    # Identify the component to start the causal chain walk from
    component_id = None
    component_type = None
    if args.execution:
        component_id = args.execution
        component_type = 'action_execution'
    elif args.rule:
        component_id = args.rule
        component_type = 'rule'
    elif args.trigger_instance:
        component_id = args.trigger_instance
        component_type = 'trigger_instance'

    action_executions = []
    rules = []
    trigger_instances = []
    search_target_found = component_id and component_type

    while search_target_found:
        components_list = []
        if component_type == 'action_execution':
            components_list = trace.action_executions
            to_update_list = action_executions
        elif component_type == 'rule':
            components_list = trace.rules
            to_update_list = rules
        elif component_type == 'trigger_instance':
            components_list = trace.trigger_instances
            to_update_list = trigger_instances

        search_target_found = False
        component_caused_by_id = None
        for component in components_list:
            test_id = component['object_id']
            if test_id == component_id:
                caused_by = component.get('caused_by', {})
                component_id = caused_by.get('id', None)
                component_type = caused_by.get('type', None)

                # If the component we are looking at was not caused by the
                # expected component, skip it and keep looking
                if component_caused_by_id and component_caused_by_id != component_id:
                    continue

                component_caused_by_id = None
                to_update_list.append(component)

                # A 'caused_by' id of the form "<id>:<caused_by_id>" also pins
                # down the specific causing sub-component
                if component_id and ':' in component_id:
                    component_id_split = component_id.split(':')
                    component_id = component_id_split[0]
                    component_caused_by_id = component_id_split[1]

                search_target_found = True
                break

    trace.action_executions = action_executions
    trace.rules = rules
    trace.trigger_instances = trigger_instances
    return trace
'This function looks at the display filters to determine which components should be displayed.'
@staticmethod
def _apply_display_filters(trace, args):
    all_component_types = not (args.show_executions or args.show_rules or
                               args.show_trigger_instances)

    if (all_component_types or args.show_trigger_instances) and args.hide_noop_triggers:
        filtered_trigger_instances = []
        for trigger_instance in trace.trigger_instances:
            is_noop_trigger_instance = True
            for rule in trace.rules:
                caused_by_id = rule.get('caused_by', {}).get('id', None)
                if caused_by_id == trigger_instance['object_id']:
                    is_noop_trigger_instance = False
            if not is_noop_trigger_instance:
                filtered_trigger_instances.append(trigger_instance)
        trace.trigger_instances = filtered_trigger_instances

    if all_component_types:
        return trace

    if not args.show_executions:
        trace.action_executions = []
    if not args.show_rules:
        trace.rules = []
    if not args.show_trigger_instances:
        trace.trigger_instances = []
    return trace
'Print the execution detail to stdout. This method takes into account whether the executed action was a workflow or not and formats the output accordingly.'
def _print_execution_details(self, execution, args, **kwargs):
    runner_type = execution.action.get('runner_type', 'unknown')
    is_workflow_action = runner_type in WORKFLOW_RUNNER_TYPES

    show_tasks = getattr(args, 'show_tasks', False)
    raw = getattr(args, 'raw', False)
    detail = getattr(args, 'detail', False)
    key = getattr(args, 'key', None)
    attr = getattr(args, 'attr', [])

    if show_tasks and not is_workflow_action:
        raise ValueError('--show-tasks option can only be used with workflow actions')

    if not raw and not detail and (show_tasks or is_workflow_action):
        self._run_and_print_child_task_list(execution=execution, args=args, **kwargs)
    else:
        instance = execution

        if detail:
            formatter = table.PropertyValueTable
        else:
            formatter = execution_formatter.ExecutionResult

        if detail:
            options = {'attributes': copy.copy(self.display_attributes)}
        elif key:
            options = {'attributes': ['result.%s' % key], 'key': key}
        else:
            options = {'attributes': attr}

        options['json'] = args.json
        options['attribute_transform_functions'] = self.attribute_transform_functions
        self.print_output(instance, formatter, **options)
'Retrieve a top level workflow error. :return: (error, traceback)'
def _get_top_level_error(self, live_action):
    if isinstance(live_action.result, dict):
        error = live_action.result.get('error', None)
        traceback = live_action.result.get('traceback', None)
    else:
        error = 'See result'
        traceback = 'See result'

    return (error, traceback)
'Retrieve error message from the provided task. :return: (error, traceback)'
def _get_task_error(self, task):
    if not task:
        return (None, None)

    result = task['result']

    if isinstance(result, dict):
        stderr = result.get('stderr', None)
        error = result.get('error', None)
        traceback = result.get('traceback', None)
        error = error if error else stderr
    else:
        stderr = None
        error = None
        traceback = None

    return (error, traceback)
'Build a dictionary with parameters which will be passed to the action by parsing parameters passed to the CLI. :param args: CLI argument. :type args: ``object`` :rtype: ``dict``'
def _get_action_parameters_from_args(self, action, runner, args):
    action_ref_or_id = action.ref

    def read_file(file_path):
        if not os.path.exists(file_path):
            raise ValueError('File "%s" doesn\'t exist' % file_path)
        if not os.path.isfile(file_path):
            raise ValueError('"%s" is not a file' % file_path)
        with open(file_path, 'rb') as fp:
            content = fp.read()
        return content

    def transform_object(value):
        # Value is a JSON string
        if value.startswith('{'):
            result = value = json.loads(value)
        else:
            # Value is a comma-delimited string of key=value pairs
            pairs = value.split(',')
            result = {}
            for pair in pairs:
                split = pair.split('=', 1)
                if len(split) != 2:
                    continue
                (key, value) = split
                result[key] = value
        return result

    def transform_array(value):
        # A plain integer is first wrapped into a single-element list
        try:
            value = str([int(value)])
        except ValueError:
            pass
        try:
            result = json.loads(value)
        except ValueError:
            result = [v.strip() for v in value.split(',')]
        return result

    transformer = {
        'array': transform_array,
        'boolean': (lambda x: ast.literal_eval(x.capitalize())),
        'integer': int,
        'number': float,
        'object': transform_object,
        'string': str
    }

    def normalize(name, value):
        """The desired type is contained in the action meta-data, so we can look that up
        and call the desired "caster" function listed in the "transformer" dict.
        """
        if name in runner.runner_parameters:
            param = runner.runner_parameters[name]
            if 'type' in param and param['type'] in transformer:
                return transformer[param['type']](value)

        if name in action.parameters:
            param = action.parameters[name]
            if 'type' in param and param['type'] in transformer:
                return transformer[param['type']](value)

        return value

    result = {}
    if not args.parameters:
        return result

    for idx in range(len(args.parameters)):
        arg = args.parameters[idx]
        if '=' in arg:
            (k, v) = arg.split('=', 1)

            # Attributes for files are prefixed with "@"
            if k.startswith('@'):
                k = k[1:]
                is_file = True
            else:
                is_file = False

            try:
                if is_file:
                    # Files are handled a bit differently since we ship the
                    # content over the wire
                    file_path = os.path.normpath(pjoin(os.getcwd(), v))
                    file_name = os.path.basename(file_path)
                    content = read_file(file_path=file_path)

                    if action_ref_or_id == 'core.http':
                        # Special case for the http runner
                        result['_file_name'] = file_name
                        result['file_content'] = content
                    else:
                        result[k] = content
                else:
                    result[k] = normalize(k, v)
            except Exception as e:
                if 'malformed string' in str(e):
                    message = ('Invalid value for boolean parameter. '
                               'Valid values are: true, false')
                    raise ValueError(message)
                else:
                    raise e
        else:
            result['cmd'] = ' '.join(args.parameters[idx:])
            break

    # Special case for the http runner
    if 'file_content' in result:
        if 'method' not in result:
            # Default to POST if a method is not provided
            result['method'] = 'POST'

        if 'file_name' not in result:
            # File name not provided, use the default file name
            result['file_name'] = result['_file_name']

        del result['_file_name']

    if args.inherit_env:
        result['env'] = self._get_inherited_env_vars()

    return result
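A minimal, self-contained sketch of the "transformer" idea above (not the actual st2client implementation): CLI string values are cast based on a declared parameter type; the parameter names and types below are made up.

import ast
import json

transformer = {
    'array': lambda v: [s.strip() for s in v.split(',')],
    'boolean': lambda v: ast.literal_eval(v.capitalize()),
    'integer': int,
    'number': float,
    'object': json.loads,
    'string': str,
}

# Hypothetical declared parameter types
params = {'timeout': 'integer', 'tags': 'array', 'debug': 'boolean'}

def normalize(name, value):
    # Fall back to the raw string when no type is declared
    return transformer.get(params.get(name), str)(value)

assert normalize('timeout', '60') == 60
assert normalize('tags', 'a, b, c') == ['a', 'b', 'c']
assert normalize('debug', 'true') is True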
'The goal of this method is to add an indent at every level. This way the WF is represented as a tree structure while in a list. For the right visual representation the list must be a depth-first traversal, else the indents will end up looking strange.'
def _format_child_instances(self, children, parent_id):
    children = format_wf_instances(children)
    depth = {parent_id: 0}
    result = []
    for child in children:
        if child.parent not in depth:
            parent = None
            for instance in children:
                if WF_PREFIX in instance.id:
                    instance_id = instance.id[instance.id.index(WF_PREFIX) + len(WF_PREFIX):]
                else:
                    instance_id = instance.id
                if instance_id == child.parent:
                    parent = instance
            if parent and parent.parent and parent.parent in depth:
                depth[child.parent] = depth[parent.parent] + 1
            else:
                depth[child.parent] = 0
        child.id = (INDENT_CHAR * depth[child.parent]) + child.id
        result.append(self._format_for_common_representation(child))
    return result
'Formats a task for common representation between mistral and action-chain.'
def _format_for_common_representation(self, task):
    context = getattr(task, 'context', None)
    if context and 'chain' in context:
        task_name_key = 'context.chain.name'
    elif context and 'mistral' in context:
        task_name_key = 'context.mistral.task_name'
    return models.action.LiveAction(**{
        'id': task.id,
        'status': task.status,
        'task': jsutil.get_value(vars(task), task_name_key),
        'action': task.action.get('ref', None),
        'start_timestamp': task.start_timestamp,
        'end_timestamp': getattr(task, 'end_timestamp', None)
    })
'Sort a provided list of action parameters. :type parameters: ``list`` :type names: ``list`` or ``set``'
def _sort_parameters(self, parameters, names):
    sorted_parameters = sorted(
        names,
        key=lambda name: self._get_parameter_sort_value(parameters=parameters, name=name))
    return sorted_parameters
'Return a value which determines sort order for a particular parameter. By default, parameters are sorted using "position" parameter attribute. If this attribute is not available, parameter is sorted based on the name.'
def _get_parameter_sort_value(self, parameters, name):
    parameter = parameters.get(name, None)
    if not parameter:
        return None

    sort_value = parameter.get('position', name)
    return sort_value
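A hypothetical illustration of the sort behavior above; the parameter metadata is made up. Parameters with a "position" attribute sort by it, the rest fall back to their name. (Note: on Python 3, mixing positional and name keys in a single sorted() call would raise TypeError, so the two groups are sorted separately here.)

parameters = {
    'cmd': {'position': 0},
    'timeout': {'position': 1},
    'cwd': {'description': 'Working directory'},
    'env': {'description': 'Environment variables'},
}

def sort_value(name):
    param = parameters.get(name, None)
    if not param:
        return None
    return param.get('position', name)

print(sorted(['timeout', 'cmd'], key=sort_value))  # -> ['cmd', 'timeout'] (by position)
print(sorted(['env', 'cwd'], key=sort_value))      # -> ['cwd', 'env'] (by name)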
'Retrieve a list of exclude attributes for particular command line arguments.'
@classmethod
def _get_exclude_attributes(cls, args):
    exclude_attributes = []

    result_included = False
    trigger_instance_included = False
    for attr in args.attr:
        if attr.startswith('result'):
            result_included = True
        if attr.startswith('trigger_instance'):
            trigger_instance_included = True

    if not result_included:
        exclude_attributes.append('result')
    if not trigger_instance_included:
        exclude_attributes.append('trigger_instance')

    return exclude_attributes
'Retrieve resource by a primary key.'
def get_resource_by_pk(self, pk, **kwargs):
    try:
        instance = self.manager.get_by_id(pk, **kwargs)
    except Exception as e:
        traceback.print_exc()
        response = getattr(e, 'response', None)
        status_code = getattr(response, 'status_code', None)
        if status_code and status_code == httplib.UNAUTHORIZED:
            raise e
        instance = None

    return instance
'Retrieve resource by name.'
def get_resource_by_name(self, name, **kwargs):
    instance = self.manager.get_by_name(name, **kwargs)
    return instance
'Return a dictionary representation of this object. :param exclude_attributes: Optional list of attributes to exclude. :type exclude_attributes: ``list`` :rtype: ``dict``'
def to_dict(self, exclude_attributes=None):
    exclude_attributes = exclude_attributes or []

    attributes = self.__dict__.keys()
    attributes = [attr for attr in attributes
                  if not attr.startswith('__') and attr not in exclude_attributes]

    result = {}
    for attribute in attributes:
        value = getattr(self, attribute, None)
        result[attribute] = value

    return result
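A hypothetical usage sketch of the to_dict() pattern above on a simple resource-style object; the Execution class and its attribute values are made up for illustration.

class Execution(object):
    def __init__(self):
        self.id = '54e657d60640fd16887d6855'
        self.status = 'succeeded'
        self.result = {'stdout': 'ok'}

    def to_dict(self, exclude_attributes=None):
        exclude_attributes = exclude_attributes or []
        return {k: v for k, v in self.__dict__.items()
                if not k.startswith('__') and k not in exclude_attributes}

print(Execution().to_dict(exclude_attributes=['result']))
# -> {'id': '54e657d60640fd16887d6855', 'status': 'succeeded'}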
'Gets a property of a Resource. :param id_: Id of the resource. :param property_name: Name of the property. :param self_deserialize: If True, use the deserialize method implemented by this resource.'
@add_auth_token_to_kwargs_from_env
def get_property(self, id_, property_name, self_deserialize=True, **kwargs):
    token = kwargs.pop('token', None)
    api_key = kwargs.pop('api_key', None)

    if kwargs:
        url = '/%s/%s/%s/?%s' % (self.resource.get_url_path_name(), id_, property_name,
                                 urllib.parse.urlencode(kwargs))
    else:
        url = '/%s/%s/%s/' % (self.resource.get_url_path_name(), id_, property_name)

    if token:
        response = self.client.get(url, token=token)
    elif api_key:
        response = self.client.get(url, api_key=api_key)
    else:
        response = self.client.get(url)

    if response.status_code == 404:
        return None
    if response.status_code != 200:
        self.handle_error(response)

    if self_deserialize:
        return [self.resource.deserialize(item) for item in response.json()]
    else:
        return response.json()
'Parse the config and return kwargs which can be passed to the Client constructor. :rtype: ``dict``'
def _get_config_file_options(self, args):
    rc_options = self._parse_config_file(args=args)

    result = {}
    for kwarg_name, (section, option) in six.iteritems(CONFIG_OPTION_TO_CLIENT_KWARGS_MAP):
        result[kwarg_name] = rc_options.get(section, {}).get(option, None)

    return result
'Retrieve path to the CLI configuration file. :rtype: ``str``'
def _get_config_file_path(self, args):
    path = os.environ.get('ST2_CONFIG_FILE', ST2_CONFIG_PATH)
    if args.config_file:
        path = args.config_file

    path = os.path.abspath(os.path.expanduser(path))
    if path != ST2_CONFIG_PATH and not os.path.isfile(path):
        raise ValueError('Config "%s" not found' % path)

    return path
'Retrieve a valid auth token. If caching is enabled, we will first try to retrieve cached token from a file system. If cached token is expired or not available, we will try to authenticate using the provided credentials and retrieve a new auth token. :rtype: ``str``'
def _get_auth_token(self, client, username, password, cache_token):
    if cache_token:
        token = self._get_cached_auth_token(client=client, username=username,
                                            password=password)
    else:
        token = None

    if not token:
        # Token is either expired or not available, authenticate and cache a new one
        token_obj = self._authenticate_and_retrieve_auth_token(client=client,
                                                               username=username,
                                                               password=password)
        self._cache_auth_token(token_obj=token_obj)
        token = token_obj.token

    return token
'Retrieve cached auth token from the file in the config directory. :rtype: ``str``'
def _get_cached_auth_token(self, client, username, password):
    if not os.path.isdir(ST2_CONFIG_DIRECTORY):
        os.makedirs(ST2_CONFIG_DIRECTORY)

    cached_token_path = self._get_cached_token_path_for_user(username=username)

    if not os.access(ST2_CONFIG_DIRECTORY, os.R_OK):
        message = ('Unable to retrieve cached token from "%s" (user %s doesn\'t have read '
                   'access to the parent directory). Subsequent requests won\'t use a '
                   'cached token meaning they may be slower.' %
                   (cached_token_path, os.getlogin()))
        self.LOG.warn(message)
        return None

    if not os.path.isfile(cached_token_path):
        return None

    if not os.access(cached_token_path, os.R_OK):
        message = ('Unable to retrieve cached token from "%s" (user %s doesn\'t have read '
                   'access to this file). Subsequent requests won\'t use a cached token '
                   'meaning they may be slower.' % (cached_token_path, os.getlogin()))
        self.LOG.warn(message)
        return None

    # Safety check for overly permissive permissions: the last octal digit
    # covers "others", so anything >= 4 means the file is world-readable
    file_st_mode = oct(os.stat(cached_token_path).st_mode & 0o777)
    others_st_mode = int(file_st_mode[-1])

    if others_st_mode >= 4:
        message = ('Permissions (%s) for cached token file "%s" are too permissive. Please '
                   'restrict the permissions and make sure only your own user can read '
                   'from the file' % (file_st_mode, cached_token_path))
        self.LOG.warn(message)

    with open(cached_token_path) as fp:
        data = fp.read()

    try:
        data = json.loads(data)
        token = data['token']
        expire_timestamp = data['expire_timestamp']
    except Exception as e:
        msg = ('File "%s" with cached token is corrupted or invalid (%s). Please delete '
               'this file' % (cached_token_path, str(e)))
        raise ValueError(msg)

    now = int(time.time())
    if (expire_timestamp - TOKEN_EXPIRATION_GRACE_PERIOD_SECONDS) < now:
        self.LOG.debug('Cached token from file "%s" has expired' % cached_token_path)
        return None

    self.LOG.debug('Using cached token from file "%s"' % cached_token_path)
    return token
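The cached token file is plain JSON. A hypothetical sketch of its content (values made up), matching the two keys read above:

import json
import time

data = {
    'token': '4d76e023841a4a91a9c66aa4541156fe',        # opaque auth token value
    'expire_timestamp': int(time.time()) + 86400,       # expiry as a unix timestamp
}
print(json.dumps(data))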
'Cache auth token in the config directory. :param token_obj: Token object. :type token_obj: ``object``'
def _cache_auth_token(self, token_obj):
    if not os.path.isdir(ST2_CONFIG_DIRECTORY):
        os.makedirs(ST2_CONFIG_DIRECTORY)

    username = token_obj.user
    cached_token_path = self._get_cached_token_path_for_user(username=username)

    if not os.access(ST2_CONFIG_DIRECTORY, os.W_OK):
        message = ('Unable to write token to "%s" (user %s doesn\'t have write access to '
                   'the parent directory). Subsequent requests won\'t use a cached token '
                   'meaning they may be slower.' % (cached_token_path, os.getlogin()))
        self.LOG.warn(message)
        return None

    if os.path.isfile(cached_token_path) and not os.access(cached_token_path, os.W_OK):
        message = ('Unable to write token to "%s" (user %s doesn\'t have write access to '
                   'this file). Subsequent requests won\'t use a cached token meaning they '
                   'may be slower.' % (cached_token_path, os.getlogin()))
        self.LOG.warn(message)
        return None

    token = token_obj.token
    expire_timestamp = parse_isotime(token_obj.expiry)
    expire_timestamp = calendar.timegm(expire_timestamp.timetuple())

    data = {}
    data['token'] = token
    data['expire_timestamp'] = expire_timestamp
    data = json.dumps(data)

    # Open the file in owner read/write only mode (0o600 == 384) so other
    # users can't read the cached token
    fd = os.open(cached_token_path, os.O_WRONLY | os.O_CREAT, 0o600)
    with os.fdopen(fd, 'w') as fp:
        fp.write(data)

    self.LOG.debug('Token has been cached in "%s"' % cached_token_path)
    return True
'Retrieve cached token path for the provided username.'
def _get_cached_token_path_for_user(self, username):
    file_name = 'token-%s' % username
    result = os.path.abspath(os.path.join(ST2_CONFIG_DIRECTORY, file_name))
    return result
'Format a value for a simple field.'
@staticmethod
def _get_simple_field_value(entry, field_name):
    value = getattr(entry, field_name, '')
    if isinstance(value, (list, tuple)):
        if len(value) == 0:
            value = ''
        elif isinstance(value[0], (str, unicode)):
            # List contains simple string values, join them
            value = ', '.join(value)
    return value
'Override this method to customize output format for the subject.'
@classmethod
@abc.abstractmethod
def format(cls, subject, *args, **kwargs):
raise NotImplementedError
'Parse the config and return a dict with the parsed values. :rtype: ``dict``'
def parse(self):
    result = defaultdict(dict)

    if not os.path.isfile(self.config_file_path):
        # Config file doesn't exist, return the default values
        return CONFIG_DEFAULT_VALUES

    config = ConfigParser()
    with open(self.config_file_path, 'r') as fp:
        config.readfp(fp)

    for section, keys in six.iteritems(CONFIG_FILE_OPTIONS):
        for key, options in six.iteritems(keys):
            key_type = options['type']
            key_default_value = options['default']

            if config.has_option(section, key):
                if key_type in ['str', 'string']:
                    get_func = config.get
                elif key_type in ['int', 'integer']:
                    get_func = config.getint
                elif key_type in ['float']:
                    get_func = config.getfloat
                elif key_type in ['bool', 'boolean']:
                    get_func = config.getboolean
                else:
                    msg = 'Invalid type "%s" for option "%s"' % (key_type, key)
                    raise ValueError(msg)

                value = get_func(section, key)
                result[section][key] = value
            else:
                result[section][key] = key_default_value

    return dict(result)
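A small self-contained sketch of parsing an INI-style CLI config like the one read above; the section and option names are illustrative, and the Python 3 read_file() API is used here instead of the legacy readfp().

from configparser import ConfigParser
from io import StringIO

sample = """
[general]
base_url = http://127.0.0.1

[credentials]
username = st2admin
"""

config = ConfigParser()
config.read_file(StringIO(sample))
print(config.get('credentials', 'username'))  # -> st2admin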
'Test \'st2 login\' functionality by specifying a password and a configuration file'
@mock.patch.object(
    requests, 'post',
    mock.MagicMock(return_value=base.FakeResponse(json.dumps(TOKEN), 200, 'OK')))
def runTest(self):
    expected_username = self.TOKEN['user']
    args = ['--config', self.CONFIG_FILE, 'login', expected_username,
            '--password', 'Password1!']
    self.shell.run(args)

    with open(self.CONFIG_FILE, 'r') as config_file:
        for line in config_file.readlines():
            # Make sure the password doesn't get written to the config file
            self.assertFalse('password' in line)
            self.assertFalse('olduser' in line)
            if 'username' in line:
                self.assertEquals(line.split(' ')[2][:-1], expected_username)

    self.assertTrue(os.path.isfile('%stoken-%s' % (self.DOTST2_PATH, expected_username)))
'Test \'st2 login\' functionality with interactive password entry'
@mock.patch.object(
    requests, 'post',
    mock.MagicMock(return_value=base.FakeResponse(json.dumps(TOKEN), 200, 'OK')))
@mock.patch.object(
    requests, 'get',
    mock.MagicMock(return_value=base.FakeResponse(json.dumps({}), 200, 'OK')))
@mock.patch('st2client.commands.auth.getpass')
def runTest(self, mock_gp):
    expected_username = self.TOKEN['user']
    args = ['--config', self.CONFIG_FILE, 'login', expected_username]

    mock_gp.getpass.return_value = 'Password1!'
    self.shell.run(args)

    expected_kwargs = {
        'headers': {'content-type': 'application/json'},
        'auth': ('st2admin', 'Password1!')
    }
    requests.post.assert_called_with('http://127.0.0.1:9100/tokens', '{}', **expected_kwargs)

    with open(self.CONFIG_FILE, 'r') as config_file:
        for line in config_file.readlines():
            self.assertFalse('password' in line)
            self.assertFalse('olduser' in line)
            if 'username' in line:
                self.assertEquals(line.split(' ')[2][:-1], expected_username)

    self.assertTrue(os.path.isfile('%stoken-%s' % (self.DOTST2_PATH, expected_username)))

    # Ensure a subsequent API call uses the cached token
    args = ['--config', self.CONFIG_FILE, 'pack', 'list']
    self.shell.run(args)

    expected_kwargs = {
        'headers': {'X-Auth-Token': self.TOKEN['token']},
        'params': {}
    }
    requests.get.assert_called_with('http://127.0.0.1:9101/v1/packs', **expected_kwargs)
'Test \'st2 login\' functionality with --write-password flag set'
@mock.patch.object(
    requests, 'post',
    mock.MagicMock(return_value=base.FakeResponse(json.dumps(TOKEN), 200, 'OK')))
@mock.patch('st2client.commands.auth.getpass')
def runTest(self, mock_gp):
    expected_username = self.TOKEN['user']
    args = ['--config', self.CONFIG_FILE, 'login', expected_username,
            '--password', 'Password1!', '--write-password']
    self.shell.run(args)

    with open(self.CONFIG_FILE, 'r') as config_file:
        for line in config_file.readlines():
            self.assertFalse('olduser' in line)
            if 'username' in line:
                self.assertEquals(line.split(' ')[2][:-1], expected_username)

    self.assertTrue(os.path.isfile('%stoken-%s' % (self.DOTST2_PATH, expected_username)))
'Test \'st2 login\' ability to detect unhandled exceptions'
@mock.patch.object(
    requests, 'post',
    mock.MagicMock(return_value=base.FakeResponse(json.dumps(TOKEN), 200, 'OK')))
@mock.patch('st2client.commands.auth.getpass')
def runTest(self, mock_gp):
    expected_username = self.TOKEN['user']
    args = ['--config', self.CONFIG_FILE, 'login', expected_username]

    mock_gp.getpass = mock.MagicMock(side_effect=Exception)
    self.shell.run(args)
    retcode = self.shell.run(args)

    self.assertTrue(('Failed to log in as %s' % expected_username) in self.stdout.getvalue())
    self.assertTrue('Logged in as' not in self.stdout.getvalue())
    self.assertEqual(retcode, 0)
'Test \'st2 whoami\' functionality'
@mock.patch.object(
    requests, 'post',
    mock.MagicMock(return_value=base.FakeResponse(json.dumps({}), 200, 'OK')))
def runTest(self):
    retcode = self.shell.run(['--config', self.CONFIG_FILE, 'whoami'])
    self.assertEqual(retcode, 0)
    self.assertTrue(self.USERNAME in self.stdout.getvalue())
'Test \'st2 whoami\' functionality with a missing username'
@mock.patch.object(
    requests, 'post',
    mock.MagicMock(return_value=base.FakeResponse(json.dumps({}), 200, 'OK')))
def runTest(self):
    retcode = self.shell.run(['--config', self.CONFIG_FILE, 'whoami'])
    self.assertEqual('Unable to retrieve currently logged-in user',
                     self.stdout.getvalue().strip())
    self.assertEqual(retcode, 0)
'Test \'st2 whoami\' functionality with a missing credentials section'
@mock.patch.object(
    requests, 'post',
    mock.MagicMock(return_value=base.FakeResponse(json.dumps({}), 200, 'OK')))
def runTest(self):
    retcode = self.shell.run(['--config', self.CONFIG_FILE, 'whoami'])
    self.assertEqual('Unable to retrieve currently logged-in user',
                     self.stdout.getvalue().strip())
    self.assertEqual(retcode, 0)
'Test \'st2 whoami\' ability to detect unhandled exceptions'
@mock.patch.object(
    requests, 'post',
    mock.MagicMock(return_value=base.FakeResponse(json.dumps({}), 200, 'OK')))
@mock.patch('st2client.commands.auth.BaseCLIApp')
def runTest(self, mock_cli):
    mock_cli.return_value._get_config_file_path = mock.MagicMock(side_effect=Exception)
    retcode = self.shell.run(['--config', self.CONFIG_FILE, 'whoami'])
    self.assertEqual('Unable to retrieve currently logged-in user',
                     self.stdout.getvalue().strip())
    self.assertEqual(retcode, 0)
'Retrieve information (user account) for a particular user. Note: Not all the auth backends may implement this. :rtype: ``dict``'
def get_user(self, username):
raise NotImplementedError('get_user() not implemented for this backend')
'Retrieve a list of groups a particular user is a member of. Note: Not all the auth backends may implement this. :rtype: ``list`` of ``str``'
def get_user_groups(self, username):
    raise NotImplementedError('get_user_groups() not implemented for this backend')
'Method which asserts that the common ST2 environment variables are present in the provided environment.'
def assertCommonSt2EnvVarsAvailableInEnv(self, env):
    for var_name in COMMON_ACTION_ENV_VARIABLES:
        self.assertTrue(var_name in env)
    self.assertEqual(env['ST2_ACTION_API_URL'], get_full_public_api_url())
    self.assertTrue(env[AUTH_TOKEN_ENV_VARIABLE_NAME] is not None)
'Register all the packs inside the fixtures directory.'
@classmethod
def _register_packs(self):
    registrar = ResourceRegistrar(use_pack_cache=False)
    registrar.register_packs(base_dirs=get_packs_base_paths())
'Register all the pack configs inside the fixtures directory.'
@classmethod
def _register_pack_configs(self, validate_configs=False):
    registrar = ConfigsRegistrar(use_pack_cache=False, validate_configs=validate_configs)
    registrar.register_from_packs(base_dirs=get_packs_base_paths())
'Add a process to the local data structure to make sure it will get killed and cleaned up on tearDown.'
def add_process(self, process):
self.processes[process.pid] = process
'Remove process from a local data structure.'
def remove_process(self, process):
    if process.pid in self.processes:
        del self.processes[process.pid]
'Assert that a long-running process (a Process object as returned by subprocess.Popen) has successfully started and is running.'
def assertProcessIsRunning(self, process):
    if not process:
        raise ValueError('process is None')

    return_code = process.poll()
    if return_code is not None:
        if process.stdout:
            stdout = process.stdout.read()
        else:
            stdout = ''

        if process.stderr:
            stderr = process.stderr.read()
        else:
            stderr = ''

        msg = ('Process exited with code=%s.\nStdout:\n%s\n\nStderr:\n%s' %
               (return_code, stdout, stderr))
        self.fail(msg)
'Return raw fixture content for the provided fixture path. :param fixture_path: Fixture path relative to the tests/fixtures/ directory. :type fixture_path: ``str``'
def get_fixture_content(self, fixture_path):
    base_pack_path = self._get_base_pack_path()
    fixtures_path = os.path.join(base_pack_path, 'tests/fixtures/')
    fixture_path = os.path.join(fixtures_path, fixture_path)

    with open(fixture_path, 'r') as fp:
        content = fp.read()

    return content
'Loads fixtures specified in fixtures_dict into the database and returns DB models for the fixtures. fixtures_dict should be of the form: \'actions\': [\'action-1.yaml\', \'action-2.yaml\'], \'rules\': [\'rule-1.yaml\'], \'liveactions\': [\'execution-1.yaml\'] :param fixtures_pack: Name of the pack to load fixtures from. :type fixtures_pack: ``str`` :param fixtures_dict: Dictionary specifying the fixtures to load for each type. :type fixtures_dict: ``dict`` :rtype: ``dict``'
def save_fixtures_to_db(self, fixtures_pack='generic', fixtures_dict=None):
    if fixtures_dict is None:
        fixtures_dict = {}
    fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
    self._validate_fixture_dict(fixtures_dict, allowed=ALLOWED_DB_FIXTURES)

    db_models = {}
    for fixture_type, fixtures in six.iteritems(fixtures_dict):
        API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
        PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(fixture_type, None)

        loaded_fixtures = {}
        for fixture in fixtures:
            if fixture in loaded_fixtures:
                msg = 'Fixture "%s" is specified twice, probably a typo.' % fixture
                raise ValueError(msg)

            fixture_dict = self.meta_loader.load(
                self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
            api_model = API_MODEL(**fixture_dict)
            db_model = API_MODEL.to_model(api_model)
            db_model = PERSISTENCE_MODEL.add_or_update(db_model)
            loaded_fixtures[fixture] = db_model

        db_models[fixture_type] = loaded_fixtures

    return db_models
'Loads fixtures specified in fixtures_dict. We simply want to load the meta into dict objects. fixtures_dict should be of the form: \'actionchains\': [\'actionchain1.yaml\', \'actionchain2.yaml\'], \'workflows\': [\'workflow.yaml\'] :param fixtures_pack: Name of the pack to load fixtures from. :type fixtures_pack: ``str`` :param fixtures_dict: Dictionary specifying the fixtures to load for each type. :type fixtures_dict: ``dict`` :rtype: ``dict``'
def load_fixtures(self, fixtures_pack='generic', fixtures_dict=None):
    if not fixtures_dict:
        return {}
    fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
    self._validate_fixture_dict(fixtures_dict)

    all_fixtures = {}
    for fixture_type, fixtures in six.iteritems(fixtures_dict):
        loaded_fixtures = {}
        for fixture in fixtures:
            fixture_dict = self.meta_loader.load(
                self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
            loaded_fixtures[fixture] = fixture_dict
        all_fixtures[fixture_type] = loaded_fixtures

    return all_fixtures
'Loads fixtures specified in fixtures_dict as db models. This method must be used for fixtures that have associated DB models. We simply want to load the meta as DB models but don\'t want to save them to db. fixtures_dict should be of the form: \'actions\': [\'action-1.yaml\', \'action-2.yaml\'], \'rules\': [\'rule-1.yaml\'], \'liveactions\': [\'execution-1.yaml\'] :param fixtures_pack: Name of the pack to load fixtures from. :type fixtures_pack: ``str`` :param fixtures_dict: Dictionary specifying the fixtures to load for each type. :type fixtures_dict: ``dict`` :rtype: ``dict``'
def load_models(self, fixtures_pack='generic', fixtures_dict=None):
    if not fixtures_dict:
        return {}
    fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
    self._validate_fixture_dict(fixtures_dict, allowed=ALLOWED_DB_FIXTURES)

    all_fixtures = {}
    for fixture_type, fixtures in six.iteritems(fixtures_dict):
        API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)

        loaded_models = {}
        for fixture in fixtures:
            fixture_dict = self.meta_loader.load(
                self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
            api_model = API_MODEL(**fixture_dict)
            db_model = API_MODEL.to_model(api_model)
            loaded_models[fixture] = db_model
        all_fixtures[fixture_type] = loaded_models

    return all_fixtures
'Deletes fixtures specified in fixtures_dict from the database. fixtures_dict should be of the form: \'actions\': [\'action-1.yaml\', \'action-2.yaml\'], \'rules\': [\'rule-1.yaml\'], \'liveactions\': [\'execution-1.yaml\'] :param fixtures_pack: Name of the pack to delete fixtures from. :type fixtures_pack: ``str`` :param fixtures_dict: Dictionary specifying the fixtures to delete for each type. :type fixtures_dict: ``dict`` :param raise_on_fail: Optional If True, raises exception if delete fails on any fixture. :type raise_on_fail: ``boolean``'
def delete_fixtures_from_db(self, fixtures_pack='generic', fixtures_dict=None, raise_on_fail=False):
    if not fixtures_dict:
        return
    fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
    self._validate_fixture_dict(fixtures_dict)

    for fixture_type, fixtures in six.iteritems(fixtures_dict):
        API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
        PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(fixture_type, None)

        for fixture in fixtures:
            fixture_dict = self.meta_loader.load(
                self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
            api_model = API_MODEL(**fixture_dict)
            db_model = API_MODEL.to_model(api_model)
            try:
                PERSISTENCE_MODEL.delete(db_model)
            except:
                if raise_on_fail:
                    raise
'Deletes models specified in models_dict from the database. models_dict should be of the form: \'actions\': [ACTION1, ACTION2], \'rules\': [RULE1], \'liveactions\': [EXECUTION] :param fixtures_dict: Dictionary specifying the fixtures to delete for each type. :type fixtures_dict: ``dict``. :param raise_on_fail: Optional If True, raises exception if delete fails on any model. :type raise_on_fail: ``boolean``'
def delete_models_from_db(self, models_dict, raise_on_fail=False):
    for model_type, models in six.iteritems(models_dict):
        PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(model_type, None)
        for model in models:
            try:
                PERSISTENCE_MODEL.delete(model)
            except:
                if raise_on_fail:
                    raise
'Return mock logger instance. Keep in mind that this method returns Mock class instance which means you can use all the usual Mock class methods to assert that a particular message has been logged / logger has been called with particular arguments.'
def get_logger(self, name):
return self._logger
'Return a list of all values stored in a dictionary which is local to this class.'
def list_values(self, local=True, prefix=None):
    key_prefix = self._get_full_key_prefix(local=local, prefix=prefix)

    if not key_prefix:
        return self._datastore_items.values()

    result = []
    for name, kvp in self._datastore_items.items():
        if name.startswith(key_prefix):
            result.append(kvp)

    return result
'Return a particular value stored in a dictionary which is local to this class.'
def get_value(self, name, local=True, scope=SYSTEM_SCOPE, decrypt=False):
    name = self._get_full_key_name(name=name, local=local)

    if name not in self._datastore_items:
        return None

    kvp = self._datastore_items[name]
    return kvp.value
'Store a value in a dictionary which is local to this class.'
def set_value(self, name, value, ttl=None, local=True, scope=SYSTEM_SCOPE, encrypt=False):
    if ttl:
        raise ValueError('MockDatastoreService.set_value doesn\'t support "ttl" argument')

    name = self._get_full_key_name(name=name, local=local)

    instance = KeyValuePair()
    instance.id = name
    instance.name = name
    instance.value = value

    self._datastore_items[name] = instance
    return True
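A hypothetical usage sketch of the mock datastore service in a sensor unit test; the test case class, sensor class, and attribute names are assumptions for illustration.

class MySensorTestCase(BaseSensorTestCase):
    sensor_cls = MySensor  # hypothetical sensor under test

    def test_datastore_roundtrip(self):
        service = self.sensor_service.datastore_service
        service.set_value(name='last_id', value='1234')
        self.assertEqual(service.get_value(name='last_id'), '1234')
        self.assertTrue(service.delete_value(name='last_id'))
        self.assertIsNone(service.get_value(name='last_id'))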
'Delete a value from a dictionary which is local to this class.'
def delete_value(self, name, local=True, scope=SYSTEM_SCOPE):
    name = self._get_full_key_name(name=name, local=local)

    if name not in self._datastore_items:
        return False

    del self._datastore_items[name]
    return True
'Retrieve instance of the sensor class.'
def get_sensor_instance(self, config=None, poll_interval=None):
    kwargs = {'sensor_service': self.sensor_service}

    if config:
        kwargs['config'] = config
    if poll_interval is not None:
        kwargs['poll_interval'] = poll_interval

    instance = self.sensor_cls(**kwargs)
    return instance
'Assert that the trigger with the provided values has been dispatched. :param trigger: Name of the trigger. :type trigger: ``str`` :param payload: Trigger payload (optional). If not provided, only the trigger name is matched. :type payload: ``object`` :param trace_context: Trigger trace context (optional). If not provided, only the trigger name is matched. :type trace_context: ``object``'
def assertTriggerDispatched(self, trigger, payload=None, trace_context=None):
    dispatched_triggers = self.get_dispatched_triggers()

    for item in dispatched_triggers:
        trigger_matches = (item['trigger'] == trigger)

        if payload:
            payload_matches = (item['payload'] == payload)
        else:
            payload_matches = True

        if trace_context:
            trace_context_matches = (item['trace_context'] == trace_context)
        else:
            trace_context_matches = True

        if trigger_matches and payload_matches and trace_context_matches:
            return True

    msg = 'Trigger "%s" hasn\'t been dispatched' % trigger
    raise AssertionError(msg)
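A hypothetical continuation of the sensor test case sketched earlier: poll the sensor once, then assert the expected trigger was dispatched. The trigger name and payload are made up.

    def test_poll_dispatches_trigger(self):
        sensor = self.get_sensor_instance(config={'key': 'value'})
        sensor.poll()
        self.assertTriggerDispatched(trigger='mypack.mytrigger',
                                     payload={'foo': 'bar'})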
'Retrieve instance of the action class.'
def get_action_instance(self, config=None):
    instance = get_action_class_instance(action_cls=self.action_cls,
                                         config=config,
                                         action_service=self.action_service)
    return instance
'Assert that the provided command matches exactly one format string from the provided list.'
def assertCommandMatchesExactlyOneFormatString(self, format_strings, command):
    matched_format_strings = []
    for format_string in format_strings:
        try:
            extract_parameters(format_str=format_string, param_stream=command)
        except ParseException:
            continue
        matched_format_strings.append(format_string)

    if len(matched_format_strings) == 0:
        msg = 'Command "%s" didn\'t match any of the provided format strings' % command
        raise AssertionError(msg)
    elif len(matched_format_strings) > 1:
        msg = ('Command "%s" matched multiple format strings: %s' %
               (command, ', '.join(matched_format_strings)))
        raise AssertionError(msg)
'Assert that the provided command matches the format string. In addition to that, also assert that the parameters which have been extracted from the user input (command) also match the provided parameters.'
def assertExtractedParametersMatch(self, format_string, command, parameters):
    extracted_params = extract_parameters_for_action_alias_db(
        action_alias_db=self.action_alias_db,
        format_str=format_string,
        param_stream=command)

    if extracted_params != parameters:
        msg = ('Extracted parameters from command string "%s" against format string "%s" '
               'didn\'t match the provided parameters: ' % (command, format_string))

        # Intercept the assertion error so we can provide a more detailed message
        try:
            self.assertEqual(extracted_params, parameters)
        except AssertionError as e:
            msg += str(e)

        raise AssertionError(msg)
'Retrieve ActionAlias DB object for the provided alias name.'
def _get_action_alias_db_by_name(self, name):
    base_pack_path = self._get_base_pack_path()
    pack_yaml_path = os.path.join(base_pack_path, MANIFEST_FILE_NAME)

    if os.path.isfile(pack_yaml_path):
        # The pack ref is retrieved from the pack metadata file, if it exists
        meta_loader = MetaLoader()
        pack_metadata = meta_loader.load(pack_yaml_path)
        pack = get_pack_ref_from_metadata(metadata=pack_metadata)
    else:
        # Otherwise fall back to the directory name
        (_, pack) = os.path.split(base_pack_path)

    pack_loader = ContentPackLoader()
    registrar = AliasesRegistrar(use_pack_cache=False)

    aliases_path = pack_loader.get_content_from_pack(pack_dir=base_pack_path,
                                                     content_type='aliases')
    aliases = registrar._get_aliases_from_pack(aliases_dir=aliases_path)
    for alias_path in aliases:
        action_alias_db = registrar._get_action_alias_db(pack=pack,
                                                         action_alias=alias_path)
        if action_alias_db.name == name:
            return action_alias_db

    raise ValueError('Alias with name "%s" not found' % name)
'Creates the object patching the actual connection.'
@patch('paramiko.SSHClient', Mock)
def setUp(self):
    cfg.CONF.set_override(name='ssh_key_file', override=None, group='system_user')
    cfg.CONF.set_override(name='use_ssh_config', override=False, group='ssh_runner')

    conn_params = {'hostname': 'dummy.host.org',
                   'port': 8822,
                   'username': 'ubuntu',
                   'key_files': '~/.ssh/ubuntu_ssh',
                   'timeout': '600'}
    self.ssh_cli = ParamikoSSHClient(**conn_params)
'Loads proxy commands from ssh config file'
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
              MagicMock(return_value=False))
@patch('paramiko.ProxyCommand')
def test_set_proxycommand(self, mock_ProxyCommand):
    ssh_config_file_path = os.path.join(get_resources_base_path(), 'ssh', 'dummy_ssh_config')
    cfg.CONF.set_override(name='ssh_config_file_path', override=ssh_config_file_path,
                          group='ssh_runner')
    cfg.CONF.set_override(name='use_ssh_config', override=True, group='ssh_runner')

    conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu', 'password': 'foo'}
    mock = ParamikoSSHClient(**conn_params)
    mock.connect()
    mock_ProxyCommand.assert_called_once_with('ssh -q -W dummy.host.org:22 dummy_bastion')
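A plausible sketch of what the "dummy_ssh_config" fixture above could contain to produce the asserted ProxyCommand (the exact fixture content is an assumption; paramiko expands %h and %p to the target host and port).

DUMMY_SSH_CONFIG = """
Host dummy.host.org
    Port 22
    ProxyCommand ssh -q -W %h:%p dummy_bastion
"""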
'Loads proxy commands from ssh config file'
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
              MagicMock(return_value=False))
@patch('paramiko.ProxyCommand')
def test_fail_set_proxycommand(self, mock_ProxyCommand):
    ssh_config_file_path = os.path.join(get_resources_base_path(), 'ssh',
                                        'dummy_ssh_config_fail')
    cfg.CONF.set_override(name='ssh_config_file_path', override=ssh_config_file_path,
                          group='ssh_runner')
    cfg.CONF.set_override(name='use_ssh_config', override=True, group='ssh_runner')

    conn_params = {'hostname': 'dummy.host.org'}
    mock = ParamikoSSHClient(**conn_params)
    self.assertRaises(Exception, mock.connect)
    mock_ProxyCommand.assert_not_called()
'Initialize object with no credentials. Just to have better coverage, initialize the object without \'password\' nor \'key\'. Now that we only reconcile the final parameters at the last moment when we explicitly try to connect, all the credentials should be set to None.'
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
              MagicMock(return_value=False))
def test_create_without_credentials(self):
    conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu'}
    mock = ParamikoSSHClient(**conn_params)
    self.assertEqual(mock.password, None)
    self.assertEqual(mock.key_material, None)
    self.assertEqual(mock.key_files, None)
'Basic execution.'
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, '_consume_stdout', MagicMock(return_value=StringIO('')))
@patch.object(ParamikoSSHClient, '_consume_stderr', MagicMock(return_value=StringIO('')))
@patch.object(os.path, 'exists', MagicMock(return_value=True))
@patch.object(os, 'stat', MagicMock(return_value=None))
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
              MagicMock(return_value=False))
def test_basic_usage_absolute_path(self):
    mock = self.ssh_cli
    sd = '/root/random_script.sh'
    mock.connect()

    mock_cli = mock.client
    expected_conn = {'username': 'ubuntu',
                     'key_filename': '~/.ssh/ubuntu_ssh',
                     'allow_agent': False,
                     'hostname': 'dummy.host.org',
                     'look_for_keys': False,
                     'timeout': '600',
                     'port': 8822}
    mock_cli.connect.assert_called_once_with(**expected_conn)

    mock.put(sd, sd, mirror_local_mode=False)
    mock_cli.open_sftp().put.assert_called_once_with(sd, sd)

    mock.run(sd)
    mock_cli.get_transport().open_session().exec_command.assert_called_once_with(sd)

    mock.close()
'Provide a basic test with \'delete\' action.'
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
              MagicMock(return_value=False))
def test_delete_script(self):
    mock = self.ssh_cli
    sd = '/root/random_script.sh'
    mock.connect()
    mock.delete_file(sd)
    mock.client.open_sftp().unlink.assert_called_with(sd)
    mock.close()
'Clean up the temporary auth token for the current action. Note: This method should never throw since it\'s called inside finally block which assumes it doesn\'t throw.'
def _clean_up_auth_token(self, runner, status):
    is_async_runner = isinstance(runner, AsyncActionRunner)
    action_completed = status in action_constants.LIVEACTION_COMPLETED_STATES

    if not is_async_runner or (is_async_runner and action_completed):
        try:
            self._delete_auth_token(runner.auth_token)
        except:
            LOG.exception('Unable to clean-up auth_token.')
        return True

    return False
'Update LiveActionDB object for the provided liveaction id.'
def _update_live_action_db(self, liveaction_id, status, result, context):
    liveaction_db = get_liveaction_by_id(liveaction_id)

    if status in action_constants.LIVEACTION_COMPLETED_STATES:
        end_timestamp = date_utils.get_datetime_utc_now()
    else:
        end_timestamp = None

    liveaction_db = update_liveaction_status(status=status,
                                             result=result,
                                             context=context,
                                             end_timestamp=end_timestamp,
                                             liveaction_db=liveaction_db)
    return liveaction_db
'Schedules the LiveAction and publishes the request to the appropriate action runner(s). LiveActions in statuses other than "requested" are ignored. :param request: Action execution request. :type request: ``st2common.models.db.liveaction.LiveActionDB``'
def process(self, request):
    if request.status != action_constants.LIVEACTION_STATUS_REQUESTED:
        LOG.info('%s is ignoring %s (id=%s) with "%s" status.',
                 self.__class__.__name__, type(request), request.id, request.status)
        return

    try:
        liveaction_db = action_utils.get_liveaction_by_id(request.id)
    except StackStormDBObjectNotFoundError:
        LOG.exception('Failed to find liveaction %s in the database.', request.id)
        raise

    liveaction_db = self._apply_pre_run_policies(liveaction_db=liveaction_db)

    if liveaction_db.status not in [action_constants.LIVEACTION_STATUS_REQUESTED,
                                    action_constants.LIVEACTION_STATUS_SCHEDULED]:
        LOG.info('%s is ignoring %s (id=%s) with "%s" status after policies are applied.',
                 self.__class__.__name__, type(request), request.id, liveaction_db.status)
        return

    if liveaction_db.status == action_constants.LIVEACTION_STATUS_REQUESTED:
        liveaction_db = action_service.update_status(
            liveaction_db, action_constants.LIVEACTION_STATUS_SCHEDULED, publish=False)

    LiveAction.publish_status(liveaction_db)
'Retrieve a runner reference for the provided action. :rtype: ``str``'
def _get_runner_ref(self, action_ref):
    action = Action.get_by_ref(action_ref)
    return action['runner_type']['name']
':param retry_on: Condition to retry the execution on (failure, timeout). :type retry_on: ``str`` :param max_retry_count: Maximum number of times to try to retry an action. :type max_retry_count: ``int`` :param delay: How long to wait before retrying an execution. :type delay: ``float``'
def __init__(self, policy_ref, policy_type, retry_on, max_retry_count=2, delay=0):
    super(ExecutionRetryPolicyApplicator, self).__init__(policy_ref=policy_ref,
                                                         policy_type=policy_type)
    self.retry_on = retry_on
    self.max_retry_count = max_retry_count
    self.delay = delay or 0
'Check whether the live action is part of a workflow action by looking at the parent info in its context. :rtype: ``bool``'
def _is_live_action_part_of_workflow_action(self, live_action_db):
    context = getattr(live_action_db, 'context', {})
    parent = context.get('parent', {})
    is_wf_action = (parent is not None) and (parent != {})
    return is_wf_action
'Retrieve current retry count for the provided live action. :rtype: ``int``'
def _get_live_action_retry_count(self, live_action_db):
    context = getattr(live_action_db, 'context', {})
    retry_count = context.get('policies', {}).get('retry', {}).get('retry_count', 0)
    return retry_count
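A hypothetical sketch of the context shape the retry-count lookup above expects; the values and the extra key are made up for illustration.

context = {
    'policies': {
        'retry': {
            'applied_policy': 'mypack.retry_on_failure',  # hypothetical key
            'retry_count': 1,
        }
    }
}
retry_count = context.get('policies', {}).get('retry', {}).get('retry_count', 0)
assert retry_count == 1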
'Dispatches the LiveAction to appropriate action runner. LiveAction in statuses other than "scheduled" and "canceling" are ignored. If LiveAction is already canceled and result is empty, the LiveAction is updated with a generic exception message. :param liveaction: Action execution request. :type liveaction: ``st2common.models.db.liveaction.LiveActionDB`` :rtype: ``dict``'
def process(self, liveaction):
    if liveaction.status == action_constants.LIVEACTION_STATUS_CANCELED:
        LOG.info('%s is not executing %s (id=%s) with "%s" status.',
                 self.__class__.__name__, type(liveaction), liveaction.id,
                 liveaction.status)
        if not liveaction.result:
            updated_liveaction = action_utils.update_liveaction_status(
                status=liveaction.status,
                result={'message': 'Action execution canceled by user.'},
                liveaction_id=liveaction.id)
            executions.update_execution(updated_liveaction)
        return

    if liveaction.status not in [action_constants.LIVEACTION_STATUS_SCHEDULED,
                                 action_constants.LIVEACTION_STATUS_CANCELING]:
        LOG.info('%s is not dispatching %s (id=%s) with "%s" status.',
                 self.__class__.__name__, type(liveaction), liveaction.id,
                 liveaction.status)
        return

    try:
        liveaction_db = action_utils.get_liveaction_by_id(liveaction.id)
    except StackStormDBObjectNotFoundError:
        LOG.exception('Failed to find liveaction %s in the database.', liveaction.id)
        raise

    return (self._run_action(liveaction_db)
            if liveaction.status == action_constants.LIVEACTION_STATUS_SCHEDULED
            else self._cancel_action(liveaction_db))
'Tests that an invalid payload still results in dispatch success with the default config. The previous config definition used StrOpt instead of BoolOpt for cfg.CONF.system.validate_trigger_payload. This meant that even though the intention was to bypass validation, the fact that this option was a string meant it always evaluated to True in conditionals. However, the other unit tests directly modified cfg.CONF.system.validate_trigger_payload before running, which obscured this bug during testing. This test (as well as resetting cfg.CONF.system.validate_trigger_payload to its original value during tearDown) verifies that validation does NOT take place with the default configuration.'
@mock.patch('st2common.services.triggers.get_trigger_type_db',
            mock.MagicMock(return_value=TriggerTypeMock(TEST_SCHEMA)))
def test_dispatch_success_with_default_config_and_invalid_payload(self):
    payload = {'name': 'John Doe', 'age': '25'}
    self.sensor_service.dispatch('trigger-name', payload)
    self.assertEqual(self._dispatched_count, 1)
':param sensors: A list of sensor dicts. :type sensors: ``list`` of ``dict`` :param poll_interval: How long to sleep between each poll for running / dead sensors. :type poll_interval: ``float``'
def __init__(self, sensors, poll_interval=5, dispatcher=None):
    self._poll_interval = poll_interval
    self._sensors = {}
    self._processes = {}

    if not dispatcher:
        dispatcher = TriggerDispatcher(LOG)
    self._dispatcher = dispatcher

    self._stopped = False

    sensors = sensors or []
    for sensor_obj in sensors:
        sensor_id = self._get_sensor_id(sensor=sensor_obj)
        self._sensors[sensor_id] = sensor_obj

    # State used for detecting and respawning dead sensors
    self._sensor_start_times = {}
    self._sensor_respawn_counts = defaultdict(int)

    # All the instance variables which hold internal state for a particular sensor
    self._internal_sensor_state_variables = [
        self._processes,
        self._sensors,
        self._sensor_start_times,
    ]
'Main loop which polls sensor for results and detects dead sensors.'
def _poll_sensors_for_results(self, sensor_ids):
    for sensor_id in sensor_ids:
        now = int(time.time())

        process = self._processes[sensor_id]
        status = process.poll()

        if status is not None:
            # Dead process detected
            LOG.info('Process for sensor %s has exited with code %s', sensor_id, status)

            sensor = self._sensors[sensor_id]
            self._delete_sensor(sensor_id)
            self._dispatch_trigger_for_sensor_exit(sensor=sensor, exit_code=status)
            eventlet.spawn_n(self._respawn_sensor, sensor_id=sensor_id, sensor=sensor,
                             exit_code=status)
        else:
            sensor_start_time = self._sensor_start_times[sensor_id]
            sensor_respawn_count = self._sensor_respawn_counts[sensor_id]
            successfully_started = ((now - sensor_start_time) >=
                                    SENSOR_SUCCESSFUL_START_THRESHOLD)

            if successfully_started and sensor_respawn_count >= 1:
                # Sensor has been running longer than the threshold, reset the
                # respawn counter so a later failure starts a fresh backoff
                self._sensor_respawn_counts[sensor_id] = 0
'Add a new sensor to the container. :type sensor: ``dict``'
def add_sensor(self, sensor):
    sensor_id = self._get_sensor_id(sensor=sensor)

    if sensor_id in self._sensors:
        LOG.warning('Sensor %s already exists and running.', sensor_id)
        return False

    self._spawn_sensor_process(sensor=sensor)
    LOG.debug('Sensor %s started.', sensor_id)
    self._sensors[sensor_id] = sensor
    return True
'Remove an existing sensor from the container. :type sensor: ``dict``'
def remove_sensor(self, sensor):
    sensor_id = self._get_sensor_id(sensor=sensor)

    if sensor_id not in self._sensors:
        LOG.warning("Sensor %s isn't running in this container.", sensor_id)
        return False

    self._stop_sensor_process(sensor_id=sensor_id)
    LOG.debug('Sensor %s stopped.', sensor_id)
    return True
'Spawn a new process for the provided sensor. New process uses isolated Python binary from a virtual environment belonging to the sensor pack.'
def _spawn_sensor_process(self, sensor):
    sensor_id = self._get_sensor_id(sensor=sensor)
    virtualenv_path = get_sandbox_virtualenv_path(pack=sensor['pack'])
    python_path = get_sandbox_python_binary_path(pack=sensor['pack'])

    if virtualenv_path and not os.path.isdir(virtualenv_path):
        format_values = {'pack': sensor['pack'], 'virtualenv_path': virtualenv_path}
        msg = PACK_VIRTUALENV_DOESNT_EXIST % format_values
        raise Exception(msg)

    trigger_type_refs = sensor['trigger_types'] or []
    trigger_type_refs = ','.join(trigger_type_refs)

    parent_args = json.dumps(sys.argv[1:])

    args = [
        python_path,
        WRAPPER_SCRIPT_PATH,
        '--pack=%s' % sensor['pack'],
        '--file-path=%s' % sensor['file_path'],
        '--class-name=%s' % sensor['class_name'],
        '--trigger-type-refs=%s' % trigger_type_refs,
        '--parent-args=%s' % parent_args,
    ]

    if sensor['poll_interval']:
        args.append('--poll-interval=%s' % sensor['poll_interval'])

    env = os.environ.copy()
    env['PYTHONPATH'] = get_sandbox_python_path(inherit_from_parent=True,
                                                inherit_parent_virtualenv=True)

    # Provide a temporary auth token to the sensor via the environment
    ttl = cfg.CONF.auth.service_token_ttl
    metadata = {
        'service': 'sensors_container',
        'sensor_path': sensor['file_path'],
        'sensor_class': sensor['class_name'],
    }
    temporary_token = create_token(username='sensors_container', ttl=ttl,
                                   metadata=metadata, service=True)

    env[API_URL_ENV_VARIABLE_NAME] = get_full_public_api_url()
    env[AUTH_TOKEN_ENV_VARIABLE_NAME] = temporary_token.token

    cmd = ' '.join(args)
    LOG.debug('Running sensor subprocess (cmd="%s")', cmd)

    try:
        process = subprocess.Popen(args=args, stdin=None, stdout=None, stderr=None,
                                   shell=False, env=env,
                                   preexec_fn=on_parent_exit('SIGTERM'))
    except Exception as e:
        cmd = ' '.join(args)
        message = ('Failed to spawn process for sensor %s ("%s"): %s' %
                   (sensor_id, cmd, str(e)))
        raise Exception(message)

    self._processes[sensor_id] = process
    self._sensors[sensor_id] = sensor
    self._sensor_start_times[sensor_id] = int(time.time())

    self._dispatch_trigger_for_sensor_spawn(sensor=sensor, process=process, cmd=cmd)

    return process
'Stop a sensor process for the provided sensor. :param sensor_id: Sensor ID. :type sensor_id: ``str`` :param exit_timeout: How long to wait for the process to exit after sending the SIGTERM signal. If the process doesn\'t exit in this amount of seconds, the SIGKILL signal will be sent to the process. :type exit_timeout: ``int``'
def _stop_sensor_process(self, sensor_id, exit_timeout=PROCESS_EXIT_TIMEOUT):
    process = self._processes[sensor_id]

    # Remove the internal state for this sensor before terminating it
    self._delete_sensor(sensor_id)
    process.terminate()

    # Wait for the process to exit gracefully
    timeout = 0
    sleep_delay = 1
    while timeout < exit_timeout:
        status = process.poll()
        if status is not None:
            # Process has exited
            break
        timeout += sleep_delay
        time.sleep(sleep_delay)

    if status is None:
        # Process hasn't exited yet, forcefully kill it
        process.kill()
'Method for respawning a sensor which died with a non-zero exit code.'
def _respawn_sensor(self, sensor_id, sensor, exit_code):
    extra = {'sensor_id': sensor_id, 'sensor': sensor}

    if self._stopped:
        LOG.debug('Stopped, not respawning a dead sensor', extra=extra)
        return

    should_respawn = self._should_respawn_sensor(sensor_id=sensor_id, sensor=sensor,
                                                 exit_code=exit_code)
    if not should_respawn:
        LOG.debug('Not respawning a dead sensor', extra=extra)
        return

    LOG.debug('Respawning dead sensor', extra=extra)

    self._sensor_respawn_counts[sensor_id] += 1
    sleep_delay = SENSOR_RESPAWN_DELAY * self._sensor_respawn_counts[sensor_id]
    eventlet.sleep(sleep_delay)

    try:
        self._spawn_sensor_process(sensor=sensor)
    except Exception as e:
        LOG.warning(e.message, exc_info=True)
        # Disable the sensor which we are unable to start
        del self._sensors[sensor_id]
'Return True if the provided sensor should be respawned, False otherwise.'
def _should_respawn_sensor(self, sensor_id, sensor, exit_code):
    if exit_code == 0:
        # Sensor exited cleanly, no need to respawn
        return False

    respawn_count = self._sensor_respawn_counts[sensor_id]
    if respawn_count >= SENSOR_MAX_RESPAWN_COUNTS:
        LOG.debug('Sensor has already been respawned max times, giving up')
        return False

    return True
'Return unique identifier for the provided sensor dict. :type sensor: ``dict``'
def _get_sensor_id(self, sensor):
    sensor_id = sensor['ref']
    return sensor_id
'Delete / reset all the internal state about a particular sensor.'
def _delete_sensor(self, sensor_id):
    for var in self._internal_sensor_state_variables:
        if sensor_id in var:
            del var[sensor_id]
'Extract hash ranges from a string representation like: 0..1024|2048..4096|4096..MAX'
def _create_hash_ranges(self, hash_ranges_repr):
hash_ranges = [] for range_repr in hash_ranges_repr.split(SUB_RANGE_SEPARATOR): hash_range = Range(range_repr.strip()) hash_ranges.append(hash_range) return hash_ranges
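A hypothetical usage sketch, assuming SUB_RANGE_SEPARATOR is '|' and that Range parses a 'start..end' string:

    # Splits the combined representation into three Range objects
    hash_ranges = self._create_hash_ranges('0..1024|2048..4096|4096..MAX')
    # -> [Range('0..1024'), Range('2048..4096'), Range('4096..MAX')]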
'Run all sensors as determined by sensors_partitioner.'
def run_sensors(self):
sensors = self._sensors_partitioner.get_sensors() if sensors: LOG.info('Setting up container to run %d sensors.', len(sensors)) LOG.info('\tSensors list - %s.', [self._get_sensor_ref(sensor) for sensor in sensors]) sensors_to_run = [] for sensor in sensors: sensors_to_run.append(self._to_sensor_object(sensor)) LOG.info('(PID:%s) SensorContainer started.', os.getpid()) self._setup_sigterm_handler() self._spin_container_and_wait(sensors_to_run)
'All sensors are supported'
def is_sensor_owner(self, sensor_db):
return (sensor_db is not None)
'No other sensor is supported, just the single sensor which was previously loaded.'
def is_sensor_owner(self, sensor_db):
return False
'Retrieve an instance of a logger to be used by the sensor class.'
def get_logger(self, name):
logger_name = ('%s.%s' % (self._sensor_wrapper._logger.name, name)) logger = logging.getLogger(logger_name) logger.propagate = True return logger
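A usage sketch from inside a sensor class (the sensor and logger names are hypothetical):

    # The returned logger is a child of the wrapper's logger, so records propagate up to it
    logger = self.sensor_service.get_logger(name='SampleSensor')
    logger.debug('Polling the external service')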
'Method which dispatches the trigger. :param trigger: Full name / reference of the trigger. :type trigger: ``str`` :param payload: Trigger payload. :type payload: ``dict`` :param trace_tag: Trace tag used to track the TriggerInstance. :type trace_tag: ``str``'
def dispatch(self, trigger, payload=None, trace_tag=None):
trace_context = (TraceContext(trace_tag=trace_tag) if trace_tag else None) self._logger.debug('Added trace_context %s to trigger %s.', trace_context, trigger) self.dispatch_with_context(trigger, payload=payload, trace_context=trace_context)
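A usage sketch from inside a sensor's poll() method (trigger name, payload, and trace tag are hypothetical):

    payload = {'temperature': 21.5}
    # The trace tag makes it possible to correlate the resulting TriggerInstance,
    # matched rules, and action executions in a single trace later on
    self.sensor_service.dispatch(trigger='examples.temperature_reading', payload=payload,
                                 trace_tag='reading-42')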
'Method which dispatches the trigger. :param trigger: Full name / reference of the trigger. :type trigger: ``str`` :param payload: Trigger payload. :type payload: ``dict`` :param trace_context: Trace context to associate with the Trigger. :type trace_context: ``st2common.models.api.trace.TraceContext``'
def dispatch_with_context(self, trigger, payload=None, trace_context=None):
is_valid = True try: validate_trigger_payload(trigger_type_ref=trigger, payload=payload) except Exception as e: is_valid = False self._logger.warning(('Failed to validate payload (%s) for trigger "%s": %s' % (str(payload), trigger, str(e)))) if ((not is_valid) and cfg.CONF.system.validate_trigger_payload): self._logger.warning(('Trigger payload validation failed and validation is enabled, not dispatching a trigger "%s" (%s)' % (trigger, str(payload)))) return None self._logger.debug('Dispatching trigger %s with payload %s.', trigger, payload) self._dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)
':param pack: Name of the pack this sensor belongs to. :type pack: ``str`` :param file_path: Path to the sensor module file. :type file_path: ``str`` :param class_name: Sensor class name. :type class_name: ``str`` :param trigger_types: A list of references to trigger types which belong to this sensor. :type trigger_types: ``list`` of ``str`` :param poll_interval: Sensor poll interval (in seconds). :type poll_interval: ``int`` or ``None`` :param parent_args: Command line arguments passed to the parent process. :type parent_args: ``list``'
def __init__(self, pack, file_path, class_name, trigger_types, poll_interval=None, parent_args=None):
self._pack = pack self._file_path = file_path self._class_name = class_name self._trigger_types = (trigger_types or []) self._poll_interval = poll_interval self._parent_args = (parent_args or []) self._trigger_names = {} try: config.parse_args(args=self._parent_args) except Exception: pass username = (cfg.CONF.database.username if hasattr(cfg.CONF.database, 'username') else None) password = (cfg.CONF.database.password if hasattr(cfg.CONF.database, 'password') else None) db_setup_with_retry(cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port, username=username, password=password, ssl=cfg.CONF.database.ssl, ssl_keyfile=cfg.CONF.database.ssl_keyfile, ssl_certfile=cfg.CONF.database.ssl_certfile, ssl_cert_reqs=cfg.CONF.database.ssl_cert_reqs, ssl_ca_certs=cfg.CONF.database.ssl_ca_certs, ssl_match_hostname=cfg.CONF.database.ssl_match_hostname) self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger, update_handler=self._handle_update_trigger, delete_handler=self._handle_delete_trigger, trigger_types=self._trigger_types, queue_suffix=('sensorwrapper_%s_%s' % (self._pack, self._class_name)), exclusive=True) self._logger = logging.getLogger(('SensorWrapper.%s.%s' % (self._pack, self._class_name))) logging.setup(cfg.CONF.sensorcontainer.logging) if ('--debug' in self._parent_args): set_log_level_for_all_loggers() self._sensor_instance = self._get_sensor_instance()
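A sketch of instantiating the wrapper directly, mirroring the CLI flags the container builds when spawning the subprocess (all values are hypothetical):

    wrapper = SensorWrapper(pack='examples',
                            file_path='/opt/stackstorm/packs/examples/sensors/sample_sensor.py',
                            class_name='SampleSensor',
                            trigger_types=['examples.event'],
                            poll_interval=30,
                            parent_args=['--config-file', '/etc/st2/st2.conf'])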
'Retrieve instance of a sensor class.'
def _get_sensor_instance(self):
(_, filename) = os.path.split(self._file_path) (module_name, _) = os.path.splitext(filename) try: sensor_class = loader.register_plugin_class(base_class=Sensor, file_path=self._file_path, class_name=self._class_name) except Exception as e: tb_msg = traceback.format_exc() msg = ('Failed to load sensor class from file "%s" (sensor file most likely doesn\'t exist or contains invalid syntax): %s' % (self._file_path, str(e))) msg += ('\n\n' + tb_msg) exc_cls = type(e) raise exc_cls(msg) if (not sensor_class): raise ValueError(('Sensor module is missing a class with name "%s"' % self._class_name)) sensor_class_kwargs = {} sensor_class_kwargs['sensor_service'] = SensorService(sensor_wrapper=self) sensor_config = self._get_sensor_config() sensor_class_kwargs['config'] = sensor_config if (self._poll_interval and issubclass(sensor_class, PollingSensor)): sensor_class_kwargs['poll_interval'] = self._poll_interval try: sensor_instance = sensor_class(**sensor_class_kwargs) except Exception: self._logger.exception(('Failed to instantiate "%s" sensor class' % self._class_name)) raise Exception(('Failed to instantiate "%s" sensor class' % self._class_name)) return sensor_instance
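For reference, a minimal sensor class the loader could register from the pack file might look like this (a sketch; the class name and trigger reference are assumptions):

    from st2reactor.sensor.base import PollingSensor

    class SampleSensor(PollingSensor):
        def setup(self):
            # One-time initialization, e.g. opening connections
            pass

        def poll(self):
            # Invoked every poll_interval seconds
            self.sensor_service.dispatch(trigger='examples.event', payload={'ok': True})

        def cleanup(self):
            pass

        def add_trigger(self, trigger):
            pass

        def update_trigger(self, trigger):
            pass

        def remove_trigger(self, trigger):
            pass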
':param collection_interval: How often to check database for old data and perform garbage collection. :type collection_interval: ``int``'
def __init__(self, collection_interval=DEFAULT_COLLECTION_INTERVAL):
self._collection_interval = collection_interval self._action_executions_ttl = cfg.CONF.garbagecollector.action_executions_ttl self._trigger_instances_ttl = cfg.CONF.garbagecollector.trigger_instances_ttl self._validate_ttl_values() self._running = True
'Validate that a user has supplied reasonable TTL values.'
def _validate_ttl_values(self):
if (self._action_executions_ttl and (self._action_executions_ttl < MINIMUM_TTL_DAYS)): raise ValueError(('Minimum possible TTL in days is %s' % MINIMUM_TTL_DAYS)) if (self._trigger_instances_ttl and (self._trigger_instances_ttl < MINIMUM_TTL_DAYS)): raise ValueError(('Minimum possible TTL in days is %s' % MINIMUM_TTL_DAYS))
'Purge action executions and corresponding live actions which match the criteria defined in the config.'
def _purge_action_executions(self):
LOG.info('Performing garbage collection for action executions') utc_now = get_datetime_utc_now() timestamp = (utc_now - datetime.timedelta(days=self._action_executions_ttl)) if (timestamp > (utc_now - datetime.timedelta(days=MINIMUM_TTL_DAYS))): raise ValueError('Calculated timestamp would violate the minimum TTL constraint') timestamp_str = isotime.format(dt=timestamp) LOG.info(('Deleting action executions older than: %s' % timestamp_str)) assert (timestamp < utc_now) try: purge_executions(logger=LOG, timestamp=timestamp) except Exception as e: LOG.exception(('Failed to delete executions: %s' % str(e))) return True
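A worked example of the cutoff computation, assuming an action_executions_ttl of 30 days and a MINIMUM_TTL_DAYS of 7:

    utc_now = get_datetime_utc_now()                    # e.g. 2017-06-01T12:00:00Z
    timestamp = utc_now - datetime.timedelta(days=30)   # 2017-05-02T12:00:00Z
    # 2017-05-02 is older than 2017-05-25 (utc_now - 7 days), so the minimum TTL
    # check passes and executions older than the timestamp are purged
    purge_executions(logger=LOG, timestamp=timestamp)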