'Tear down the syndic minion'
def destroy(self):
    super(Syndic, self).destroy()
    if hasattr(self, u'local'):
        del self.local
    if hasattr(self, u'forward_events'):
        self.forward_events.stop()
'Spawn all the coroutines which will sign in the syndics'
def _spawn_syndics(self):
    self._syndics = OrderedDict()
    masters = self.opts[u'master']
    if not isinstance(masters, list):
        masters = [masters]
    for master in masters:
        s_opts = copy.copy(self.opts)
        s_opts[u'master'] = master
        self._syndics[master] = self._connect_syndic(s_opts)
'Create a syndic, and asynchronously connect it to a master'
@tornado.gen.coroutine
def _connect_syndic(self, opts):
    last = 0
    auth_wait = opts[u'acceptance_wait_time']
    failed = False
    while True:
        log.debug(u'Syndic attempting to connect to %s', opts[u'master'])
        try:
            syndic = Syndic(opts,
                            timeout=self.SYNDIC_CONNECT_TIMEOUT,
                            safe=False,
                            io_loop=self.io_loop)
            yield syndic.connect_master(failed=failed)
            syndic.tune_in_no_block()
            syndic.fire_master_syndic_start()
            log.info(u'Syndic successfully connected to %s', opts[u'master'])
            break
        except SaltClientError as exc:
            failed = True
            log.error(u'Error while bringing up syndic for multi-syndic. Is the master at %s responding?', opts[u'master'])
            last = time.time()
            if auth_wait < self.max_auth_wait:
                auth_wait += self.auth_wait
            yield tornado.gen.sleep(auth_wait)
        except KeyboardInterrupt:
            raise
        except:
            failed = True
            log.critical(u'Unexpected error while connecting to %s', opts[u'master'], exc_info=True)
    raise tornado.gen.Return(syndic)
'Mark a master as dead. This will start the sign-in routine'
def _mark_master_dead(self, master):
    if self._syndics[master].done():
        syndic = self._syndics[master].result()
        self._syndics[master] = syndic.reconnect()
    else:
        log.info(u'Attempting to mark %s as dead, although it is already marked dead', master)
'Wrapper to call a given func on a syndic, best effort to get the one you asked for'
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
    if kwargs is None:
        kwargs = {}
    for master, syndic_future in self.iter_master_options(master_id):
        if not syndic_future.done() or syndic_future.exception():
            log.error(u'Unable to call %s on %s, that syndic is not connected', func, master)
            continue
        try:
            getattr(syndic_future.result(), func)(*args, **kwargs)
            return
        except SaltClientError:
            log.error(u'Unable to call %s on %s, trying another...', func, master)
            self._mark_master_dead(master)
            continue
    log.critical(u'Unable to call %s on any masters!', func)
'Wrapper to call \'_return_pub_multi\' on a syndic, best effort to get the one you asked for'
def _return_pub_syndic(self, values, master_id=None):
    func = u'_return_pub_multi'
    for master, syndic_future in self.iter_master_options(master_id):
        if not syndic_future.done() or syndic_future.exception():
            log.error(u'Unable to call %s on %s, that syndic is not connected', func, master)
            continue
        future, data = self.pub_futures.get(master, (None, None))
        if future is not None:
            if not future.done():
                if master == master_id:
                    return False
                else:
                    continue
            elif future.exception():
                log.error(u'Unable to call %s on %s, trying another...', func, master)
                self._mark_master_dead(master)
                del self.pub_futures[master]
                self.delayed.extend(data)
                continue
        future = getattr(syndic_future.result(), func)(values)
        self.pub_futures[master] = (future, values)
        return True
    return False
'Iterate (in order) over your options for master'
def iter_master_options(self, master_id=None):
    masters = list(self._syndics.keys())
    if self.opts[u'syndic_failover'] == u'random':
        shuffle(masters)
    if master_id not in self._syndics:
        master_id = masters.pop(0)
    else:
        masters.remove(master_id)
    while True:
        yield (master_id, self._syndics[master_id])
        if len(masters) == 0:
            break
        master_id = masters.pop(0)
'Lock onto the publisher. This is the main event loop for the syndic'
def tune_in(self):
    self._spawn_syndics()
    self.local = salt.client.get_local_client(self.opts[u'_minion_conf_file'], io_loop=self.io_loop)
    self.local.event.subscribe(u'')
    log.debug(u"SyndicManager '%s' trying to tune in", self.opts[u'id'])
    self.job_rets = {}
    self.raw_events = []
    self._reset_event_aggregation()
    future = self.local.event.set_event_handler(self._process_event)
    self.io_loop.add_future(future, self.reconnect_event_bus)
    self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
                                                          self.opts[u'syndic_event_forward_timeout'] * 1000,
                                                          io_loop=self.io_loop)
    self.forward_events.start()
    enable_sigusr1_handler()
    self.io_loop.start()
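The forwarding loop set up above leans on tornado's PeriodicCallback, which re-runs a callback at a fixed millisecond interval on the io_loop. A minimal standalone sketch of that pattern (the buffer and the five-second interval are illustrative, not Salt settings):

import tornado.ioloop

buffered_events = []

def flush_events():
    # Forward whatever accumulated since the previous tick, then clear the buffer.
    if buffered_events:
        print('forwarding {0} events'.format(len(buffered_events)))
        del buffered_events[:]

# Interval is milliseconds, mirroring syndic_event_forward_timeout * 1000 above.
forwarder = tornado.ioloop.PeriodicCallback(flush_events, 5 * 1000)
forwarder.start()
# tornado.ioloop.IOLoop.current().start() would then block and drive the callback;
# forwarder.stop() cancels it.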
'Takes the data passed to a top file environment and determines if the data matches this minion'
def confirm_top(self, match, data, nodegroups=None):
    matcher = u'compound'
    if not data:
        log.error(u'Received bad data when setting the match from the top file')
        return False
    for item in data:
        if isinstance(item, dict):
            if u'match' in item:
                matcher = item[u'match']
    if hasattr(self, matcher + u'_match'):
        funcname = u'{0}_match'.format(matcher)
        if matcher == u'nodegroup':
            return getattr(self, funcname)(match, nodegroups)
        return getattr(self, funcname)(match)
    else:
        log.error(u'Attempting to match with unknown matcher: %s', matcher)
        return False
'Returns true if the passed glob matches the id'
def glob_match(self, tgt):
    if not isinstance(tgt, six.string_types):
        return False
    return fnmatch.fnmatch(self.opts[u'id'], tgt)
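For reference, the glob comparison above is plain stdlib fnmatch of the target against the minion id (the id value below is an example):

import fnmatch

minion_id = 'web01.example.com'   # stands in for self.opts[u'id']
print(fnmatch.fnmatch(minion_id, 'web*'))           # True
print(fnmatch.fnmatch(minion_id, '*.example.com'))  # True
print(fnmatch.fnmatch(minion_id, 'db*'))            # False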
'Returns true if the passed pcre regex matches'
def pcre_match(self, tgt):
return bool(re.match(tgt, self.opts[u'id']))
'Determines if this host is on the list'
def list_match(self, tgt):
    if isinstance(tgt, six.string_types):
        tgt = tgt.split(u',')
    return bool(self.opts[u'id'] in tgt)
'Reads in the grains glob match'
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
    log.debug(u'grains target: %s', tgt)
    if delimiter not in tgt:
        log.error(u'Got insufficient arguments for grains match statement from master')
        return False
    return salt.utils.subdict_match(self.opts[u'grains'], tgt, delimiter=delimiter)
'Matches a grain based on regex'
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
    log.debug(u'grains pcre target: %s', tgt)
    if delimiter not in tgt:
        log.error(u'Got insufficient arguments for grains pcre match statement from master')
        return False
    return salt.utils.subdict_match(self.opts[u'grains'], tgt, delimiter=delimiter, regex_match=True)
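Both grain matchers above delegate to salt.utils.subdict_match, which walks the grains dictionary along the delimiter and compares the leaf. The helper below is a simplified, hypothetical stand-in (glob on the leaf only, no regex, no wildcard keys) just to show the traversal:

import fnmatch

def simple_subdict_match(data, expr, delimiter=':'):
    # 'os:Ubu*' -> descend into data['os'] and glob-match the leaf value.
    parts = expr.split(delimiter)
    path, pattern = parts[:-1], parts[-1]
    current = data
    for key in path:
        if not isinstance(current, dict) or key not in current:
            return False
        current = current[key]
    return fnmatch.fnmatch(str(current), pattern)

grains = {'os': 'Ubuntu', 'ip_interfaces': {'eth0': '10.0.0.5'}}
print(simple_subdict_match(grains, 'os:Ubu*'))                    # True
print(simple_subdict_match(grains, 'ip_interfaces:eth0:10.0.*'))  # True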
'Match based on the local data store on the minion'
def data_match(self, tgt):
    if self.functions is None:
        utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(self.opts, utils=utils)
    comps = tgt.split(u':')
    if len(comps) < 2:
        return False
    val = self.functions[u'data.getval'](comps[0])
    if val is None:
        return False
    if isinstance(val, list):
        for member in val:
            if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
                return True
        return False
    if isinstance(val, dict):
        if comps[1] in val:
            return True
        return False
    return bool(fnmatch.fnmatch(val, comps[1]))
'Reads in the pillar glob match'
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
    log.debug(u'pillar target: %s', tgt)
    if delimiter not in tgt:
        log.error(u'Got insufficient arguments for pillar match statement from master')
        return False
    return salt.utils.subdict_match(self.opts[u'pillar'], tgt, delimiter=delimiter)
'Reads in the pillar pcre match'
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
    log.debug(u'pillar PCRE target: %s', tgt)
    if delimiter not in tgt:
        log.error(u'Got insufficient arguments for pillar PCRE match statement from master')
        return False
    return salt.utils.subdict_match(self.opts[u'pillar'], tgt, delimiter=delimiter, regex_match=True)
'Reads in the pillar match, no globbing, no PCRE'
def pillar_exact_match(self, tgt, delimiter=u':'):
    log.debug(u'pillar target: %s', tgt)
    if delimiter not in tgt:
        log.error(u'Got insufficient arguments for pillar match statement from master')
        return False
    return salt.utils.subdict_match(self.opts[u'pillar'], tgt, delimiter=delimiter, exact_match=True)
'Matches based on IP address or CIDR notation'
def ipcidr_match(self, tgt):
    try:
        # Target is a plain address
        tgt = ipaddress.ip_address(tgt)
    except ValueError:
        try:
            # Target is a network (CIDR)
            tgt = ipaddress.ip_network(tgt)
        except ValueError:
            log.error(u'Invalid IP/CIDR target: %s', tgt)
            return []
    proto = u'ipv{0}'.format(tgt.version)
    grains = self.opts[u'grains']
    if proto not in grains:
        match = False
    elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
        match = str(tgt) in grains[proto]
    else:
        match = salt.utils.network.in_subnet(tgt, grains[proto])
    return match
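The address-versus-network split above comes straight from the stdlib ipaddress module; both constructors raise ValueError when the string does not parse. The same checks in isolation, with the grains lookup simulated by a plain list:

import ipaddress

def ip_or_cidr_matches(tgt, minion_addresses):
    try:
        tgt = ipaddress.ip_address(tgt)      # plain address, e.g. '10.0.0.5'
    except ValueError:
        tgt = ipaddress.ip_network(tgt)      # CIDR, e.g. '10.0.0.0/24'
    if isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
        return str(tgt) in minion_addresses
    return any(ipaddress.ip_address(addr) in tgt for addr in minion_addresses)

addresses = ['10.0.0.5', '192.168.1.20']     # stands in for grains[u'ipv4']
print(ip_or_cidr_matches('10.0.0.5', addresses))       # True
print(ip_or_cidr_matches('10.0.0.0/24', addresses))    # True
print(ip_or_cidr_matches('172.16.0.0/16', addresses))  # False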
'Matches based on range cluster'
def range_match(self, tgt):
    if HAS_RANGE:
        range_ = seco.range.Range(self.opts[u'range_server'])
        try:
            return self.opts[u'grains'][u'fqdn'] in range_.expand(tgt)
        except seco.range.RangeException as exc:
            log.debug(u'Range exception in compound match: %s', exc)
            return False
    return False
'Runs the compound target check'
def compound_match(self, tgt):
if ((not isinstance(tgt, six.string_types)) and (not isinstance(tgt, (list, tuple)))): log.error(u'Compound target received that is neither string, list nor tuple') return False log.debug(u'compound_match: %s ? %s', self.opts[u'id'], tgt) ref = {u'G': u'grain', u'P': u'grain_pcre', u'I': u'pillar', u'J': u'pillar_pcre', u'L': u'list', u'N': None, u'S': u'ipcidr', u'E': u'pcre'} if HAS_RANGE: ref[u'R'] = u'range' results = [] opers = [u'and', u'or', u'not', u'(', u')'] if isinstance(tgt, six.string_types): words = tgt.split() else: words = tgt for word in words: target_info = salt.utils.minions.parse_target(word) if (word in opers): if results: if ((results[(-1)] == u'(') and (word in (u'and', u'or'))): log.error(u'Invalid beginning operator after "(": %s', word) return False if (word == u'not'): if (not (results[(-1)] in (u'and', u'or', u'('))): results.append(u'and') results.append(word) else: if (word not in [u'(', u'not']): log.error(u'Invalid beginning operator: %s', word) return False results.append(word) elif (target_info and target_info[u'engine']): if (u'N' == target_info[u'engine']): log.error(u'Detected nodegroup expansion failure of "%s"', word) return False engine = ref.get(target_info[u'engine']) if (not engine): log.error(u'Unrecognized target engine "%s" for target expression "%s"', target_info[u'engine'], word) return False engine_args = [target_info[u'pattern']] engine_kwargs = {} if target_info[u'delimiter']: engine_kwargs[u'delimiter'] = target_info[u'delimiter'] results.append(str(getattr(self, u'{0}_match'.format(engine))(*engine_args, **engine_kwargs))) else: results.append(str(self.glob_match(word))) results = u' '.join(results) log.debug(u'compound_match %s ? "%s" => "%s"', self.opts[u'id'], tgt, results) try: return eval(results) except Exception: log.error(u'Invalid compound target: %s for results: %s', tgt, results) return False return False
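The compound matcher above reduces each word of the expression to the string 'True' or 'False' (dispatching on the G@/P@/I@/L@/E@... prefix to the per-engine matchers) and then evaluates the joined boolean expression. A condensed, hypothetical illustration of that reduction with the engines stubbed out for a minion 'web01' whose os grain is Ubuntu:

import fnmatch

def grain_match(pattern):
    return pattern == 'os:Ubuntu'            # stubbed grain lookup

def list_match(pattern):
    return 'web01' in pattern.split(',')

engines = {'G': grain_match, 'L': list_match}

def compound(expr):
    results = []
    for word in expr.split():
        if word in ('and', 'or', 'not', '(', ')'):
            results.append(word)
        elif len(word) > 2 and word[1] == '@' and word[0] in engines:
            results.append(str(engines[word[0]](word[2:])))
        else:
            results.append(str(fnmatch.fnmatch('web01', word)))
    # Same trusted-input eval the real matcher uses on the reduced expression.
    return eval(' '.join(results))

print(compound('G@os:Ubuntu and not L@db01,db02'))  # True
print(compound('G@os:Debian or web*'))              # True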
'This is a compatibility matcher and is NOT called when using nodegroups for remote execution, but is called when the nodegroups matcher is used in states'
def nodegroup_match(self, tgt, nodegroups):
    if tgt in nodegroups:
        return self.compound_match(salt.utils.minions.nodegroup_comp(tgt, nodegroups))
    return False
'Helper function to return the correct type of object'
def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None):
return ProxyMinion(opts, timeout, safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue)
'Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check Minion._post_master_init to see if those changes need to be propagated. ProxyMinions need a significantly different post master setup, which is why the differences are not factored out into separate helper functions.'
@tornado.gen.coroutine
def _post_master_init(self, master):
log.debug(u'subclassed _post_master_init') if self.connected: self.opts[u'master'] = master self.opts[u'pillar'] = (yield salt.pillar.get_async_pillar(self.opts, self.opts[u'grains'], self.opts[u'id'], saltenv=self.opts[u'environment'], pillarenv=self.opts.get(u'pillarenv')).compile_pillar()) if ((u'proxy' not in self.opts[u'pillar']) and (u'proxy' not in self.opts)): errmsg = (((u'No proxy key found in pillar or opts for id ' + self.opts[u'id']) + u'. ') + u'Check your pillar/opts configuration and contents. Salt-proxy aborted.') log.error(errmsg) self._running = False raise SaltSystemExit(code=(-1), msg=errmsg) if (u'proxy' not in self.opts): self.opts[u'proxy'] = self.opts[u'pillar'][u'proxy'] fq_proxyname = self.opts[u'proxy'][u'proxytype'] (self.functions, self.returners, self.function_errors, self.executors) = self._load_modules() self.functions[u'saltutil.sync_all'](saltenv=self.opts[u'environment']) self.utils = salt.loader.utils(self.opts) self.proxy = salt.loader.proxy(self.opts, utils=self.utils) (self.functions, self.returners, self.function_errors, self.executors) = self._load_modules() self.functions.pack[u'__proxy__'] = self.proxy self.proxy.pack[u'__salt__'] = self.functions self.proxy.pack[u'__ret__'] = self.returners self.proxy.pack[u'__pillar__'] = self.opts[u'pillar'] self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.proxy.pack[u'__utils__'] = self.utils self.proxy.reload_modules() self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager, proxy=self.proxy) if ((u'{0}.init'.format(fq_proxyname) not in self.proxy) or (u'{0}.shutdown'.format(fq_proxyname) not in self.proxy)): errmsg = (u'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + u'Check your proxymodule. 
Salt-proxy aborted.') log.error(errmsg) self._running = False raise SaltSystemExit(code=(-1), msg=errmsg) proxy_init_fn = self.proxy[(fq_proxyname + u'.init')] proxy_init_fn(self.opts) self.opts[u'grains'] = salt.loader.grains(self.opts, proxy=self.proxy) self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() self.matcher = Matcher(self.opts, self.functions) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.get_uid(user=self.opts.get(u'user', None)) self.proc_dir = get_proc_dir(self.opts[u'cachedir'], uid=uid) if (self.connected and self.opts[u'pillar']): (self.functions, self.returners, self.function_errors, self.executors) = self._load_modules() if hasattr(self, u'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if (not hasattr(self, u'schedule')): self.schedule = salt.utils.schedule.Schedule(self.opts, self.functions, self.returners, cleanup=[master_event(type=u'alive')], proxy=self.proxy) if (self.opts[u'mine_enabled'] and (u'mine.update' in self.functions)): self.schedule.add_job({u'__mine_interval': {u'function': u'mine.update', u'minutes': self.opts[u'mine_interval'], u'jid_include': True, u'maxrunning': 2, u'return_job': self.opts.get(u'mine_return_job', False)}}, persist=True) log.info(u'Added mine.update to scheduler') else: self.schedule.delete_job(u'__mine_interval', persist=True) if ((self.opts[u'transport'] != u'tcp') and (self.opts[u'master_alive_interval'] > 0)): self.schedule.add_job({master_event(type=u'alive', master=self.opts[u'master']): {u'function': u'status.master', u'seconds': self.opts[u'master_alive_interval'], u'jid_include': True, u'maxrunning': 1, u'return_job': False, u'kwargs': {u'master': self.opts[u'master'], u'connected': True}}}, persist=True) if (self.opts[u'master_failback'] and (u'master_list' in self.opts) and (self.opts[u'master'] != self.opts[u'master_list'][0])): self.schedule.add_job({master_event(type=u'failback'): {u'function': u'status.ping_master', u'seconds': self.opts[u'master_failback_interval'], u'jid_include': True, u'maxrunning': 1, u'return_job': False, u'kwargs': {u'master': self.opts[u'master_list'][0]}}}, persist=True) else: self.schedule.delete_job(master_event(type=u'failback'), persist=True) else: self.schedule.delete_job(master_event(type=u'alive', master=self.opts[u'master']), persist=True) self.schedule.delete_job(master_event(type=u'failback'), persist=True) proxy_alive_fn = (fq_proxyname + u'.alive') if ((proxy_alive_fn in self.proxy) and (u'status.proxy_reconnect' in self.functions) and self.opts.get(u'proxy_keep_alive', True)): self.schedule.add_job({u'__proxy_keepalive': {u'function': u'status.proxy_reconnect', u'minutes': self.opts.get(u'proxy_keep_alive_interval', 1), u'jid_include': True, u'maxrunning': 1, u'return_job': False, u'kwargs': {u'proxy_name': fq_proxyname}}}, persist=True) self.schedule.enable_schedule() else: self.schedule.delete_job(u'__proxy_keepalive', persist=True) self.functions[u'saltutil.sync_grains'](saltenv=u'base') self.grains_cache = self.opts[u'grains'] self.ready = True
'Execute salt-cp'
def run(self):
    self.parse_args()
    self.setup_logfile_logger()
    verify_log(self.config)
    cp_ = SaltCP(self.config)
    cp_.run()
'Get a list of all specified files'
def _recurse(self, path):
    files = {}
    empty_dirs = []
    try:
        sub_paths = os.listdir(path)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # Path does not exist
            sys.stderr.write('{0} does not exist\n'.format(path))
            sys.exit(42)
        elif exc.errno in (errno.EINVAL, errno.ENOTDIR):
            # Path is a file, not a directory
            files[path] = self._mode(path)
    else:
        if not sub_paths:
            empty_dirs.append(path)
        for fn_ in sub_paths:
            (files_, empty_dirs_) = self._recurse(os.path.join(path, fn_))
            files.update(files_)
            empty_dirs.extend(empty_dirs_)
    return (files, empty_dirs)
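The recursion above builds a {path: mode} map for every file plus a list of empty directories. A rough stdlib-only equivalent using os.walk, for orientation (the mode formatting here is illustrative and not what SaltCP._mode returns):

import os
import stat

def list_files(root):
    files, empty_dirs = {}, []
    if os.path.isfile(root):
        return {root: oct(stat.S_IMODE(os.stat(root).st_mode))}, []
    for dirpath, dirnames, filenames in os.walk(root):
        if not dirnames and not filenames:
            empty_dirs.append(dirpath)
        for name in filenames:
            full = os.path.join(dirpath, name)
            files[full] = oct(stat.S_IMODE(os.stat(full).st_mode))
    return files, empty_dirs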
'Make the salt client call'
def run(self):
(files, empty_dirs) = self._list_files() dest = self.opts['dest'] gzip = self.opts['gzip'] tgt = self.opts['tgt'] timeout = self.opts['timeout'] selected_target_option = self.opts.get('selected_target_option') dest_is_dir = (bool(empty_dirs) or (len(files) > 1) or bool(re.search('[\\\\/]$', dest))) reader = (salt.utils.gzip_util.compress_file if gzip else salt.utils.itertools.read_file) minions = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=(selected_target_option or 'glob')) local = salt.client.get_local_client(self.opts['conf_file']) def _get_remote_path(fn_): if (fn_ in self.opts['src']): return (os.path.join(dest, os.path.basename(fn_)) if dest_is_dir else dest) else: for path in self.opts['src']: relpath = os.path.relpath(fn_, (path + os.sep)) if relpath.startswith(parent): continue return os.path.join(dest, os.path.basename(path), relpath) else: log.error('Failed to find remote path for %s', fn_) return None ret = {} parent = ('..' + os.sep) for (fn_, mode) in six.iteritems(files): remote_path = _get_remote_path(fn_) index = 1 failed = {} for chunk in reader(fn_, chunk_size=self.opts['salt_cp_chunk_size']): chunk = base64.b64encode(salt.utils.stringutils.to_bytes(chunk)) append = (index > 1) log.debug("Copying %s to %starget '%s' as %s%s", fn_, ('{0} '.format(selected_target_option) if selected_target_option else ''), tgt, remote_path, (' (chunk #{0})'.format(index) if append else '')) args = [tgt, 'cp.recv', [remote_path, chunk, append, gzip, mode], timeout] if (selected_target_option is not None): args.append(selected_target_option) result = local.cmd(*args) if (not result): msg = 'Publish failed.{0} It may be necessary to decrease salt_cp_chunk_size (current value: {1})'.format((' File partially transferred.' if (index > 1) else ''), self.opts['salt_cp_chunk_size']) for minion in minions: ret.setdefault(minion, {})[remote_path] = msg break for (minion_id, minion_ret) in six.iteritems(result): ret.setdefault(minion_id, {})[remote_path] = minion_ret if ((minion_ret is not True) and (minion_id not in failed)): failed[minion_id] = minion_ret index += 1 for (minion_id, msg) in six.iteritems(failed): ret[minion_id][remote_path] = msg for dirname in empty_dirs: remote_path = _get_remote_path(dirname) log.debug("Creating empty dir %s on %starget '%s'", dirname, ('{0} '.format(selected_target_option) if selected_target_option else ''), tgt) args = [tgt, 'cp.recv', [remote_path, None], timeout] if (selected_target_option is not None): args.append(selected_target_option) for (minion_id, minion_ret) in six.iteritems(local.cmd(*args)): ret.setdefault(minion_id, {})[remote_path] = minion_ret salt.output.display_output(ret, self.opts.get('output', 'nested'), self.opts)
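The copy loop above streams each file in fixed-size chunks, optionally compresses, base64-encodes every chunk, and publishes it to the minions via cp.recv with an append flag from the second chunk onward. A small sketch of just the chunk-preparation side under those assumptions (stdlib only; iter_chunks and its defaults are illustrative helpers, not Salt's, and it compresses each chunk independently rather than the whole stream the way salt.utils.gzip_util does):

import base64
import zlib

def iter_chunks(path, chunk_size=65536, gzip_level=0):
    # Yield (index, append_flag, b64_payload) tuples ready to hand to cp.recv.
    with open(path, 'rb') as fh:
        index = 1
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            if gzip_level:
                chunk = zlib.compress(chunk, gzip_level)
            yield index, index > 1, base64.b64encode(chunk)
            index += 1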
'Return a list of minions to use for the batch run'
def __gather_minions(self):
    args = [self.opts['tgt'], 'test.ping', [], self.opts['timeout']]
    selected_target_option = self.opts.get('selected_target_option', None)
    if selected_target_option is not None:
        args.append(selected_target_option)
    else:
        args.append(self.opts.get('tgt_type', 'glob'))
    self.pub_kwargs['yield_pub_data'] = True
    ping_gen = self.local.cmd_iter(gather_job_timeout=self.opts['gather_job_timeout'], *args, **self.pub_kwargs)
    fret = set()
    nret = set()
    for ret in ping_gen:
        if 'minions' in ret and 'jid' in ret:
            for minion in ret['minions']:
                nret.add(minion)
            continue
        else:
            try:
                m = next(six.iterkeys(ret))
            except StopIteration:
                if not self.quiet:
                    salt.utils.print_cli('No minions matched the target.')
                break
            if m is not None:
                fret.add(m)
    return (list(fret), ping_gen, nret.difference(fret))
'Return the active number of minions to maintain'
def get_bnum(self):
    partition = lambda x: (float(x) / 100.0) * len(self.minions)
    try:
        if '%' in self.opts['batch']:
            res = partition(float(self.opts['batch'].strip('%')))
            if res < 1:
                return int(math.ceil(res))
            else:
                return int(res)
        else:
            return int(self.opts['batch'])
    except ValueError:
        if not self.quiet:
            salt.utils.print_cli('Invalid batch data sent: {0}\nData must be in the form of %10, 10% or 3'.format(self.opts['batch']))
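The batch-size arithmetic above accepts either an absolute count ('3') or a percentage ('10%' or '%10'), rounding a sub-1 percentage result up so at least one minion runs. The same calculation in isolation:

import math

def batch_size(batch, minion_count):
    if '%' in batch:
        res = float(batch.strip('%')) / 100.0 * minion_count
        return int(math.ceil(res)) if res < 1 else int(res)
    return int(batch)

print(batch_size('10%', 50))  # 5
print(batch_size('1%', 50))   # 0.5, rounded up to 1
print(batch_size('3', 50))    # 3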
'Execute the batch run'
def run(self):
args = [[], self.opts['fun'], self.opts['arg'], self.opts['timeout'], 'list'] bnum = self.get_bnum() if (not self.minions): return to_run = copy.deepcopy(self.minions) active = [] ret = {} iters = [] bwait = self.opts.get('batch_wait', 0) wait = [] if self.options: show_jid = self.options.show_jid show_verbose = self.options.verbose else: show_jid = False show_verbose = False minion_tracker = {} for down_minion in self.down_minions: salt.utils.print_cli('Minion {0} did not respond. No job will be sent.'.format(down_minion)) while (len(ret) < len(self.minions)): next_ = [] if (bwait and wait): self.__update_wait(wait) if ((len(to_run) <= (bnum - len(wait))) and (not active)): while to_run: next_.append(to_run.pop()) else: for i in range(((bnum - len(active)) - len(wait))): if to_run: minion_id = to_run.pop() if isinstance(minion_id, dict): next_.append(minion_id.keys()[0]) else: next_.append(minion_id) active += next_ args[0] = next_ if next_: if (not self.quiet): salt.utils.print_cli('\nExecuting run on {0}\n'.format(sorted(next_))) new_iter = self.local.cmd_iter_no_block(raw=self.opts.get('raw', False), ret=self.opts.get('return', ''), show_jid=show_jid, verbose=show_verbose, gather_job_timeout=self.opts['gather_job_timeout'], *args, **self.eauth) iters.append(new_iter) minion_tracker[new_iter] = {} minion_tracker[new_iter]['minions'] = next_ minion_tracker[new_iter]['active'] = True else: time.sleep(0.02) parts = {} for ping_ret in self.ping_gen: if (ping_ret is None): break m = next(six.iterkeys(ping_ret)) if (m not in self.minions): self.minions.append(m) to_run.append(m) for queue in iters: try: ncnt = 0 while True: part = next(queue) if (part is None): time.sleep(0.01) ncnt += 1 if (ncnt > 5): break continue if self.opts.get('raw'): parts.update({part['data']['id']: part}) if (part['data']['id'] in minion_tracker[queue]['minions']): minion_tracker[queue]['minions'].remove(part['data']['id']) else: salt.utils.print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(part['id'])) else: parts.update(part) for id in part: if (id in minion_tracker[queue]['minions']): minion_tracker[queue]['minions'].remove(id) else: salt.utils.print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(id)) except StopIteration: if (queue in minion_tracker): minion_tracker[queue]['active'] = False for minion in minion_tracker[queue]['minions']: if (minion not in parts): parts[minion] = {} parts[minion]['ret'] = {} for (minion, data) in six.iteritems(parts): if (minion in active): active.remove(minion) if bwait: wait.append((datetime.now() + timedelta(seconds=bwait))) failhard = False if (('retcode' in data) and isinstance(data['ret'], dict) and ('retcode' not in data['ret'])): data['ret']['retcode'] = data['retcode'] if (self.opts.get('failhard') and (data['ret']['retcode'] > 0)): failhard = True if self.opts.get('raw'): ret[minion] = data (yield data) else: ret[minion] = data['ret'] (yield {minion: data['ret']}) if (not self.quiet): ret[minion] = data['ret'] data[minion] = data.pop('ret') if ('out' in data): out = data.pop('out') else: out = None salt.output.display_output(data, out, self.opts) if failhard: log.error('ERROR: Minion {} returned with non-zero exit code. 
Batch run stopped due to failhard'.format(minion)) raise StopIteration for queue in minion_tracker: if ((not minion_tracker[queue]['active']) and (queue in iters)): iters.remove(queue) for minion in minion_tracker[queue]['minions']: if (minion in active): active.remove(minion) if bwait: wait.append((datetime.now() + timedelta(seconds=bwait)))
'Execute salt-run'
def run(self):
import salt.runner self.parse_args() self.setup_logfile_logger() verify_log(self.config) profiling_enabled = self.options.profiling_enabled runner = salt.runner.Runner(self.config) if self.options.doc: runner.print_docs() self.exit(salt.defaults.exitcodes.EX_OK) try: if check_user(self.config['user']): pr = salt.utils.activate_profile(profiling_enabled) try: ret = runner.run() if (isinstance(ret, dict) and ('retcode' in ret)): self.exit(ret['retcode']) elif (isinstance(ret, dict) and ('retcode' in ret.get('data', {}))): self.exit(ret['data']['retcode']) finally: salt.utils.output_profile(pr, stats_path=self.options.profiling_path, stop=True) except SaltClientError as exc: raise SystemExit(str(exc))
'Execute the salt command line'
def run(self):
import salt.client self.parse_args() self.setup_logfile_logger() verify_log(self.config) try: skip_perm_errors = (self.options.eauth != '') self.local_client = salt.client.get_local_client(self.get_config_file_path(), skip_perm_errors=skip_perm_errors, auto_reconnect=True) except SaltClientError as exc: self.exit(2, '{0}\n'.format(exc)) return if (self.options.batch or self.options.static): self._run_batch() return if self.options.preview_target: minion_list = self._preview_target() self._output_ret(minion_list, self.config.get('output', 'nested')) return if (self.options.timeout <= 0): self.options.timeout = self.local_client.opts['timeout'] kwargs = {'tgt': self.config['tgt'], 'fun': self.config['fun'], 'arg': self.config['arg'], 'timeout': self.options.timeout, 'show_timeout': self.options.show_timeout, 'show_jid': self.options.show_jid} if ('token' in self.config): import salt.utils.files try: with salt.utils.files.fopen(os.path.join(self.config['cachedir'], '.root_key'), 'r') as fp_: kwargs['key'] = fp_.readline() except IOError: kwargs['token'] = self.config['token'] kwargs['delimiter'] = self.options.delimiter if self.selected_target_option: kwargs['tgt_type'] = self.selected_target_option else: kwargs['tgt_type'] = 'glob' if (self.options.batch_safe_limit > 1): if (len(self._preview_target()) >= self.options.batch_safe_limit): salt.utils.print_cli('\nNOTICE: Too many minions targeted, switching to batch execution.') self.options.batch = self.options.batch_safe_size self._run_batch() return if getattr(self.options, 'return'): kwargs['ret'] = getattr(self.options, 'return') if getattr(self.options, 'return_config'): kwargs['ret_config'] = getattr(self.options, 'return_config') if getattr(self.options, 'return_kwargs'): kwargs['ret_kwargs'] = yamlify_arg(getattr(self.options, 'return_kwargs')) if getattr(self.options, 'module_executors'): kwargs['module_executors'] = yamlify_arg(getattr(self.options, 'module_executors')) if getattr(self.options, 'executor_opts'): kwargs['executor_opts'] = yamlify_arg(getattr(self.options, 'executor_opts')) if getattr(self.options, 'metadata'): kwargs['metadata'] = yamlify_arg(getattr(self.options, 'metadata')) if (('token' not in kwargs) and ('key' not in kwargs) and self.options.eauth): import salt.auth resolver = salt.auth.Resolver(self.config) res = resolver.cli(self.options.eauth) if (self.options.mktoken and res): tok = resolver.token_cli(self.options.eauth, res) if tok: kwargs['token'] = tok.get('token', '') if (not res): sys.stderr.write('ERROR: Authentication failed\n') sys.exit(2) kwargs.update(res) kwargs['eauth'] = self.options.eauth if self.config['async']: jid = self.local_client.cmd_async(**kwargs) salt.utils.print_cli('Executed command with job ID: {0}'.format(jid)) return if (not self.local_client): return retcodes = [] errors = [] try: if self.options.subset: cmd_func = self.local_client.cmd_subset kwargs['sub'] = self.options.subset kwargs['cli'] = True else: cmd_func = self.local_client.cmd_cli if self.options.progress: kwargs['progress'] = True self.config['progress'] = True ret = {} for progress in cmd_func(**kwargs): out = 'progress' try: self._progress_ret(progress, out) except salt.exceptions.LoaderError as exc: raise salt.exceptions.SaltSystemExit(exc) if ('return_count' not in progress): ret.update(progress) self._progress_end(out) self._print_returns_summary(ret) elif (self.config['fun'] == 'sys.doc'): ret = {} out = '' for full_ret in self.local_client.cmd_cli(**kwargs): (ret_, out, retcode) = self._format_ret(full_ret) 
ret.update(ret_) self._output_ret(ret, out) else: if self.options.verbose: kwargs['verbose'] = True ret = {} for full_ret in cmd_func(**kwargs): try: (ret_, out, retcode) = self._format_ret(full_ret) retcodes.append(retcode) self._output_ret(ret_, out) ret.update(full_ret) except KeyError: errors.append(full_ret) if (self.config['cli_summary'] is True): if (self.config['fun'] != 'sys.doc'): if (self.options.output is None): self._print_returns_summary(ret) self._print_errors_summary(errors) if (retcodes.count(0) < len(retcodes)): sys.stderr.write('ERROR: Minions returned with non-zero exit code\n') sys.exit(11) except (SaltInvocationError, EauthAuthenticationError, SaltClientError) as exc: ret = str(exc) self._output_ret(ret, '')
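All of the targeting, eauth and output handling assembled in run() above ultimately feeds a LocalClient publish. For orientation, the simplest public form of that call (target, function and tgt_type here are example values; this needs to run where the master config and keys are readable):

import salt.client

local = salt.client.LocalClient()  # reads /etc/salt/master by default
# Roughly what `salt -G 'os:Ubuntu' test.ping` boils down to.
ret = local.cmd('os:Ubuntu', 'test.ping', tgt_type='grain', timeout=15)
for minion_id, result in ret.items():
    print(minion_id, result)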
'Return a list of minions from a given target'
def _preview_target(self):
return self.local_client.gather_minions(self.config['tgt'], (self.selected_target_option or 'glob'))
'Display returns summary'
def _print_returns_summary(self, ret):
return_counter = 0 not_return_counter = 0 not_return_minions = [] not_response_minions = [] not_connected_minions = [] failed_minions = [] for each_minion in ret: minion_ret = ret[each_minion] if (isinstance(minion_ret, dict) and ('ret' in minion_ret)): minion_ret = ret[each_minion].get('ret') if (isinstance(minion_ret, six.string_types) and minion_ret.startswith('Minion did not return')): if ('Not connected' in minion_ret): not_connected_minions.append(each_minion) elif ('No response' in minion_ret): not_response_minions.append(each_minion) not_return_counter += 1 not_return_minions.append(each_minion) else: return_counter += 1 if self._get_retcode(ret[each_minion]): failed_minions.append(each_minion) salt.utils.print_cli('\n') salt.utils.print_cli('-------------------------------------------') salt.utils.print_cli('Summary') salt.utils.print_cli('-------------------------------------------') salt.utils.print_cli('# of minions targeted: {0}'.format((return_counter + not_return_counter))) salt.utils.print_cli('# of minions returned: {0}'.format(return_counter)) salt.utils.print_cli('# of minions that did not return: {0}'.format(not_return_counter)) salt.utils.print_cli('# of minions with errors: {0}'.format(len(failed_minions))) if self.options.verbose: if not_connected_minions: salt.utils.print_cli('Minions not connected: {0}'.format(' '.join(not_connected_minions))) if not_response_minions: salt.utils.print_cli('Minions not responding: {0}'.format(' '.join(not_response_minions))) if failed_minions: salt.utils.print_cli('Minions with failures: {0}'.format(' '.join(failed_minions))) salt.utils.print_cli('-------------------------------------------')
'Print progress events'
def _progress_ret(self, progress, out):
    import salt.output
    if not hasattr(self, 'progress_bar'):
        try:
            self.progress_bar = salt.output.get_progress(self.config, out, progress)
        except Exception as exc:
            raise salt.exceptions.LoaderError('\nWARNING: Install the `progressbar` python package. Requested job was still run but output cannot be displayed.\n')
    salt.output.update_progress(self.config, progress, self.progress_bar, out)
'Print the output from a single return to the terminal'
def _output_ret(self, ret, out):
    import salt.output
    if self.config['fun'] == 'sys.doc' and not isinstance(ret, Exception):
        self._print_docs(ret)
    else:
        salt.output.display_output(ret, out, self.config)
    if not ret:
        sys.stderr.write('ERROR: No return received\n')
        sys.exit(2)
'Take the full return data and format it to simple output'
def _format_ret(self, full_ret):
    ret = {}
    out = ''
    retcode = 0
    for key, data in six.iteritems(full_ret):
        ret[key] = data['ret']
        if 'out' in data:
            out = data['out']
        ret_retcode = self._get_retcode(data)
        if ret_retcode > retcode:
            retcode = ret_retcode
    return (ret, out, retcode)
'Determine a retcode for a given return'
def _get_retcode(self, ret):
    retcode = 0
    if isinstance(ret, dict) and ret.get('retcode', 0) != 0:
        return ret['retcode']
    elif isinstance(ret, bool) and not ret:
        return 1
    return retcode
'Print out the docstrings for all of the functions on the minions'
def _print_docs(self, ret):
import salt.output docs = {} if (not ret): self.exit(2, 'No minions found to gather docs from\n') if isinstance(ret, six.string_types): self.exit(2, '{0}\n'.format(ret)) for host in ret: if (isinstance(ret[host], six.string_types) and (ret[host].startswith('Minion did not return') or (ret[host] == 'VALUE TRIMMED'))): continue for fun in ret[host]: if ((fun not in docs) and ret[host][fun]): docs[fun] = ret[host][fun] if self.options.output: for fun in sorted(docs): salt.output.display_output({fun: docs[fun]}, 'nested', self.config) else: for fun in sorted(docs): salt.utils.print_cli('{0}:'.format(fun)) salt.utils.print_cli(docs[fun]) salt.utils.print_cli('')
'Execute salt-key'
def run(self):
import salt.key self.parse_args() multi = False if (self.config.get('zmq_behavior') and (self.config.get('transport') == 'raet')): multi = True self.setup_logfile_logger() verify_log(self.config) if multi: key = salt.key.MultiKeyCLI(self.config) else: key = salt.key.KeyCLI(self.config) if check_user(self.config['user']): key.run()
'Verify and display a nag-message in the log if a vulnerable hash-type is used. :return:'
def verify_hash_type(self):
if (self.config['hash_type'].lower() in ['md5', 'sha1']): log.warning('IMPORTANT: Do not use {h_type} hashing algorithm! Please set "hash_type" to sha256 in Salt {d_name} config!'.format(h_type=self.config['hash_type'], d_name=self.__class__.__name__))
'Log the given action for the daemon. :param action: :return:'
def action_log_info(self, action):
log.info('{action} the Salt {d_name}'.format(d_name=self.__class__.__name__, action=action))
'Say daemon starting. :return:'
def start_log_info(self):
log.info('The Salt {d_name} is starting up'.format(d_name=self.__class__.__name__))
'Say daemon shutting down. :return:'
def shutdown_log_info(self):
log.info('The Salt {d_name} is shut down'.format(d_name=self.__class__.__name__))
'Log environment failure for the daemon and exit with the error code. :param error: :return:'
def environment_failure(self, error):
log.exception('Failed to create environment for {d_name}: {reason}'.format(d_name=self.__class__.__name__, reason=get_error_message(error))) self.shutdown(error)
'Run the preparation sequence required to start a salt master server. If sub-classed, don\'t **ever** forget to run: super(YourSubClass, self).prepare()'
def prepare(self):
super(Master, self).prepare() try: if self.config['verify_env']: v_dirs = [self.config['pki_dir'], os.path.join(self.config['pki_dir'], 'minions'), os.path.join(self.config['pki_dir'], 'minions_pre'), os.path.join(self.config['pki_dir'], 'minions_denied'), os.path.join(self.config['pki_dir'], 'minions_autosign'), os.path.join(self.config['pki_dir'], 'minions_rejected'), self.config['cachedir'], os.path.join(self.config['cachedir'], 'jobs'), os.path.join(self.config['cachedir'], 'proc'), self.config['sock_dir'], self.config['token_dir'], self.config['syndic_dir'], self.config['sqlite_queue_dir']] if (self.config.get('transport') == 'raet'): v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted')) v_dirs.append(os.path.join(self.config['pki_dir'], 'pending')) v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected')) v_dirs.append(os.path.join(self.config['cachedir'], 'raet')) verify_env(v_dirs, self.config['user'], permissive=self.config['permissive_pki_access'], pki_dir=self.config['pki_dir']) for syndic_file in os.listdir(self.config['syndic_dir']): os.remove(os.path.join(self.config['syndic_dir'], syndic_file)) except OSError as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) self.action_log_info('Setting up') if (self.config['transport'].lower() in ('zeromq', 'tcp')): if (not verify_socket(self.config['interface'], self.config['publish_port'], self.config['ret_port'])): self.shutdown(4, 'The ports are not available to bind') self.config['interface'] = ip_bracket(self.config['interface']) migrations.migrate_paths(self.config) import salt.master self.master = salt.master.Master(self.config) else: import salt.daemons.flo self.master = salt.daemons.flo.IofloMaster(self.config) self.daemonize_if_required() self.set_pidfile() salt.utils.process.notify_systemd()
'Start the actual master. If sub-classed, don\'t **ever** forget to run: super(YourSubClass, self).start() NOTE: Run any required code before calling `super()`.'
def start(self):
super(Master, self).start() if check_user(self.config['user']): self.action_log_info('Starting up') self.verify_hash_type() self.master.start()
'If sub-classed, run any shutdown operations on this method.'
def shutdown(self, exitcode=0, exitmsg=None):
self.shutdown_log_info() msg = 'The salt master is shutdown. ' if (exitmsg is not None): exitmsg = (msg + exitmsg) else: exitmsg = msg.strip() super(Master, self).shutdown(exitcode, exitmsg)
'Run the preparation sequence required to start a salt minion. If sub-classed, don\'t **ever** forget to run: super(YourSubClass, self).prepare()'
def prepare(self):
super(Minion, self).prepare() try: if self.config['verify_env']: confd = self.config.get('default_include') if confd: if ('*' in confd): confd = os.path.dirname(confd) if (not os.path.isabs(confd)): confd = os.path.join(os.path.dirname(self.config['conf_file']), confd) else: confd = os.path.join(os.path.dirname(self.config['conf_file']), 'minion.d') v_dirs = [self.config['pki_dir'], self.config['cachedir'], self.config['sock_dir'], self.config['extension_modules'], confd] if (self.config.get('transport') == 'raet'): v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted')) v_dirs.append(os.path.join(self.config['pki_dir'], 'pending')) v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected')) v_dirs.append(os.path.join(self.config['cachedir'], 'raet')) verify_env(v_dirs, self.config['user'], permissive=self.config['permissive_pki_access'], pki_dir=self.config['pki_dir']) except OSError as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) log.info('Setting up the Salt Minion "{0}"'.format(self.config['id'])) migrations.migrate_paths(self.config) if self.check_running(): self.action_log_info('An instance is already running. Exiting') self.shutdown(1) transport = self.config.get('transport').lower() if (transport in ('zeromq', 'tcp', 'detect')): import salt.minion self.daemonize_if_required() self.set_pidfile() if (self.config.get('master_type') == 'func'): salt.minion.eval_master_func(self.config) self.minion = salt.minion.MinionManager(self.config) elif (transport == 'raet'): import salt.daemons.flo self.daemonize_if_required() self.set_pidfile() self.minion = salt.daemons.flo.IofloMinion(self.config) else: log.error("The transport '{0}' is not supported. Please use one of the following: tcp, raet, or zeromq.".format(transport)) self.shutdown(1)
'Start the actual minion. If sub-classed, don\'t **ever** forget to run: super(YourSubClass, self).start() NOTE: Run any required code before calling `super()`.'
def start(self):
super(Minion, self).start() try: if check_user(self.config['user']): self.action_log_info('Starting up') self.verify_hash_type() self.minion.tune_in() if self.minion.restart: raise SaltClientError('Minion could not connect to Master') except (KeyboardInterrupt, SaltSystemExit) as error: self.action_log_info('Stopping') if isinstance(error, KeyboardInterrupt): log.warning('Exiting on Ctrl-c') self.shutdown() else: log.error(str(error)) self.shutdown(error.code)
'Start the actual minion as a caller minion. cleanup_protecteds is a list of yard host addresses that should not be cleaned up; this fixes a race condition when the salt-caller minion starts up. If sub-classed, don\'t **ever** forget to run: super(YourSubClass, self).start() NOTE: Run any required code before calling `super()`.'
def call(self, cleanup_protecteds):
try: self.prepare() if check_user(self.config['user']): self.minion.opts['__role'] = kinds.APPL_KIND_NAMES[kinds.applKinds.caller] self.minion.opts['raet_cleanup_protecteds'] = cleanup_protecteds self.minion.call_in() except (KeyboardInterrupt, SaltSystemExit) as exc: self.action_log_info('Stopping') if isinstance(exc, KeyboardInterrupt): log.warning('Exiting on Ctrl-c') self.shutdown() else: log.error(str(exc)) self.shutdown(exc.code)
'If sub-classed, run any shutdown operations on this method. :param exitcode :param exitmsg'
def shutdown(self, exitcode=0, exitmsg=None):
self.action_log_info('Shutting down') if hasattr(self, 'minion'): self.minion.destroy() super(Minion, self).shutdown(exitcode, 'The Salt {0} is shutdown. {1}'.format(self.__class__.__name__, (exitmsg or '')).strip())
'Run the preparation sequence required to start a salt proxy minion. If sub-classed, don\'t **ever** forget to run: super(YourSubClass, self).prepare()'
def prepare(self):
super(ProxyMinion, self).prepare() if (not self.values.proxyid): self.error('salt-proxy requires --proxyid') try: if self.config['verify_env']: confd = self.config.get('default_include') if confd: if ('*' in confd): confd = os.path.dirname(confd) if (not os.path.isabs(confd)): confd = os.path.join(os.path.dirname(self.config['conf_file']), confd) else: confd = os.path.join(os.path.dirname(self.config['conf_file']), 'proxy.d') v_dirs = [self.config['pki_dir'], self.config['cachedir'], self.config['sock_dir'], self.config['extension_modules'], confd] if (self.config.get('transport') == 'raet'): v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted')) v_dirs.append(os.path.join(self.config['pki_dir'], 'pending')) v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected')) v_dirs.append(os.path.join(self.config['cachedir'], 'raet')) verify_env(v_dirs, self.config['user'], permissive=self.config['permissive_pki_access'], pki_dir=self.config['pki_dir']) except OSError as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) self.action_log_info('Setting up "{0}"'.format(self.config['id'])) migrations.migrate_paths(self.config) if self.check_running(): self.action_log_info('An instance is already running. Exiting') self.shutdown(1) if (self.config['transport'].lower() in ('zeromq', 'tcp', 'detect')): import salt.minion self.daemonize_if_required() self.set_pidfile() if (self.config.get('master_type') == 'func'): salt.minion.eval_master_func(self.config) self.minion = salt.minion.ProxyMinionManager(self.config) else: import salt.daemons.flo self.daemonize_if_required() self.set_pidfile() self.minion = salt.daemons.flo.IofloMinion(self.config)
'Start the actual proxy minion. If sub-classed, don\'t **ever** forget to run: super(YourSubClass, self).start() NOTE: Run any required code before calling `super()`.'
def start(self):
super(ProxyMinion, self).start() try: if check_user(self.config['user']): self.action_log_info('The Proxy Minion is starting up') self.verify_hash_type() self.minion.tune_in() if self.minion.restart: raise SaltClientError('Proxy Minion could not connect to Master') except (KeyboardInterrupt, SaltSystemExit) as exc: self.action_log_info('Proxy Minion Stopping') if isinstance(exc, KeyboardInterrupt): log.warning('Exiting on Ctrl-c') self.shutdown() else: log.error(str(exc)) self.shutdown(exc.code)
'If sub-classed, run any shutdown operations on this method. :param exitcode :param exitmsg'
def shutdown(self, exitcode=0, exitmsg=None):
if (hasattr(self, 'minion') and ('proxymodule' in self.minion.opts)): proxy_fn = (self.minion.opts['proxymodule'].loaded_base_name + '.shutdown') self.minion.opts['proxymodule'][proxy_fn](self.minion.opts) self.action_log_info('Shutting down') super(ProxyMinion, self).shutdown(exitcode, 'The Salt {0} is shutdown. {1}'.format(self.__class__.__name__, (exitmsg or '')).strip())
'Run the preparation sequence required to start a salt syndic minion. If sub-classed, don\'t **ever** forget to run: super(YourSubClass, self).prepare()'
def prepare(self):
super(Syndic, self).prepare() try: if self.config['verify_env']: verify_env([self.config['pki_dir'], self.config['cachedir'], self.config['sock_dir'], self.config['extension_modules']], self.config['user'], permissive=self.config['permissive_pki_access'], pki_dir=self.config['pki_dir']) except OSError as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) self.action_log_info('Setting up "{0}"'.format(self.config['id'])) import salt.minion self.daemonize_if_required() self.syndic = salt.minion.SyndicManager(self.config) self.set_pidfile()
'Start the actual syndic. If sub-classed, don\'t **ever** forget to run: super(YourSubClass, self).start() NOTE: Run any required code before calling `super()`.'
def start(self):
super(Syndic, self).start() if check_user(self.config['user']): self.action_log_info('Starting up') self.verify_hash_type() try: self.syndic.tune_in() except KeyboardInterrupt: self.action_log_info('Stopping') self.shutdown()
'If sub-classed, run any shutdown operations on this method. :param exitcode :param exitmsg'
def shutdown(self, exitcode=0, exitmsg=None):
self.action_log_info('Shutting down') super(Syndic, self).shutdown(exitcode, 'The Salt {0} is shutdown. {1}'.format(self.__class__.__name__, (exitmsg or '')).strip())
'Run the preparation sequence required to start a salt-api daemon. If sub-classed, don\'t **ever** forget to run: super(YourSubClass, self).prepare()'
def prepare(self):
super(SaltAPI, self).prepare() try: if self.config['verify_env']: logfile = self.config['log_file'] if ((logfile is not None) and (not logfile.startswith(('tcp://', 'udp://', 'file://')))): current_umask = os.umask(23) verify_files([logfile], self.config['user']) os.umask(current_umask) except OSError as err: log.exception('Failed to prepare salt environment') self.shutdown(err.errno) self.setup_logfile_logger() verify_log(self.config) log.info('Setting up the Salt API') self.api = salt.client.netapi.NetapiClient(self.config) self.daemonize_if_required() self.set_pidfile()
'Start the actual master. If sub-classed, don\'t **ever** forget to run: super(YourSubClass, self).start() NOTE: Run any required code before calling `super()`.'
def start(self):
super(SaltAPI, self).start() if check_user(self.config['user']): log.info('The salt-api is starting up') self.api.run()
'If sub-classed, run any shutdown operations on this method.'
def shutdown(self, exitcode=0, exitmsg=None):
log.info('The salt-api is shutting down..') msg = 'The salt-api is shutdown. ' if (exitmsg is not None): exitmsg = (msg + exitmsg) else: exitmsg = msg.strip() super(SaltAPI, self).shutdown(exitcode, exitmsg)
'Run the SPM command-line client'
def run(self):
ui = salt.spm.SPMCmdlineInterface() self.parse_args() self.setup_logfile_logger() verify_log(self.config) client = salt.spm.SPMClient(ui, self.config) client.run(self.args)
'Execute the salt call!'
def run(self):
self.parse_args() if self.options.file_root: file_root = os.path.abspath(self.options.file_root) self.config['file_roots'] = {'base': _expand_glob_path([file_root])} if self.options.pillar_root: pillar_root = os.path.abspath(self.options.pillar_root) self.config['pillar_roots'] = {'base': _expand_glob_path([pillar_root])} if self.options.states_dir: states_dir = os.path.abspath(self.options.states_dir) self.config['states_dirs'] = [states_dir] if self.options.local: self.config['file_client'] = 'local' if self.options.master: self.config['master'] = self.options.master self.setup_logfile_logger() verify_log(self.config) caller = salt.cli.caller.Caller.factory(self.config) if self.options.doc: caller.print_docs() self.exit(salt.defaults.exitcodes.EX_OK) if self.options.grains_run: caller.print_grains() self.exit(salt.defaults.exitcodes.EX_OK) caller.run()
'Pass in command line opts'
def __init__(self, opts):
self.opts = opts self.opts['caller'] = True self.serial = salt.payload.Serial(self.opts) try: self.minion = salt.minion.SMinion(opts) except SaltClientError as exc: raise SystemExit(str(exc))
'Pick up the documentation for all of the modules and print it out.'
def print_docs(self):
docs = {} for (name, func) in six.iteritems(self.minion.functions): if (name not in docs): if func.__doc__: docs[name] = func.__doc__ for name in sorted(docs): if name.startswith(self.opts.get('fun', '')): salt.utils.print_cli('{0}:\n{1}\n'.format(name, docs[name]))
'Print out the grains'
def print_grains(self):
grains = salt.loader.grains(self.opts) salt.output.display_output({'local': grains}, 'grains', self.opts)
'Execute the salt call logic'
def run(self):
profiling_enabled = self.opts.get('profiling_enabled', False) try: pr = salt.utils.activate_profile(profiling_enabled) try: ret = self.call() finally: salt.utils.output_profile(pr, stats_path=self.opts.get('profiling_path', '/tmp/stats'), stop=True) out = ret.get('out', 'nested') if self.opts['print_metadata']: print_ret = ret out = 'nested' else: print_ret = ret.get('return', {}) salt.output.display_output({'local': print_ret}, out, self.opts) if self.opts.get('retcode_passthrough', False): sys.exit(ret['retcode']) except SaltInvocationError as err: raise SystemExit(err)
'Call the module'
def call(self):
ret = {} fun = self.opts['fun'] ret['jid'] = salt.utils.jid.gen_jid() proc_fn = os.path.join(salt.minion.get_proc_dir(self.opts['cachedir']), ret['jid']) if (fun not in self.minion.functions): docs = self.minion.functions['sys.doc']('{0}*'.format(fun)) if docs: docs[fun] = self.minion.functions.missing_fun_string(fun) ret['out'] = 'nested' ret['return'] = docs return ret sys.stderr.write(self.minion.functions.missing_fun_string(fun)) mod_name = fun.split('.')[0] if (mod_name in self.minion.function_errors): sys.stderr.write(' Possible reasons: {0}\n'.format(self.minion.function_errors[mod_name])) else: sys.stderr.write('\n') sys.exit((-1)) metadata = self.opts.get('metadata') if (metadata is not None): metadata = salt.utils.args.yamlify_arg(metadata) try: sdata = {'fun': fun, 'pid': os.getpid(), 'jid': ret['jid'], 'tgt': 'salt-call'} if (metadata is not None): sdata['metadata'] = metadata (args, kwargs) = salt.minion.load_args_and_kwargs(self.minion.functions[fun], salt.utils.args.parse_input(self.opts['arg'], no_parse=self.opts.get('no_parse', [])), data=sdata) try: with salt.utils.files.fopen(proc_fn, 'w+b') as fp_: fp_.write(self.serial.dumps(sdata)) except NameError: pass except IOError: sys.stderr.write('Cannot write to process directory. Do you have permissions to write to {0} ?\n'.format(proc_fn)) func = self.minion.functions[fun] try: ret['return'] = func(*args, **kwargs) except TypeError as exc: sys.stderr.write('\nPassed invalid arguments: {0}.\n\nUsage:\n'.format(exc)) salt.utils.print_cli(func.__doc__) active_level = LOG_LEVELS.get(self.opts['log_level'].lower(), logging.ERROR) if (active_level <= logging.DEBUG): trace = traceback.format_exc() sys.stderr.write(trace) sys.exit(salt.defaults.exitcodes.EX_GENERIC) try: ret['retcode'] = sys.modules[func.__module__].__context__.get('retcode', 0) except AttributeError: ret['retcode'] = 1 except CommandExecutionError as exc: msg = "Error running '{0}': {1}\n" active_level = LOG_LEVELS.get(self.opts['log_level'].lower(), logging.ERROR) if (active_level <= logging.DEBUG): sys.stderr.write(traceback.format_exc()) sys.stderr.write(msg.format(fun, str(exc))) sys.exit(salt.defaults.exitcodes.EX_GENERIC) except CommandNotFoundError as exc: msg = "Command required for '{0}' not found: {1}\n" sys.stderr.write(msg.format(fun, str(exc))) sys.exit(salt.defaults.exitcodes.EX_GENERIC) try: os.remove(proc_fn) except (IOError, OSError): pass if hasattr(self.minion.functions[fun], '__outputter__'): oput = self.minion.functions[fun].__outputter__ if isinstance(oput, six.string_types): ret['out'] = oput is_local = (self.opts['local'] or (self.opts.get('file_client', False) == 'local') or (self.opts.get('master_type') == 'disable')) returners = self.opts.get('return', '').split(',') if ((not is_local) or returners): ret['id'] = self.opts['id'] ret['fun'] = fun ret['fun_args'] = self.opts['arg'] if (metadata is not None): ret['metadata'] = metadata for returner in returners: if (not returner): continue try: ret['success'] = True self.minion.returners['{0}.returner'.format(returner)](ret) except Exception: pass if (not is_local): try: mret = ret.copy() mret['jid'] = 'req' self.return_pub(mret) except Exception: pass elif self.opts['cache_jobs']: salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) return ret
'Pass in the command line options'
def __init__(self, opts):
super(ZeroMQCaller, self).__init__(opts)
'Return the data up to the master'
def return_pub(self, ret):
channel = salt.transport.Channel.factory(self.opts, usage='salt_call') load = {'cmd': '_return', 'id': self.opts['id']} for (key, value) in six.iteritems(ret): load[key] = value channel.send(load)
'Pass in the command line options'
def __init__(self, opts):
self.process = None if (not opts['local']): self.stack = self._setup_caller_stack(opts) salt.transport.jobber_stack = self.stack if (opts.get('__role') == kinds.APPL_KIND_NAMES[kinds.applKinds.caller]): self.process = MultiprocessingProcess(target=raet_minion_run, kwargs={'cleanup_protecteds': [self.stack.ha]}) self.process.start() self._wait_caller(opts) super(RAETCaller, self).__init__(opts)
'Execute the salt call logic'
def run(self):
try: ret = self.call() if (not self.opts['local']): self.stack.server.close() salt.transport.jobber_stack = None if self.opts['print_metadata']: print_ret = ret else: print_ret = ret.get('return', {}) if self.process: self.process.terminate() salt.output.display_output({'local': print_ret}, ret.get('out', 'nested'), self.opts) if self.opts.get('retcode_passthrough', False): sys.exit(ret['retcode']) except SaltInvocationError as err: raise SystemExit(err)
'Set up and return the LaneStack and Yard used by the channel when the global one is not already set up (such as in salt-call) to communicate to and from the minion'
def _setup_caller_stack(self, opts):
role = opts.get('id') if (not role): emsg = 'Missing role required to setup RAETChannel.' log.error((emsg + '\n')) raise ValueError(emsg) kind = opts.get('__role') if (kind not in kinds.APPL_KINDS): emsg = "Invalid application kind = '{0}' for RAETChannel.".format(kind) log.error((emsg + '\n')) raise ValueError(emsg) if (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion], kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]): lanename = '{0}_{1}'.format(role, kind) else: emsg = "Unsupported application kind '{0}' for RAETChannel.".format(kind) log.error((emsg + '\n')) raise ValueError(emsg) sockdirpath = opts['sock_dir'] stackname = ('caller' + nacling.uuid(size=18)) stack = LaneStack(name=stackname, lanename=lanename, sockdirpath=sockdirpath) stack.Pk = raeting.PackKind.pack.value stack.addRemote(RemoteYard(stack=stack, name='manor', lanename=lanename, dirpath=sockdirpath)) log.debug('Created Caller Jobber Stack {0}\n'.format(stack.name)) return stack
'Returns when RAET Minion Yard is available'
def _wait_caller(self, opts):
yardname = 'manor' dirpath = opts['sock_dir'] role = opts.get('id') if (not role): emsg = 'Missing role required to setup RAET SaltCaller.' log.error((emsg + '\n')) raise ValueError(emsg) kind = opts.get('__role') if (kind not in kinds.APPL_KINDS): emsg = "Invalid application kind = '{0}' for RAET SaltCaller.".format(kind) log.error((emsg + '\n')) raise ValueError(emsg) if (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion], kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]): lanename = '{0}_{1}'.format(role, kind) else: emsg = "Unsupported application kind '{0}' for RAET SaltCaller.".format(kind) log.error((emsg + '\n')) raise ValueError(emsg) (ha, dirpath) = Yard.computeHa(dirpath, lanename, yardname) if is_windows(): exists = False while (not exists): try: f = win32file.CreateFile(ha, (win32file.GENERIC_WRITE | win32file.GENERIC_READ), win32file.FILE_SHARE_READ, None, win32file.OPEN_EXISTING, 0, None) win32file.CloseHandle(f) exists = True except win32file.error: time.sleep(0.1) else: while (not (os.path.exists(ha) and (not os.path.isfile(ha)) and (not os.path.isdir(ha)))): time.sleep(0.1) time.sleep(0.5)
'Format the low data for RunnerClient()\'s master_call() function This also normalizes the following low data formats to a single, common low data structure. Old-style low: ``{\'fun\': \'jobs.lookup_jid\', \'jid\': \'1234\'}`` New-style: ``{\'fun\': \'jobs.lookup_jid\', \'kwarg\': {\'jid\': \'1234\'}}`` CLI-style: ``{\'fun\': \'jobs.lookup_jid\', \'arg\': [\'jid="1234"\']}``'
def _reformat_low(self, low):
fun = low.pop(u'fun') verify_fun(self.functions, fun) eauth_creds = dict([(i, low.pop(i)) for i in [u'username', u'password', u'eauth', u'token', u'client', u'user', u'key'] if (i in low)]) (_arg, _kwarg) = salt.utils.args.parse_input(low.pop(u'arg', []), condition=False) _kwarg.update(low.pop(u'kwarg', {})) _kwarg.update(low) munged = [] munged.extend(_arg) munged.append(dict(__kwarg__=True, **_kwarg)) (arg, kwarg) = salt.minion.load_args_and_kwargs(self.functions[fun], munged, self.opts, ignore_invalid=True) return dict(fun=fun, kwarg={u'kwarg': kwarg, u'arg': arg}, **eauth_creds)
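As a sketch, the three input shapes named in the docstring and the normalized structure _reformat_low() hands back; the jid and eauth values are invented:

.. code-block:: python

    old_style = {'fun': 'jobs.lookup_jid', 'jid': '1234'}
    new_style = {'fun': 'jobs.lookup_jid', 'kwarg': {'jid': '1234'}}
    cli_style = {'fun': 'jobs.lookup_jid', 'arg': ['jid="1234"']}
    # Each of these, plus any eauth credentials, is normalized to roughly:
    # {'fun': 'jobs.lookup_jid',
    #  'kwarg': {'arg': [], 'kwarg': {'jid': '1234'}},
    #  'username': 'saltdev', 'password': 'saltdev', 'eauth': 'pam'}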
'Execute a runner function asynchronously; eauth is respected This function requires that :conf_master:`external_auth` is configured and the user is authorized to execute runner functions: (``@runner``). .. code-block:: python runner.eauth_async({ \'fun\': \'jobs.list_jobs\', \'username\': \'saltdev\', \'password\': \'saltdev\', \'eauth\': \'pam\', })'
def cmd_async(self, low):
reformatted_low = self._reformat_low(low) return mixins.AsyncClientMixin.cmd_async(self, reformatted_low)
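A hedged usage sketch: it assumes a standard master config path and a 'saltdev' PAM account, neither of which comes from the source above:

.. code-block:: python

    import salt.config
    import salt.runner

    opts = salt.config.master_config('/etc/salt/master')  # assumed path
    runner = salt.runner.RunnerClient(opts)
    job = runner.cmd_async({
        'fun': 'jobs.list_jobs',
        'username': 'saltdev',
        'password': 'saltdev',
        'eauth': 'pam',
    })
    # job holds the publication data of the fired runner, e.g. its jid and tag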
'Execute a runner function synchronously; eauth is respected This function requires that :conf_master:`external_auth` is configured and the user is authorized to execute runner functions: (``@runner``). .. code-block:: python runner.eauth_sync({ \'fun\': \'jobs.list_jobs\', \'username\': \'saltdev\', \'password\': \'saltdev\', \'eauth\': \'pam\', })'
def cmd_sync(self, low, timeout=None, full_return=False):
reformatted_low = self._reformat_low(low) return mixins.SyncClientMixin.cmd_sync(self, reformatted_low, timeout, full_return)
'Execute a function'
def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False):
return super(RunnerClient, self).cmd(fun, arg, pub_data, kwarg, print_event, full_return)
'Print out the documentation!'
def print_docs(self):
arg = self.opts.get(u'fun', None) docs = super(Runner, self).get_docs(arg) for fun in sorted(docs): display_output(u'{0}:'.format(fun), u'text', self.opts) print(docs[fun])
'Execute the runner sequence'
def run(self):
import salt.minion ret = {} if self.opts.get(u'doc', False): self.print_docs() else: low = {u'fun': self.opts[u'fun']} try: async_pub = self._gen_async_pub() self.jid = async_pub[u'jid'] fun_args = salt.utils.args.parse_input(self.opts[u'arg'], no_parse=self.opts.get(u'no_parse', [])) verify_fun(self.functions, low[u'fun']) (args, kwargs) = salt.minion.load_args_and_kwargs(self.functions[low[u'fun']], fun_args, self.opts) low[u'arg'] = args low[u'kwarg'] = kwargs if self.opts.get(u'eauth'): if (u'token' in self.opts): try: with salt.utils.files.fopen(os.path.join(self.opts[u'cachedir'], u'.root_key'), u'r') as fp_: low[u'key'] = fp_.readline() except IOError: low[u'token'] = self.opts[u'token'] if ((u'token' not in low) and (u'key' not in low) and self.opts[u'eauth']): import salt.auth resolver = salt.auth.Resolver(self.opts) res = resolver.cli(self.opts[u'eauth']) if (self.opts[u'mktoken'] and res): tok = resolver.token_cli(self.opts[u'eauth'], res) if tok: low[u'token'] = tok.get(u'token', u'') if (not res): log.error(u'Authentication failed') return ret low.update(res) low[u'eauth'] = self.opts[u'eauth'] else: user = salt.utils.get_specific_user() if (low[u'fun'] == u'state.orchestrate'): low[u'kwarg'][u'orchestration_jid'] = async_pub[u'jid'] if self.opts.get(u'async', False): if self.opts.get(u'eauth'): async_pub = self.cmd_async(low) else: async_pub = self.async(self.opts[u'fun'], low, user=user, pub=async_pub) log.warning(u'Running in async mode. Results of this execution may be collected by attaching to the master event bus or by examining the master job cache, if configured. This execution is running under tag %s', async_pub[u'tag']) return async_pub[u'jid'] if self.opts.get(u'eauth'): ret = self.cmd_sync(low) if (isinstance(ret, dict) and (set(ret) == set((u'data', u'outputter')))): outputter = ret[u'outputter'] ret = ret[u'data'] else: outputter = None display_output(ret, outputter, self.opts) else: ret = self._proc_function(self.opts[u'fun'], low, user, async_pub[u'tag'], async_pub[u'jid'], daemonize=False) except salt.exceptions.SaltException as exc: evt = salt.utils.event.get_event(u'master', opts=self.opts) evt.fire_event({u'success': False, u'return': u'{0}'.format(exc), u'retcode': 254, u'fun': self.opts[u'fun'], u'fun_args': fun_args, u'jid': self.jid}, tag=u'salt/run/{0}/ret'.format(self.jid)) if (u'fun' in low): ret = self.get_docs(u'{0}*'.format(low[u'fun'])) else: ret = None if (not ret): ret = u'{0}'.format(exc) if (not self.opts.get(u'quiet', False)): display_output(ret, u'nested', self.opts) else: log.debug(u'Runner return: %s', ret) return ret
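For reference, an illustrative sketch of the 'low' chunk run() assembles before dispatching; the function name and credentials are invented:

.. code-block:: python

    low = {
        'fun': 'jobs.list_jobs',
        'arg': [],
        'kwarg': {},
    }
    # When --eauth is in play, the resolved credentials are merged in as well:
    # low.update({'username': 'saltdev', 'password': '...', 'eauth': 'pam'})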
'Pack this exception into a serializable dictionary that is safe for transport via msgpack'
def pack(self):
if six.PY3: return {u'message': str(self), u'args': self.args} return dict(message=self.__unicode__(), args=self.args)
'Recursively iterate down through data structures to determine output'
def display(self, ret, indent, prefix, out):
if isinstance(ret, six.string_types): lines = ret.split(u'\n') for line in lines: out += u'{0}{1}{2}{3}{4}\n'.format(self.colors[u'RED'], (u' ' * indent), prefix, line, self.colors[u'ENDC']) elif isinstance(ret, dict): for key in sorted(ret): val = ret[key] out += u'{0}{1}{2}{3}{4}:\n'.format(self.colors[u'CYAN'], (' ' * indent), prefix, key, self.colors[u'ENDC']) out = self.display(val, (indent + 4), u'', out) return out
'Build the unicode string to be displayed.'
def ustring(self, indent, color, msg, prefix='', suffix='', endc=None):
if (endc is None): endc = self.ENDC indent *= ' ' fmt = u'{0}{1}{2}{3}{4}{5}' try: return fmt.format(indent, color, prefix, msg, endc, suffix) except UnicodeDecodeError: return fmt.format(indent, color, prefix, salt.utils.locales.sdecode(msg), endc, suffix)
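A standalone sketch of the string ustring() builds; the escape codes are ordinary ANSI SGR sequences and the values are invented:

.. code-block:: python

    CYAN, ENDC = '\x1b[0;36m', '\x1b[0m'
    indent, color, prefix, msg, suffix = 4, CYAN, '', 'local:', ''
    line = '{0}{1}{2}{3}{4}{5}'.format(' ' * indent, color, prefix, msg, ENDC, suffix)
    # '    \x1b[0;36mlocal:\x1b[0m'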
'When the text inside the column is longer than the width, it will split by space and continue on the next line.'
def wrap_onspace(self, text):
def _truncate(line, word): return '{line}{part}{word}'.format(line=line, part=' \n'[((len(line[(line.rfind('\n') + 1):]) + len(word.split('\n', 1)[0])) >= self.width)], word=word) return reduce(_truncate, text.split(' '))
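A standalone sketch of the same wrap-on-space logic with a hard-coded width of 10 (the class uses self.width instead):

.. code-block:: python

    from functools import reduce

    width = 10  # stand-in for self.width

    def _truncate(line, word):
        fits = len(line[line.rfind('\n') + 1:]) + len(word.split('\n', 1)[0]) < width
        return '{0}{1}{2}'.format(line, ' ' if fits else '\n', word)

    print(reduce(_truncate, 'a column value that is too wide'.split(' ')))
    # a column
    # value that
    # is too
    # wide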
'Prepare the row content to be displayed.'
def prepare_rows(self, rows, indent, has_header):
out = [] def row_wrapper(row): new_rows = [self.wrapfunc(item).split('\n') for item in row] rows = [] for item in map(None, *new_rows): if isinstance(item, (tuple, list)): rows.append([(substr or '') for substr in item]) else: rows.append([item]) return rows logical_rows = [row_wrapper(row) for row in rows] columns = map(None, *reduce(operator.add, logical_rows)) max_widths = [max([len(str(item)) for item in column]) for column in columns] row_separator = (self.row_delimiter * (((len(self.prefix) + len(self.suffix)) + sum(max_widths)) + (len(self.delim) * (len(max_widths) - 1)))) justify = self._JUSTIFY_MAP[self.justify.lower()] if self.separate_rows: out.append(self.ustring(indent, self.LIGHT_GRAY, row_separator)) for physical_rows in logical_rows: for row in physical_rows: line = ((self.prefix + self.delim.join([justify(str(item), width) for (item, width) in zip(row, max_widths)])) + self.suffix) out.append(self.ustring(indent, self.WHITE, line)) if (self.separate_rows or has_header): out.append(self.ustring(indent, self.LIGHT_GRAY, row_separator)) has_header = False return out
'Prepares row content and displays.'
def display_rows(self, rows, labels, indent):
out = [] if (not rows): return out first_row_type = type(rows[0]) consistent = True for row in rows[1:]: if (type(row) != first_row_type): consistent = False if (not consistent): return out if isinstance(labels, dict): labels_temp = [] for key in sorted(labels): labels_temp.append(labels[key]) labels = labels_temp if (first_row_type is dict): temp_rows = [] if (not labels): labels = [str(label).replace('_', ' ').title() for label in sorted(rows[0])] for row in rows: temp_row = [] for key in sorted(row): temp_row.append(str(row[key])) temp_rows.append(temp_row) rows = temp_rows elif isinstance(rows[0], string_types): rows = [[row] for row in rows] labels_and_rows = (([labels] + rows) if labels else rows) has_header = (self.has_header and labels) return self.prepare_rows(labels_and_rows, (indent + 4), has_header)
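As an illustration, the normalization display_rows() applies to a list of dicts before calling prepare_rows(); the minion data below is invented:

.. code-block:: python

    rows = [{'id': 'web01', 'os': 'Ubuntu'}, {'id': 'web02', 'os': 'CentOS'}]
    labels = [str(label).replace('_', ' ').title() for label in sorted(rows[0])]
    rows = [[str(row[key]) for key in sorted(row)] for row in rows]
    # labels -> ['Id', 'Os']
    # rows   -> [['web01', 'Ubuntu'], ['web02', 'CentOS']]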
'Display table(s).'
def display(self, ret, indent, out, rows_key=None, labels_key=None):
rows = [] labels = None if isinstance(ret, dict): if ((not rows_key) or (rows_key and (rows_key in list(ret.keys())))): for key in sorted(ret): if (rows_key and (key != rows_key)): continue val = ret[key] if (not rows_key): out.append(self.ustring(indent, self.DARK_GRAY, key, suffix=':')) out.append(self.ustring(indent, self.DARK_GRAY, '----------')) if isinstance(val, (list, tuple)): rows = val if labels_key: labels = ret.get(labels_key) out.extend(self.display_rows(rows, labels, indent)) else: self.display(val, (indent + 4), out, rows_key=rows_key, labels_key=labels_key) elif rows_key: for key in sorted(ret): val = ret[key] self.display(val, indent, out, rows_key=rows_key, labels_key=labels_key) elif isinstance(ret, (list, tuple)): if (not rows_key): rows = ret out.extend(self.display_rows(rows, labels, indent)) return out
'Recursively iterate down through data structures to determine output'
def display(self, ret, indent, prefix, out):
if ((ret is None) or (ret is True) or (ret is False)): out.append(self.ustring(indent, self.LIGHT_YELLOW, ret, prefix=prefix)) elif isinstance(ret, Number): out.append(self.ustring(indent, self.LIGHT_YELLOW, ret, prefix=prefix)) elif isinstance(ret, string_types): first_line = True for line in ret.splitlines(): if self.strip_colors: line = salt.output.strip_esc_sequence(line) line_prefix = ((' ' * len(prefix)) if (not first_line) else prefix) out.append(self.ustring(indent, self.GREEN, line, prefix=line_prefix)) first_line = False elif isinstance(ret, (list, tuple)): for ind in ret: if isinstance(ind, (list, tuple, dict)): out.append(self.ustring(indent, self.GREEN, '|_')) prefix = ('' if isinstance(ind, dict) else '- ') self.display(ind, (indent + 2), prefix, out) else: self.display(ind, indent, '- ', out) elif isinstance(ret, dict): if indent: out.append(self.ustring(indent, self.CYAN, '----------')) if isinstance(ret, salt.utils.odict.OrderedDict): keys = ret.keys() else: keys = sorted(ret) for key in keys: val = ret[key] out.append(self.ustring(indent, self.CYAN, key, suffix=':', prefix=prefix)) self.display(val, (indent + 4), '', out) return out
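Roughly, the nested layout this produces for a small dict looks like the following; the output is sketched by hand, not captured from a run:

.. code-block:: python

    data = {'web01': {'test.ping': True}}
    # Rendered by this outputter roughly as:
    # web01:
    #     ----------
    #     test.ping:
    #         True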
':param attrs: are the attribute names of any format codes in `codes` :param kwargs: may contain `x`, an integer in the range [0-255] that selects the corresponding color from the extended ANSI 256 color space for foreground text `rgb`, an iterable of 3 integers in the range [0-255] that select the corresponding colors from the extended ANSI 256^3 color space for foreground text `bg_x`, an integer in the range [0-255] that selects the corresponding color from the extended ANSI 256 color space for background text `bg_rgb`, an iterable of 3 integers in the range [0-255] that select the corresponding colors from the extended ANSI 256^3 color space for background text `reset`, prepend reset SGR code to sequence (default `True`) Examples: .. code-block:: python red_underlined = TextFormat(\'red\', \'underline\') nuanced_text = TextFormat(x=29, bg_x=71) magenta_on_green = TextFormat(\'magenta\', \'bg_green\') print(\'{0}Can you read this?{1}\'.format(magenta_on_green, TextFormat(\'reset\')))'
def __init__(self, *attrs, **kwargs):
self.codes = [codes[attr.lower()] for attr in attrs if isinstance(attr, six.string_types)] if kwargs.get(u'reset', True): self.codes[:0] = [codes[u'reset']] def qualify_int(i): if isinstance(i, int): return str(i % 256) def qualify_triple_int(t): if (isinstance(t, (list, tuple)) and (len(t) == 3)): return (qualify_int(t[0]), qualify_int(t[1]), qualify_int(t[2])) if (kwargs.get(u'x', None) is not None): self.codes.extend((codes[u'extended'], u'5', qualify_int(kwargs[u'x']))) elif (kwargs.get(u'rgb', None) is not None): self.codes.extend((codes[u'extended'], u'2')) self.codes.extend(qualify_triple_int(kwargs[u'rgb'])) if (kwargs.get(u'bg_x', None) is not None): self.codes.extend((codes[u'extended'], u'5', qualify_int(kwargs[u'bg_x']))) elif (kwargs.get(u'bg_rgb', None) is not None): self.codes.extend((codes[u'extended'], u'2')) self.codes.extend(qualify_triple_int(kwargs[u'bg_rgb'])) self.sequence = (u'%s%s%s' % (graph_prefix, u';'.join(self.codes), graph_suffix))
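For orientation, the escape sequence this ends up emitting follows the standard ANSI SGR form; for example, 256-color foreground 29 over 256-color background 71 (the numbers below are plain ANSI conventions, not values taken from the module's codes table):

.. code-block:: python

    sequence = '\x1b[' + ';'.join(['0', '38', '5', '29', '48', '5', '71']) + 'm'
    print(sequence + 'nuanced text' + '\x1b[0m')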
'Format :param text: by prefixing `self.sequence` and suffixing the reset sequence if :param reset: is `True`. Examples: .. code-block:: python green_blink_text = TextFormat(\'blink\', \'green\') \'The answer is: {0}\'.format(green_blink_text(42))'
def __call__(self, text, reset=True):
end = (TextFormat(u'reset') if reset else u'') return (u'%s%s%s' % (self.sequence, text, end))
'Gather the specified data from the minion data cache'
def gather_cache(self):
cache = {'grains': {}, 'pillar': {}} if (self.grains or self.pillar): if self.opts.get('minion_data_cache'): minions = self.cache.ls('minions') if (not minions): return cache for minion in minions: total = self.cache.fetch('minions/{0}'.format(minion), 'data') if ('pillar' in total): if self.pillar_keys: cache['pillar'][minion] = {} for key in self.pillar_keys: if (key in total['pillar']): cache['pillar'][minion][key] = total['pillar'][key] else: cache['pillar'][minion] = total['pillar'] else: cache['pillar'][minion] = {} if ('grains' in total): if self.grain_keys: cache['grains'][minion] = {} for key in self.grain_keys: if (key in total['grains']): cache['grains'][minion][key] = total['grains'][key] else: cache['grains'][minion] = total['grains'] else: cache['grains'][minion] = {} return cache
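An illustrative shape of what gather_cache() returns when both grains and pillar gathering are enabled; the minion id and values are invented:

.. code-block:: python

    cache = {
        'grains': {'web01': {'os': 'Ubuntu', 'num_cpus': 2}},
        'pillar': {'web01': {'role': 'webserver'}},
    }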
'Start the system!'
def start_runtime(self):
while True: try: self.call_runtime() except Exception: log.error('Exception in Thorium: ', exc_info=True) time.sleep(self.opts['thorium_interval'])
'Compile the top file and return the lowstate for the thorium runtime to iterate over'
def get_chunks(self, exclude=None, whitelist=None):
ret = {} err = [] try: top = self.get_top() except SaltRenderError as err: return ret except Exception: trb = traceback.format_exc() err.append(trb) return err err += self.verify_tops(top) matches = self.top_matches(top) if (not matches): msg = 'No Top file found!' raise SaltRenderError(msg) matches = self.matches_whitelist(matches, whitelist) (high, errors) = self.render_highstate(matches) if exclude: if isinstance(exclude, six.string_types): exclude = exclude.split(',') if ('__exclude__' in high): high['__exclude__'].extend(exclude) else: high['__exclude__'] = exclude err += errors (high, ext_errors) = self.state.reconcile_extend(high) err += ext_errors err += self.state.verify_high(high) if err: raise SaltRenderError(err) return self.state.compile_high_data(high)
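For orientation, a single compiled low chunk is a flat dict along these lines; the state, function and values are invented:

.. code-block:: python

    chunk = {
        '__sls__': 'key_clean',
        '__env__': 'base',
        'state': 'reg',
        'name': 'foo',
        'fun': 'set',
        'order': 10000,
    }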
'Iterate over the available events and return a list of events'
def get_events(self):
ret = [] while True: event = self.event.get_event(wait=1, full=True) if (event is None): return ret ret.append(event)
'Execute the runtime'
def call_runtime(self):
cache = self.gather_cache() chunks = self.get_chunks() interval = self.opts['thorium_interval'] recompile = self.opts.get('thorium_recompile', 300) r_start = time.time() while True: events = self.get_events() if (not events): time.sleep(interval) continue start = time.time() self.state.inject_globals['__events__'] = events self.state.call_chunks(chunks) elapsed = (time.time() - start) left = (interval - elapsed) if (left > 0): time.sleep(left) self.state.reset_run_num() if ((start - r_start) > recompile): cache = self.gather_cache() chunks = self.get_chunks() if (self.reg_ret is not None): self.returners['{0}.save_reg'.format(self.reg_ret)](chunks) r_start = time.time()
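A minimal sketch of the pacing logic call_runtime() uses: each pass is held to roughly `interval` seconds by sleeping off whatever time the work left over (the interval value is invented):

.. code-block:: python

    import time

    interval = 0.5  # stand-in for opts['thorium_interval']
    start = time.time()
    # ... run the compiled chunks against the gathered events ...
    left = interval - (time.time() - start)
    if left > 0:
        time.sleep(left)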
'Set the opts dict to defaults and allow for opts to be overridden in the kwargs'
def _opts_defaults(self, **kwargs):
opts = salt.config.DEFAULT_CLOUD_OPTS.copy() opts.update(self.opts.copy()) opts['parallel'] = False opts['keep_tmp'] = False opts['deploy'] = True opts['update_bootstrap'] = False opts['show_deploy_args'] = False opts['script_args'] = '' if ('kwargs' in kwargs): opts.update(kwargs['kwargs']) opts.update(kwargs) profile = opts.get('profile', None) if profile: tmp_profiles = opts.get('profiles', {}).copy() for _profile in [a for a in tmp_profiles]: if (not (_profile == profile)): tmp_profiles.pop(_profile) providers = [a.get('provider', '').split(':')[0] for a in six.itervalues(tmp_profiles) if a.get('provider', '')] if providers: _providers = opts.get('providers', {}) for provider in list(_providers): if (provider not in providers): _providers.pop(provider) return opts
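An illustration of the override order _opts_defaults() applies; the keys shown are real cloud options but the values are invented:

.. code-block:: python

    opts = {'parallel': False, 'deploy': True, 'script_args': ''}  # cloud defaults
    opts.update({'deploy': False})   # values passed under kwargs['kwargs']
    opts.update({'parallel': True})  # remaining keyword arguments win last
    # opts -> {'parallel': True, 'deploy': False, 'script_args': ''}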
'Pass the cloud function and low data structure to run'
def low(self, fun, low):
l_fun = getattr(self, fun) f_call = salt.utils.format_call(l_fun, low) return l_fun(*f_call.get('args', ()), **f_call.get('kwargs', {}))
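A small usage sketch, assuming a CloudClient instance named client; low() resolves the named method and splats the dict into it:

.. code-block:: python

    client.low('list_sizes', {'provider': 'ec2'})
    # roughly equivalent to calling client.list_sizes(provider='ec2')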
'List all available sizes in configured cloud systems'
def list_sizes(self, provider=None):
mapper = salt.cloud.Map(self._opts_defaults()) return salt.utils.simple_types_filter(mapper.size_list(provider))
'List all available images in configured cloud systems'
def list_images(self, provider=None):
mapper = salt.cloud.Map(self._opts_defaults()) return salt.utils.simple_types_filter(mapper.image_list(provider))
'List all available locations in configured cloud systems'
def list_locations(self, provider=None):
mapper = salt.cloud.Map(self._opts_defaults()) return salt.utils.simple_types_filter(mapper.location_list(provider))