Please provide a description of the function:
def get_results_from_passive(self, scheduler_instance_id):
    # Do I know this scheduler?
    # logger.info("My schedulers: %s %s", self.schedulers, type(self.schedulers))
    if not self.schedulers:
        # Probably not yet configured ...
        logger.debug("I do not have any scheduler: %s", self.schedulers)
        return []

    scheduler_link = None
    for link in list(self.schedulers.values()):
        if scheduler_instance_id == link.instance_id:
            scheduler_link = link
            break
    else:
        logger.warning("I do not know this scheduler: %s", scheduler_instance_id)
        return []

    logger.debug("Get results for the scheduler: %s", scheduler_instance_id)
    ret, scheduler_link.wait_homerun = scheduler_link.wait_homerun, {}
    logger.debug("Results: %s" % (list(ret.values())) if ret else "No results available")
    return list(ret.values())
[ "Get executed actions results from a passive satellite for a specific scheduler\n\n :param scheduler_instance_id: scheduler id\n :type scheduler_instance_id: int\n :return: Results list\n :rtype: list\n " ]
Please provide a description of the function:
def clean_previous_run(self):
    # Clean all lists
    self.arbiters.clear()
    self.schedulers.clear()
    with self.external_commands_lock:
        self.external_commands = self.external_commands[:]
[ "Clean variables from previous configuration,\n such as schedulers, broks and external commands\n\n :return: None\n " ]
Please provide a description of the function:
def setup_new_conf(self):
    # pylint: disable=too-many-locals, too-many-branches
    with self.conf_lock:
        # No more configuration now!
        self.have_conf = False

        logger.info("Received a new configuration (arbiters / schedulers)")

        # Clean our execution context
        self.clean_previous_run()

        # Check configuration is valid
        if '_status' in self.new_conf:
            logger.error(self.new_conf['_status'])
            self.cur_conf = {}

        # Get the new configuration
        self.cur_conf = self.new_conf
        # self_conf is our own configuration from the alignak environment
        self_conf = self.cur_conf['self_conf']

        logger.debug("Received a new configuration, containing:")
        for key in self.cur_conf:
            try:
                logger.debug("- %s: %s", key, self.cur_conf[key])
            except UnicodeDecodeError:
                logger.error("- %s: %s", key, self.cur_conf[key].decode('utf8', 'ignore'))
        logger.debug("satellite self configuration part: %s", self_conf)

        if 'satellites' not in self.cur_conf:
            self.cur_conf['satellites'] = []
        if 'modules' not in self.cur_conf:
            self.cur_conf['modules'] = []

        # Update Alignak name
        self.alignak_name = self.cur_conf['alignak_name']
        logger.info("My Alignak instance: %s", self.alignak_name)

        # This to indicate that the new configuration got managed...
        self.new_conf = {}

        # Set our timezone from arbiter
        use_timezone = self_conf.get('use_timezone', 'NOTSET')
        if use_timezone != 'NOTSET':
            logger.info("Setting our timezone to %s", use_timezone)
            os.environ['TZ'] = use_timezone
            time.tzset()

        # Now we create our arbiters and schedulers links
        for link_type in ['arbiters', 'schedulers']:
            if link_type not in self.cur_conf:
                logger.error("Missing %s in the configuration!", link_type)
                continue

            if link_type == 'schedulers' and self.type == 'scheduler':
                # Do not do anything with my own link!
                continue

            my_satellites = getattr(self, link_type, {})
            received_satellites = self.cur_conf[link_type]
            for link_uuid in received_satellites:
                rs_conf = received_satellites[link_uuid]
                logger.debug("- received %s - %s: %s", rs_conf['instance_id'],
                             rs_conf['type'], rs_conf['name'])

                # Must look if we already had a configuration and save our broks
                already_got = rs_conf['instance_id'] in my_satellites
                broks = []
                actions = {}
                wait_homerun = {}
                external_commands = {}
                running_id = 0
                if already_got:
                    logger.warning("I already got: %s", rs_conf['instance_id'])
                    # Save some information
                    running_id = my_satellites[link_uuid].running_id
                    (broks, actions, wait_homerun, external_commands) = \
                        my_satellites[link_uuid].get_and_clear_context()
                    # Delete the former link
                    del my_satellites[link_uuid]

                # My new satellite link...
                new_link = SatelliteLink.get_a_satellite_link(link_type[:-1], rs_conf)
                my_satellites[new_link.uuid] = new_link
                logger.info("I got a new %s satellite: %s", link_type[:-1], new_link)

                new_link.running_id = running_id
                new_link.external_commands = external_commands
                new_link.broks = broks
                new_link.wait_homerun = wait_homerun
                new_link.actions = actions

                # Replacing satellite address and port by those defined in satellite_map
                if new_link.name in self_conf.get('satellite_map', {}):
                    overriding = self_conf.get('satellite_map')[new_link.name]
                    # satellite = dict(satellite)  # make a copy
                    # new_link.update(self_conf.get('satellite_map', {})[new_link.name])
                    logger.warning("Do not override the configuration for: %s, with: %s. "
                                   "Please check whether this is necessary!",
                                   new_link.name, overriding)

        # For each scheduler, we received its managed hosts list
        self.hosts_schedulers = {}
        logger.debug("My arbiters: %s", self.arbiters)
        logger.debug("My schedulers: %s", self.schedulers)
        for link_uuid in self.schedulers:
            # We received the hosts names for each scheduler
            for host_name in self.schedulers[link_uuid].managed_hosts_names:
                self.hosts_schedulers[host_name] = link_uuid
[ "Setup the new configuration received from Arbiter\n\n This function is the generic treatment needed for every Alignak daemon when it receivss\n a new configuration from the Arbiter:\n - save the new configuration\n - dump the main configuration elements\n - get its own configuration (self_conf)\n - get its name and update the process title\n - set the timezone if needed\n - register its statistics manager\n - get and configure its arbiters and schedulers relation\n\n Setting the self.new_conf as None is to indicate that the new configuration has been\n managed.\n\n Note: it is important to protect the configuration management thanks to a lock!\n\n :return: None\n " ]
Please provide a description of the function:
def get_events(self):
    res = copy.copy(self.events)
    del self.events[:]
    return res
[ "Get event list from satellite\n\n :return: A copy of the events list\n :rtype: list\n " ]
Please provide a description of the function:
def get_daemon_stats(self, details=False):
    # Call the daemon one
    res = super(BaseSatellite, self).get_daemon_stats(details=details)

    counters = res['counters']
    counters['external-commands'] = len(self.external_commands)
    counters['satellites.arbiters'] = len(self.arbiters)
    counters['satellites.schedulers'] = len(self.schedulers)

    return res
[ "Increase the stats provided by the Daemon base class\n\n :return: stats dictionary\n :rtype: dict\n " ]
Please provide a description of the function:
def manage_action_return(self, action):
    # Maybe our workers send us something else than an action
    # if so, just add this in other queues and return
    # todo: test a class instance
    if action.__class__.my_type not in ['check', 'notification', 'eventhandler']:
        self.add(action)
        return

    # Ok, it's a result. Get the concerned scheduler uuid
    scheduler_uuid = action.my_scheduler
    logger.debug("Got action return: %s / %s", scheduler_uuid, action.uuid)

    try:
        # Now that we know where to put the action result, we do not need any reference to
        # the scheduler nor the worker
        del action.my_scheduler
        del action.my_worker
    except AttributeError:  # pragma: no cover, simple protection
        logger.error("AttributeError Got action return: %s / %s", scheduler_uuid, action)

    # And we remove it from the actions queue of the scheduler too
    try:
        del self.schedulers[scheduler_uuid].actions[action.uuid]
    except KeyError as exp:
        logger.error("KeyError del scheduler action: %s / %s - %s",
                     scheduler_uuid, action.uuid, str(exp))

    # We tag it as "return wanted", and move it in the wait return queue
    try:
        self.schedulers[scheduler_uuid].wait_homerun[action.uuid] = action
    except KeyError as exp:  # pragma: no cover, simple protection
        logger.error("KeyError Add home run action: %s / %s - %s",
                     scheduler_uuid, action.uuid, str(exp))
[ "Manage action return from Workers\n We just put them into the corresponding sched\n and we clean unused properties like my_scheduler\n\n :param action: the action to manage\n :type action: alignak.action.Action\n :return: None\n " ]
Please provide a description of the function:
def push_results(self):
    # For all schedulers, we check for wait_homerun
    # and we send back results
    for scheduler_link_uuid in self.schedulers:
        scheduler_link = self.schedulers[scheduler_link_uuid]
        if not scheduler_link.active:
            logger.warning("My scheduler '%s' is not active currently", scheduler_link.name)
            continue

        if not scheduler_link.wait_homerun:
            # Nothing to push back...
            continue

        # NB: it's **mostly** safe for us to not use some lock around
        # this 'results' / sched['wait_homerun'].
        # Because it can only be modified (for adding new values) by the
        # same thread running this function (that is the main satellite
        # thread), and this occurs exactly in self.manage_action_return().
        # Another possibility is for the sched['wait_homerun'] to be
        # cleared within/by:
        # ISchedulers.get_results() -> Satellite.get_return_for_passive()
        # This can so happen in an (http) client thread.
        results = scheduler_link.wait_homerun
        logger.debug("Pushing %d results to '%s'", len(results), scheduler_link.name)

        # So, at worst, some results would be received twice on the
        # scheduler level, which shouldn't be a problem given they are
        # indexed by their "action_id".
        scheduler_link.push_results(list(results.values()), self.name)
        results.clear()
[ "Push the checks/actions results to our schedulers\n\n :return: None\n " ]
Please provide a description of the function:
def create_and_launch_worker(self, module_name='fork'):
    logger.info("Allocating new '%s' worker...", module_name)

    # If we are in the fork module, we do not specify a target
    target = None
    __warned = []
    if module_name == 'fork':
        target = None
    else:
        for module in self.modules_manager.instances:
            # First, see if the module name matches...
            if module.get_name() == module_name:
                # ... and then if is a 'worker' module one or not
                if not module.properties.get('worker_capable', False):
                    raise NotWorkerMod
                target = module.work
        if target is None:
            if module_name not in __warned:
                logger.warning("No target found for %s, NOT creating a worker for it...",
                               module_name)
                __warned.append(module_name)
            return

    # We give to the Worker the instance name of the daemon (eg. poller-master)
    # and not the daemon type (poller)
    queue = Queue()
    worker = Worker(module_name, queue, self.returns_queue, self.processes_by_worker,
                    max_plugins_output_length=self.max_plugins_output_length,
                    target=target, loaded_into=self.name)
    # worker.module_name = module_name

    # save this worker
    self.workers[worker.get_id()] = worker

    # And save the Queue of this worker, with key = worker id
    # self.q_by_mod[module_name][worker.uuid] = queue
    self.q_by_mod[module_name][worker.get_id()] = queue

    # Ok, all is good. Start it!
    worker.start()

    logger.info("Started '%s' worker: %s (pid=%d)",
                module_name, worker.get_id(), worker.get_pid())
[ "Create and launch a new worker, and put it into self.workers\n It can be mortal or not\n\n :param module_name: the module name related to the worker\n default is \"fork\" for no module\n Indeed, it is actually the module 'python_name'\n :type module_name: str\n :return: None\n " ]
Please provide a description of the function:
def do_stop_workers(self):
    logger.info("Stopping all workers (%d)", len(self.workers))
    for worker in list(self.workers.values()):
        try:
            logger.info(" - stopping '%s'", worker.get_id())
            worker.terminate()
            worker.join(timeout=1)
            logger.info(" - stopped")
        # An already dead worker or in a worker
        except (AttributeError, AssertionError):
            pass
        except Exception as exp:  # pylint: disable=broad-except
            logger.error("exception: %s", str(exp))
[ "Stop all workers\n\n :return: None\n " ]
Please provide a description of the function:
def get_broks(self):
    res = copy.copy(self.broks)
    del self.broks[:]
    return res
[ "Get brok list from satellite\n\n :return: A copy of the broks list\n :rtype: list\n " ]
Please provide a description of the function:
def check_and_del_zombie_workers(self):  # pragma: no cover, not with unit tests...
    # pylint: disable=not-callable
    # Active children make a join with everyone, useful :)
    # active_children()
    for p in active_children():
        logger.debug("got child: %s", p)

    w_to_del = []
    for worker in list(self.workers.values()):
        # If a worker goes down and we did not ask him, it's not
        # good: we can think that we have a worker and it's not True
        # So we del it
        logger.debug("checking if worker %s (pid=%d) is alive",
                     worker.get_id(), worker.get_pid())
        if not self.interrupted and not worker.is_alive():
            logger.warning("The worker %s (pid=%d) went down unexpectedly!",
                           worker.get_id(), worker.get_pid())
            # Terminate immediately
            worker.terminate()
            worker.join(timeout=1)
            w_to_del.append(worker.get_id())

    # OK, now really del workers from queues
    # And requeue the actions they were managing
    for worker_id in w_to_del:
        worker = self.workers[worker_id]

        # Del the queue of the module queue
        del self.q_by_mod[worker.module_name][worker.get_id()]

        for scheduler_uuid in self.schedulers:
            sched = self.schedulers[scheduler_uuid]
            for act in list(sched.actions.values()):
                if act.status == ACT_STATUS_QUEUED and act.my_worker == worker_id:
                    # Got a check that will NEVER return if we do not restart it
                    self.assign_to_a_queue(act)

        # So now we can really forget it
        del self.workers[worker_id]
[ "Check if worker are fine and kill them if not.\n Dispatch the actions in the worker to another one\n\n TODO: see if unit tests would allow to check this code?\n\n :return: None\n " ]
Please provide a description of the function:
def adjust_worker_number_by_load(self):
    if self.interrupted:
        logger.debug("Trying to adjust worker number. Ignoring because we are stopping.")
        return

    to_del = []
    logger.debug("checking worker count."
                 " Currently: %d workers, min per module : %d, max per module : %d",
                 len(self.workers), self.min_workers, self.max_workers)

    # I want at least min_workers by module then if I can, I add worker for load balancing
    for mod in self.q_by_mod:
        # At least min_workers
        todo = max(0, self.min_workers - len(self.q_by_mod[mod]))
        for _ in range(todo):
            try:
                self.create_and_launch_worker(module_name=mod)
            # Maybe this module is not a true worker one.
            # if so, just delete it from q_by_mod
            except NotWorkerMod:
                to_del.append(mod)
                break

    for mod in to_del:
        logger.warning("The module %s is not a worker one, I remove it from the worker list.",
                       mod)
        del self.q_by_mod[mod]
[ "Try to create the minimum workers specified in the configuration\n\n :return: None\n " ]
Please provide a description of the function:
def _get_queue_for_the_action(self, action):
    # Get the module name; if none is set, take 'fork'
    mod = getattr(action, 'module_type', 'fork')
    queues = list(self.q_by_mod[mod].items())

    # Maybe there is no more queue, it's very bad!
    if not queues:
        return (0, None)

    # Advance the round-robin index to pick the next action queue
    self.rr_qid = (self.rr_qid + 1) % len(queues)
    (worker_id, queue) = queues[self.rr_qid]

    # Return the id of the worker and its queue
    return (worker_id, queue)
[ "Find action queue for the action depending on the module.\n The id is found with action modulo on action id\n\n :param a: the action that need action queue to be assigned\n :type action: object\n :return: worker id and queue. (0, None) if no queue for the module_type\n :rtype: tuple\n " ]
Please provide a description of the function:
def add_actions(self, actions_list, scheduler_instance_id):
    # We check for new check in each schedulers and put the result in new_checks
    scheduler_link = None
    for scheduler_id in self.schedulers:
        logger.debug("Trying to add an action, scheduler: %s", self.schedulers[scheduler_id])
        if scheduler_instance_id == self.schedulers[scheduler_id].instance_id:
            scheduler_link = self.schedulers[scheduler_id]
            break
    else:
        logger.error("Trying to add actions from an unknown scheduler: %s",
                     scheduler_instance_id)
        return
    if not scheduler_link:
        logger.error("Trying to add actions, but scheduler link is not found for: %s, "
                     "actions: %s", scheduler_instance_id, actions_list)
        return
    logger.debug("Found scheduler link: %s", scheduler_link)

    for action in actions_list:
        # First we look if the action is identified
        uuid = getattr(action, 'uuid', None)
        if uuid is None:
            try:
                action = unserialize(action, no_load=True)
                uuid = action.uuid
            except AlignakClassLookupException:
                logger.error('Cannot un-serialize action: %s', action)
                continue

        # If we already have this action, we are already working for it!
        if uuid in scheduler_link.actions:
            continue

        # Action is attached to a scheduler
        action.my_scheduler = scheduler_link.uuid
        scheduler_link.actions[action.uuid] = action
        self.assign_to_a_queue(action)
[ "Add a list of actions to the satellite queues\n\n :param actions_list: Actions list to add\n :type actions_list: list\n :param scheduler_instance_id: sheduler link to assign the actions to\n :type scheduler_instance_id: SchedulerLink\n :return: None\n " ]
Please provide a description of the function:
def assign_to_a_queue(self, action):
    (worker_id, queue) = self._get_queue_for_the_action(action)
    if not worker_id:
        return

    # Tag the action as "in the worker i"
    action.my_worker = worker_id
    action.status = ACT_STATUS_QUEUED

    msg = Message(_type='Do', data=action, source=self.name)
    logger.debug("Queuing message: %s", msg)
    queue.put_nowait(msg)
    logger.debug("Queued")
[ "Take an action and put it to a worker actions queue\n\n :param action: action to put\n :type action: alignak.action.Action\n :return: None\n " ]
Please provide a description of the function:
def get_new_actions(self):
    try:
        _t0 = time.time()
        self.do_get_new_actions()
        statsmgr.timer('actions.got.time', time.time() - _t0)
    except RuntimeError:
        logger.error("Exception like issue #1007")
[ " Wrapper function for do_get_new_actions\n For stats purpose\n\n :return: None\n TODO: Use a decorator for timing this function\n " ]
Please provide a description of the function:
def do_get_new_actions(self):
    # Here are the differences between a poller and a reactionner:
    # Poller will only do checks,
    # Reactionner will do actions (notifications and event handlers)
    do_checks = self.__class__.do_checks
    do_actions = self.__class__.do_actions

    # We check and get the new actions to execute in each of our schedulers
    for scheduler_link_uuid in self.schedulers:
        scheduler_link = self.schedulers[scheduler_link_uuid]

        if not scheduler_link.active:
            logger.warning("My scheduler '%s' is not active currently", scheduler_link.name)
            continue

        logger.debug("get new actions, scheduler: %s", scheduler_link.name)

        # OK, go for it :)
        _t0 = time.time()
        actions = scheduler_link.get_actions({'do_checks': do_checks,
                                              'do_actions': do_actions,
                                              'poller_tags': self.poller_tags,
                                              'reactionner_tags': self.reactionner_tags,
                                              'worker_name': self.name,
                                              'module_types': list(self.q_by_mod.keys())})
        if actions:
            logger.debug("Got %d actions from %s", len(actions), scheduler_link.name)
            # We 'tag' them with my_scheduler and put into queue for workers
            self.add_actions(actions, scheduler_link.instance_id)
        logger.debug("Got %d actions from %s in %s",
                     len(actions), scheduler_link.name, time.time() - _t0)
        statsmgr.gauge('actions.added.count.%s' % (scheduler_link.name), len(actions))
[ "Get new actions from schedulers\n Create a Message and put into the module queue\n REF: doc/alignak-action-queues.png (1)\n\n :return: None\n " ]
Please provide a description of the function:
def clean_previous_run(self):
    # Execute the base class treatment...
    super(Satellite, self).clean_previous_run()

    # Clean my lists
    del self.broks[:]
    del self.events[:]
[ "Clean variables from previous configuration,\n such as schedulers, broks and external commands\n\n :return: None\n " ]
Please provide a description of the function:
def do_loop_turn(self):
    # pylint: disable=too-many-branches
    # Try to see if one of my module is dead, and restart previously dead modules
    self.check_and_del_zombie_modules()

    # Also if some zombie workers exist...
    self.check_and_del_zombie_workers()

    # Call modules that manage a starting tick pass
    self.hook_point('tick')

    # Print stats for debug
    for _, sched in self.schedulers.items():
        for mod in self.q_by_mod:
            # In workers we've got actions sent to queue - queue size
            for (worker_id, queue) in list(self.q_by_mod[mod].items()):
                try:
                    actions_count = queue.qsize()
                    results_count = self.returns_queue.qsize()
                    logger.debug("[%s][%s][%s] actions queued: %d, results queued: %d",
                                 sched.name, mod, worker_id, actions_count, results_count)
                    # Update the statistics
                    statsmgr.gauge('worker.%s.actions-queue-size' % worker_id,
                                   actions_count)
                    statsmgr.gauge('worker.%s.results-queue-size' % worker_id,
                                   results_count)
                except (IOError, EOFError):
                    pass

    # todo: temporarily deactivate all this stuff!
    # Before return or get new actions, see how we managed
    # the former ones: are they still in queue(s)? If so, we
    # must wait more or at least have more workers
    # wait_ratio = self.wait_ratio.get_load()
    # total_q = 0
    # try:
    #     for mod in self.q_by_mod:
    #         for queue in list(self.q_by_mod[mod].values()):
    #             total_q += queue.qsize()
    # except (IOError, EOFError):
    #     pass
    # if total_q != 0 and wait_ratio < 2 * self.worker_polling_interval:
    #     logger.debug("I decide to increase the wait ratio")
    #     self.wait_ratio.update_load(wait_ratio * 2)
    #     # self.wait_ratio.update_load(self.worker_polling_interval)
    # else:
    #     # Go to self.worker_polling_interval on normal run, if wait_ratio
    #     # was >2*self.worker_polling_interval,
    #     # it make it come near 2 because if < 2, go up :)
    #     self.wait_ratio.update_load(self.worker_polling_interval)
    # wait_ratio = self.wait_ratio.get_load()
    # statsmgr.timer('core.wait-ratio', wait_ratio)
    # if self.log_loop:
    #     logger.debug("[%s] wait ratio: %f", self.name, wait_ratio)

    # Maybe we do not have enough workers, we check for it
    # and launch the new ones if needed
    self.adjust_worker_number_by_load()

    # Manage all messages we've got in the last timeout
    # for queue in self.return_messages:
    try:
        logger.debug("[%s] manage action results: %d results",
                     self.name, self.returns_queue.qsize())
        while self.returns_queue.qsize():
            msg = self.returns_queue.get_nowait()
            if msg is None:
                continue
            if not isinstance(msg, Message):
                logger.warning("Should have received a Message, got a %s!", type(msg))
                continue
            logger.debug("Got a message: %s", msg)
            if msg.get_type() == 'Done':
                logger.debug("Got (from %s) an action result: %s",
                             msg.get_source(), msg.get_data())
                self.manage_action_return(msg.get_data())
            elif msg.get_type() == 'Stats':
                logger.debug("Got (from %s) stats: %s", msg.get_source(), msg.get_data())
                if msg.get_source() in self.workers:
                    self.workers[msg.get_source()].stats = msg.get_data()
            else:
                logger.warning("Ignoring message of type: %s", msg.get_type())
    except Full:
        logger.warning("Returns queue is full")
    except Empty:
        logger.debug("Returns queue is empty")
    except (IOError, EOFError) as exp:
        logger.warning("My returns queue is no more available: %s", str(exp))
    except Exception as exp:  # pylint: disable=broad-except
        logger.error("Failed getting messages in returns queue: %s", str(exp))
        logger.error(traceback.format_exc())

    for _, sched in self.schedulers.items():
        if sched.wait_homerun:
            logger.debug("scheduler home run: %d results", len(sched.wait_homerun))

    if not self.passive:
        # If we are an active satellite, we push our results to our schedulers
        # and we fetch the new actions to execute; a passive satellite waits
        # for the schedulers to do it instead
        try:
            # We send to our schedulers the results of all finished checks
            logger.debug("pushing results...")
            self.push_results()
        except LinkError as exp:
            logger.warning("Scheduler connection failed, I could not send my results!")

        try:
            # And we get the new actions from our schedulers
            logger.debug("getting new actions...")
            self.get_new_actions()
        except LinkError as exp:
            logger.warning("Scheduler connection failed, I could not get new actions!")

    # Get objects from our modules that are not Worker based
    if self.log_loop:
        logger.debug("[%s] get objects from queues", self.name)
    self.get_objects_from_from_queues()
    statsmgr.gauge('external-commands.count', len(self.external_commands))
    statsmgr.gauge('broks.count', len(self.broks))
    statsmgr.gauge('events.count', len(self.events))
[ "Satellite main loop::\n\n * Check and delete zombies actions / modules\n * Get returns from queues\n * Adjust worker number\n * Get new actions\n\n :return: None\n " ]
Please provide a description of the function:
def setup_new_conf(self):
    # pylint: disable=too-many-branches
    # Execute the base class treatment...
    super(Satellite, self).setup_new_conf()

    # ...then our own specific treatment!
    with self.conf_lock:
        logger.info("Received a new configuration")

        # self_conf is our own configuration from the alignak environment
        # self_conf = self.cur_conf['self_conf']

        # Now manage modules
        if not self.have_modules:
            try:
                self.modules = unserialize(self.cur_conf['modules'], no_load=True)
            except AlignakClassLookupException as exp:  # pragma: no cover, simple protection
                logger.error('Cannot un-serialize modules configuration '
                             'received from arbiter: %s', exp)
            if self.modules:
                logger.info("I received some modules configuration: %s", self.modules)
                self.have_modules = True

                for module in self.modules:
                    if module.name not in self.q_by_mod:
                        self.q_by_mod[module.name] = {}

                self.do_load_modules(self.modules)
                # and start external modules too
                self.modules_manager.start_external_instances()
            else:
                logger.info("I do not have modules")

        # Initialize connection with all our satellites
        logger.info("Initializing connection with my satellites:")
        my_satellites = self.get_links_of_type(s_type='')
        for satellite in list(my_satellites.values()):
            logger.info("- : %s/%s", satellite.type, satellite.name)
            if not self.daemon_connection_init(satellite):
                logger.error("Satellite connection failed: %s", satellite)

        # Now I have a configuration!
        self.have_conf = True
[ "Setup the new configuration received from Arbiter\n\n This function calls the base satellite treatment and manages the configuration needed\n for a simple satellite daemon that executes some actions (eg. poller or reactionner):\n - configure the passive mode\n - configure the workers\n - configure the tags\n - configure the modules\n\n :return: None\n " ]
Please provide a description of the function:
def get_daemon_stats(self, details=False):
    # Call the daemon one
    res = super(Satellite, self).get_daemon_stats(details=details)

    counters = res['counters']
    counters['broks'] = len(self.broks)
    counters['events'] = len(self.events)
    counters['workers'] = len(self.workers)

    if self.workers:
        res['workers'] = {}
        for worker in list(self.workers.values()):
            stats = getattr(self.workers[worker.get_id()], 'stats', None)
            if stats:
                res['workers'][worker.get_id()] = stats

    return res
[ "Increase the stats provided by the Daemon base class\n\n :return: stats dictionary\n :rtype: dict\n " ]
Please provide a description of the function:
def main(self):
    try:
        # Start the daemon mode
        if not self.do_daemon_init_and_start():
            self.exit_on_error(message="Daemon initialization error", exit_code=3)

        self.do_post_daemon_init()

        # We wait for initial conf
        self.wait_for_initial_conf()
        if self.new_conf:
            # Setup the received configuration
            self.setup_new_conf()

            # Allocate Mortal Threads
            self.adjust_worker_number_by_load()

            # Now main loop
            self.do_main_loop()
            logger.info("Exited from the main loop.")

        self.request_stop()
    except Exception:  # pragma: no cover, this should never happen indeed ;)
        self.exit_on_exception(traceback.format_exc())
        raise
[ "Main satellite function. Do init and then mainloop\n\n :return: None\n " ]
Please provide a description of the function:
def check_activation(self, contacts):
    now = time.time()
    was_is_in_effect = self.is_in_effect
    self.is_in_effect = (self.start_time <= now <= self.end_time)

    # Raise a log entry when we get in the downtime
    if not was_is_in_effect and self.is_in_effect:
        self.enter(contacts)

    # Same for exit purpose
    if was_is_in_effect and not self.is_in_effect:
        self.exit(contacts)
[ "Enter or exit downtime if necessary\n\n :return: None\n " ]
Please provide a description of the function:
def exit(self, contacts):
    contact = contacts[self.ref]
    contact.raise_exit_downtime_log_entry()
    self.can_be_deleted = True
[ "Wrapper to call raise_exit_downtime_log_entry for ref (host/service)\n set can_be_deleted to True\n\n :return: None\n " ]
Please provide a description of the function:
def cancel(self, contacts):
    self.is_in_effect = False
    contact = contacts[self.ref]
    contact.raise_cancel_downtime_log_entry()
    self.can_be_deleted = True
[ "Wrapper to call raise_cancel_downtime_log_entry for ref (host/service)\n set can_be_deleted to True\n set is_in_effect to False\n\n :return: None\n " ]
Please provide a description of the function:
def split_semicolon(line, maxsplit=None):
    # Split on ';' character
    split_line = line.split(';')

    split_line_size = len(split_line)

    # if maxsplit is not specified, we set it to the number of part
    if maxsplit is None or maxsplit < 0:
        maxsplit = split_line_size

    # Join parts to the next one, if ends with a '\'
    # because we mustn't split if the semicolon is escaped
    i = 0
    while i < split_line_size - 1:

        # for each part, check if its ends with a '\'
        ends = split_line[i].endswith('\\')

        if ends:
            # remove the last character '\'
            split_line[i] = split_line[i][:-1]

        # append the next part to the current if it is not the last and the current
        # ends with '\' or if there is more than maxsplit parts
        if (ends or i >= maxsplit) and i < split_line_size - 1:

            split_line[i] = ";".join([split_line[i], split_line[i + 1]])

            # delete the next part
            del split_line[i + 1]
            split_line_size -= 1

        # increase i only if we don't have append because after append the new
        # string can end with '\'
        else:
            i += 1

    return split_line
[ "Split a line on semicolons characters but not on the escaped semicolons\n\n :param line: line to split\n :type line: str\n :param maxsplit: maximal number of split (if None, no limit)\n :type maxsplit: None | int\n :return: split line\n :rtype: list\n\n >>> split_semicolon('a,b;c;;g')\n ['a,b', 'c', '', 'g']\n\n >>> split_semicolon('a,b;c;;g', 2)\n ['a,b', 'c', ';g']\n\n >>> split_semicolon(r'a,b;c\\;;g', 2)\n ['a,b', 'c;', 'g']\n " ]
Please provide a description of the function:
def jsonify_r(obj):  # pragma: no cover, not for unit tests...
    # pylint: disable=too-many-branches
    res = {}
    cls = obj.__class__
    if not hasattr(cls, 'properties'):
        try:
            json.dumps(obj)
            return obj
        except TypeError:
            return None
    properties = list(cls.properties.keys())
    if hasattr(cls, 'running_properties'):
        properties += list(cls.running_properties.keys())
    for prop in properties:
        if not hasattr(obj, prop):
            continue
        val = getattr(obj, prop)
        # Maybe the property is not jsonable
        try:
            if isinstance(val, set):
                val = list(val)
            if isinstance(val, list):
                val = sorted(val)
            json.dumps(val)
            res[prop] = val
        except TypeError:
            if isinstance(val, list):
                lst = []
                for subval in val:
                    o_type = getattr(subval.__class__, 'my_type', '')
                    if o_type == 'CommandCall':
                        try:
                            lst.append(subval.call)
                        except AttributeError:  # pragma: no cover, should not happen...
                            pass
                        continue
                    if o_type and hasattr(subval, o_type + '_name'):
                        lst.append(getattr(subval, o_type + '_name'))
                    else:
                        pass
                res[prop] = lst
            else:
                o_type = getattr(val.__class__, 'my_type', '')
                if o_type == 'CommandCall':
                    try:
                        res[prop] = val.call
                    except AttributeError:  # pragma: no cover, should not happen...
                        pass
                    continue
                if o_type and hasattr(val, o_type + '_name'):
                    res[prop] = getattr(val, o_type + '_name')
    return res
[ "Convert an object into json (recursively on attribute)\n\n :param obj: obj to jsonify\n :type obj: object\n :return: json representation of obj\n :rtype: dict\n " ]
Please provide a description of the function:
def format_t_into_dhms_format(timestamp):
    mins, timestamp = divmod(timestamp, 60)
    hour, mins = divmod(mins, 60)
    day, hour = divmod(hour, 24)
    return '%sd %sh %sm %ss' % (day, hour, mins, timestamp)
[ " Convert an amount of second into day, hour, min and sec\n\n :param timestamp: seconds\n :type timestamp: int\n :return: 'Ad Bh Cm Ds'\n :rtype: str\n\n >>> format_t_into_dhms_format(456189)\n '5d 6h 43m 9s'\n\n >>> format_t_into_dhms_format(3600)\n '0d 1h 0m 0s'\n\n " ]
Please provide a description of the function:
def merge_periods(data):
    # sort by start date
    newdata = sorted(data, key=lambda drange: drange[0])
    end = 0
    for period in newdata:
        if period[0] != end and period[0] != (end - 1):
            end = period[1]

    # dat = np.array(newdata)
    dat = newdata
    new_intervals = []
    cur_start = None
    cur_end = None
    for (dt_start, dt_end) in dat:
        if cur_end is None:
            cur_start = dt_start
            cur_end = dt_end
            continue
        else:
            if cur_end >= dt_start:
                # merge, keep existing cur_start, extend cur_end
                cur_end = dt_end
            else:
                # new interval, save previous and reset current to this
                new_intervals.append((cur_start, cur_end))
                cur_start = dt_start
                cur_end = dt_end
    # make sure final interval is saved
    new_intervals.append((cur_start, cur_end))
    return new_intervals
[ "\n Merge periods to have better continous periods.\n Like 350-450, 400-600 => 350-600\n\n :param data: list of periods\n :type data: list\n :return: better continous periods\n :rtype: list\n " ]
Please provide a description of the function:
def to_split(val, split_on_comma=True):
    if isinstance(val, list):
        return val

    if not split_on_comma:
        return [val]

    val = val.split(',')
    if val == ['']:
        val = []
    return val
[ "Try to split a string with comma separator.\n If val is already a list return it\n If we don't have to split just return [val]\n If split gives only [''] empty it\n\n :param val: value to split\n :type val:\n :param split_on_comma:\n :type split_on_comma: bool\n :return: split value on comma\n :rtype: list\n\n >>> to_split('a,b,c')\n ['a', 'b', 'c']\n\n >>> to_split('a,b,c', False)\n ['a,b,c']\n\n >>> to_split(['a,b,c'])\n ['a,b,c']\n\n >>> to_split('')\n []\n " ]
Please provide a description of the function:
def list_split(val, split_on_comma=True):
    if not split_on_comma:
        return val

    new_val = []
    for subval in val:
        # This may happen when re-serializing
        if isinstance(subval, list):
            continue
        new_val.extend(subval.split(','))
    return new_val
[ "Try to split each member of a list with comma separator.\n If we don't have to split just return val\n\n :param val: value to split\n :type val:\n :param split_on_comma:\n :type split_on_comma: bool\n :return: list with members split on comma\n :rtype: list\n\n >>> list_split(['a,b,c'], False)\n ['a,b,c']\n\n >>> list_split(['a,b,c'])\n ['a', 'b', 'c']\n\n >>> list_split('')\n []\n\n " ]
Please provide a description of the function:
def to_best_int_float(val):
    integer = int(float(val))
    flt = float(val)
    # If the float is a .0 value,
    # best match is int
    if integer == flt:
        return integer
    return flt
[ "Get best type for value between int and float\n\n :param val: value\n :type val:\n :return: int(float(val)) if int(float(val)) == float(val), else float(val)\n :rtype: int | float\n\n >>> to_best_int_float(\"20.1\")\n 20.1\n\n >>> to_best_int_float(\"20.0\")\n 20\n\n >>> to_best_int_float(\"20\")\n 20\n " ]
Please provide a description of the function:
def dict_to_serialized_dict(ref, the_dict):
    result = {}
    for elt in list(the_dict.values()):
        if not getattr(elt, 'serialize', None):
            continue
        result[elt.uuid] = elt.serialize()
    return result
[ "Serialize the list of elements to a dictionary\n\n Used for the retention store\n\n :param ref: Not used\n :type ref:\n :param the_dict: dictionary to convert\n :type the_dict: dict\n :return: dict of serialized\n :rtype: dict\n " ]
Please provide a description of the function:
def list_to_serialized(ref, the_list):
    result = []
    for elt in the_list:
        if not getattr(elt, 'serialize', None):
            continue
        result.append(elt.serialize())
    return result
[ "Serialize the list of elements\n\n Used for the retention store\n\n :param ref: Not used\n :type ref:\n :param the_list: dictionary to convert\n :type the_list: dict\n :return: dict of serialized\n :rtype: dict\n " ]
Please provide a description of the function:
def to_hostnames_list(ref, tab):  # pragma: no cover, to be deprecated?
    res = []
    for host in tab:
        if hasattr(host, 'host_name'):
            res.append(host.host_name)
    return res
[ "Convert Host list into a list of host_name\n\n :param ref: Not used\n :type ref:\n :param tab: Host list\n :type tab: list[alignak.objects.host.Host]\n :return: host_name list\n :rtype: list\n " ]
Please provide a description of the function:
def to_svc_hst_distinct_lists(ref, tab):  # pragma: no cover, to be deprecated?
    res = {'hosts': [], 'services': []}
    for elem in tab:
        cls = elem.__class__
        name = elem.get_full_name()
        if cls.my_type == 'service':
            res['services'].append(name)
        else:
            res['hosts'].append(name)
    return res
[ "create a dict with 2 lists::\n\n * services: all services of the tab\n * hosts: all hosts of the tab\n\n :param ref: Not used\n :type ref:\n :param tab: list of Host and Service\n :type tab: list\n :return: dict with hosts and services names\n :rtype: dict\n " ]
Please provide a description of the function:
def master_then_spare(data):
    master = []
    spare = []
    for sdata in data:
        if sdata.spare:
            spare.append(sdata)
        else:
            master.append(sdata)
    rdata = []
    rdata.extend(master)
    rdata.extend(spare)
    return rdata
[ "Return the provided satellites list sorted as:\n - alive first,\n - then spare\n - then dead\n satellites.\n\n :param data: the SatelliteLink list\n :type data: list\n :return: sorted list\n :rtype: list\n " ]
Please provide a description of the function:
def sort_by_number_values(x00, y00):  # pragma: no cover, looks like not used!
    if len(x00) < len(y00):
        return 1
    if len(x00) > len(y00):
        return -1
    # So is equal
    return 0
[ "Compare x00, y00 base on number of values\n\n :param x00: first elem to compare\n :type x00: list\n :param y00: second elem to compare\n :type y00: list\n :return: x00 > y00 (-1) if len(x00) > len(y00), x00 == y00 (0) if id equals, x00 < y00 (1) else\n :rtype: int\n " ]
Please provide a description of the function:
def average_percentile(values):
    if not values:
        return None, None, None

    value_avg = round(float(sum(values)) / len(values), 2)
    value_max = round(percentile(values, 95), 2)
    value_min = round(percentile(values, 5), 2)
    return value_avg, value_min, value_max
[ "\n Get the average, min percentile (5%) and\n max percentile (95%) of a list of values.\n\n :param values: list of value to compute\n :type values: list\n :return: tuple containing average, min and max value\n :rtype: tuple\n " ]
Please provide a description of the function:
def strip_and_uniq(tab):
    _list = []
    for elt in tab:
        val = elt.strip()
        if val and val not in _list:
            _list.append(val)
    return _list
[ "Strip every element of a list and keep a list of ordered unique values\n\n :param tab: list to strip\n :type tab: list\n :return: stripped list with unique values\n :rtype: list\n " ]
Please provide a description of the function:
def expand_ranges(value):
    match_dict = RANGE_REGEX.match(value).groupdict()
    # the regex is supposed to always match..
    before = match_dict['before']
    after = match_dict['after']
    from_value = match_dict['from']
    if from_value is None:
        yield value
    else:
        # we have a [x-y] range
        from_value = int(from_value)
        to_value = int(match_dict['to']) + 1  # y is inclusive
        step = int(match_dict['step'] or 1)
        for idx in range(from_value, to_value, step):
            # yield "%s%s%s" % (before, idx, after)
            for sub_val in expand_ranges("%s%s%s" % (before, idx, after)):
                yield sub_val
[ "\n :param str value: The value to be \"expanded\".\n :return: A generator to yield the different resulting values from expanding\n the eventual ranges present in the input value.\n\n >>> tuple(expand_ranges(\"Item [1-3] - Bla\"))\n ('Item 1 - Bla', 'Item 2 - Bla', 'Item 3 - Bla')\n >>> tuple(expand_ranges(\"X[1-10/2]Y\"))\n ('X1Y', 'X3Y', 'X5Y', 'X7Y', 'X9Y')\n >>> tuple(expand_ranges(\"[1-6/2] [1-3]\"))\n ('1 1', '1 2', '1 3', '3 1', '3 2', '3 3', '5 1', '5 2', '5 3')\n " ]
Please provide a description of the function:
def generate_key_value_sequences(entry, default_value):
    no_one_yielded = True
    for value in entry.split(','):
        value = value.strip()
        if not value:
            continue
        full_match = KEY_VALUES_REGEX.match(value)
        if full_match is None:
            raise KeyValueSyntaxError("%r is an invalid key(-values) pattern" % value)
        key = full_match.group(1)
        tmp = {'KEY': key}
        values = full_match.group(2)
        if values:  # there is, at least, one value provided
            for idx, value_match in enumerate(VALUE_REGEX.finditer(values), 1):
                tmp['VALUE%s' % idx] = value_match.group(1)
        else:  # no value provided for this key, use the default provided:
            tmp['VALUE1'] = default_value
        tmp['VALUE'] = tmp['VALUE1']  # alias from VALUE -> VALUE1
        for subkey in expand_ranges(key):
            current = tmp.copy()
            current['KEY'] = subkey
            yield current
            no_one_yielded = False
    if no_one_yielded:
        raise KeyValueSyntaxError('At least one key must be present')
[ "Parse a key value config entry (used in duplicate foreach)\n\n If we have a key that look like [X-Y] we will expand it into Y-X+1 keys\n\n :param str entry: The config line to be parsed.\n :param str default_value: The default value to be used when none is available.\n :return: a generator yielding dicts with 'KEY' & 'VALUE' & 'VALUE1' keys,\n with eventual others 'VALUEx' (x 1 -> N) keys.\n\n >>> rsp = list(generate_key_value_sequences(\"var$(/var)$,root $(/)$\"))\n >>> import pprint\n >>> pprint.pprint(rsp)\n [{'KEY': 'var', 'VALUE': '/var', 'VALUE1': '/var'},\n {'KEY': 'root', 'VALUE': '/', 'VALUE1': '/'}]\n " ]
Please provide a description of the function:
def filter_host_by_name(name):
    def inner_filter(items):
        host = items["host"]
        if host is None:
            return False
        return host.host_name == name

    return inner_filter
[ "Filter for host\n Filter on name\n\n :param name: name to filter\n :type name: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for host. Accept if host_name == name" ]
Please provide a description of the function:
def filter_host_by_regex(regex):
    host_re = re.compile(regex)

    def inner_filter(items):
        host = items["host"]
        if host is None:
            return False
        return host_re.match(host.host_name) is not None

    return inner_filter
[ "Filter for host\n Filter on regex\n\n :param regex: regex to filter\n :type regex: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for host. Accept if regex match host_name" ]
Please provide a description of the function:
def filter_host_by_group(group):
    def inner_filter(items):
        host = items["host"]
        if host is None:
            return False
        return group in [items["hostgroups"][g].hostgroup_name for g in host.hostgroups]

    return inner_filter
[ "Filter for host\n Filter on group\n\n :param group: group name to filter\n :type group: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for host. Accept if group in host.hostgroups" ]
Please provide a description of the function:
def filter_host_by_tag(tpl):
    def inner_filter(items):
        host = items["host"]
        if host is None:
            return False
        return tpl in [t.strip() for t in host.tags]

    return inner_filter
[ "Filter for host\n Filter on tag\n\n :param tpl: tag to filter\n :type tpl: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for host. Accept if tag in host.tags" ]
Please provide a description of the function:
def filter_service_by_name(name):
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        return service.service_description == name

    return inner_filter
[ "Filter for service\n Filter on name\n\n :param name: name to filter\n :type name: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for service. Accept if service_description == name" ]
Please provide a description of the function:
def filter_service_by_regex_name(regex):
    host_re = re.compile(regex)

    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        return host_re.match(service.service_description) is not None

    return inner_filter
[ "Filter for service\n Filter on regex\n\n :param regex: regex to filter\n :type regex: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for service. Accept if regex match service_description" ]
Please provide a description of the function:
def filter_service_by_host_name(host_name):
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        host = items["hosts"][service.host]
        if host is None:
            return False
        return host.host_name == host_name

    return inner_filter
[ "Filter for service\n Filter on host_name\n\n :param host_name: host_name to filter\n :type host_name: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for service. Accept if service.host.host_name == host_name" ]
Please provide a description of the function:
def filter_service_by_regex_host_name(regex):
    host_re = re.compile(regex)

    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        host = items["hosts"][service.host]
        if host is None:
            return False
        return host_re.match(host.host_name) is not None

    return inner_filter
[ "Filter for service\n Filter on regex host_name\n\n :param regex: regex to filter\n :type regex: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for service. Accept if regex match service.host.host_name" ]
Please provide a description of the function:
def filter_service_by_hostgroup_name(group):
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        host = items["hosts"][service.host]
        if host is None:
            return False
        return group in [items["hostgroups"][g].hostgroup_name for g in host.hostgroups]

    return inner_filter
[ "Filter for service\n Filter on hostgroup\n\n :param group: hostgroup to filter\n :type group: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for service. Accept if hostgroup in service.host.hostgroups" ]
Please provide a description of the function:
def filter_service_by_host_tag_name(tpl):
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        host = items["hosts"][service.host]
        if host is None:
            return False
        return tpl in [t.strip() for t in host.tags]

    return inner_filter
[ "Filter for service\n Filter on tag\n\n :param tpl: tag to filter\n :type tpl: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for service. Accept if tpl in service.host.tags" ]
Please provide a description of the function:
def filter_service_by_servicegroup_name(group):
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        return group in [items["servicegroups"][g].servicegroup_name
                         for g in service.servicegroups]

    return inner_filter
[ "Filter for service\n Filter on group\n\n :param group: group to filter\n :type group: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for service. Accept if group in service.servicegroups" ]
Please provide a description of the function:
def filter_host_by_bp_rule_label(label):
    def inner_filter(items):
        host = items["host"]
        if host is None:
            return False
        return label in host.labels

    return inner_filter
[ "Filter for host\n Filter on label\n\n :param label: label to filter\n :type label: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for host. Accept if label in host.labels" ]
Please provide a description of the function:
def filter_service_by_host_bp_rule_label(label):
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        host = items["hosts"][service.host]
        if host is None:
            return False
        return label in host.labels

    return inner_filter
[ "Filter for service\n Filter on label\n\n :param label: label to filter\n :type label: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for service. Accept if label in service.host.labels" ]
Please provide a description of the function:
def filter_service_by_bp_rule_label(label):
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        return label in service.labels

    return inner_filter
[ "Filter for service\n Filter on label\n\n :param label: label to filter\n :type label: str\n :return: Filter\n :rtype: bool\n ", "Inner filter for service. Accept if label in service.labels" ]
Please provide a description of the function:
def parse_daemon_args(arbiter=False):
    parser = argparse.ArgumentParser(description="Alignak version %s daemon parameters" % VERSION,
                                     epilog="And that's it!")
    if arbiter:
        parser.add_argument('-a', '--arbiter', action='append',
                            dest='legacy_cfg_files',
                            help='Legacy configuration file(s). '
                                 'This option is still available but it is preferable to declare '
                                 'the Nagios-like objects files in the alignak-configuration '
                                 'section of the environment file specified with the -e option. '
                                 'Multiple -a can be used to include several configuration files.')
        parser.add_argument('-V', '--verify-config', dest='verify_only', action='store_true',
                            help='Verify the configuration file(s) and exit')
        parser.add_argument('-k', '--alignak-name', dest='alignak_name',
                            default='My Alignak',
                            help='Set the name of the Alignak instance. If not set, the arbiter '
                                 'name will be used in place. Note that if an alignak_name '
                                 'variable is defined in the configuration, it will overwrite '
                                 'this parameter. '
                                 'For a spare arbiter, this parameter must contain its name!')
        parser.add_argument('-n', '--name', dest='daemon_name', default='arbiter-master',
                            help='Daemon unique name. Must be unique for the same daemon type.')
    else:
        parser.add_argument('-n', '--name', dest='daemon_name', required=True,
                            help='Daemon unique name. Must be unique for the same daemon type.')

    parser.add_argument('-c', '--config', dest='config_file',
                        help='Daemon configuration file. '
                             'Deprecated parameter, do not use it anymore!')

    parser.add_argument('-d', '--daemon', dest='is_daemon', default=False, action='store_true',
                        help='Run as a daemon. Fork the launched process and daemonize.')

    parser.add_argument('-r', '--replace', dest='do_replace', default=False, action='store_true',
                        help='Replace previous running daemon if any pid file is found.')

    parser.add_argument('-vv', '--debug', dest='debug', default=False, action='store_true',
                        help='Set log level to debug mode (DEBUG)')

    parser.add_argument('-v', '--verbose', dest='verbose', default=False, action='store_true',
                        help='Set log level to verbose mode (INFO)')

    parser.add_argument('-o', '--host', dest='host',
                        help='Host interface used by the daemon. '
                             'Default is 0.0.0.0 (all interfaces).')

    parser.add_argument('-p', '--port', dest='port',
                        help='Port used by the daemon. '
                             'Default is set according to the daemon type.')

    parser.add_argument('-l', '--log_file', dest='log_filename',
                        help='File used for the daemon log. Set as empty to disable log file.')

    parser.add_argument('-Lm', '--log_level', dest='log_level',
                        help='Log level: DEBUG, INFO, WARNING, ERROR or CRITICAL. '
                             'Overrides the -v and -vv options.')

    parser.add_argument('-i', '--pid_file', dest='pid_filename',
                        help='File used to store the daemon pid')

    parser.add_argument('-e', '--environment', dest='env_file', required=True,
                        default='../../etc/alignak.ini',
                        help='Alignak global environment file. '
                             'This file defines all the daemons of this Alignak '
                             'instance and their configuration. Each daemon configuration '
                             'is defined in a specific section of this file.')

    # parser.add_argument('env_file',
    #                     help='Alignak global environment file. '
    #                          'This file defines all the daemons of this Alignak '
    #                          'instance and their configuration. Each daemon configuration '
    #                          'is defined in a specific section of this file.')

    return parser.parse_args()
[ "Generic parsing function for daemons\n\n All daemons:\n '-n', \"--name\": Set the name of the daemon to pick in the configuration files.\n This allows an arbiter to find its own configuration in the whole Alignak configuration\n Using this parameter is mandatory for all the daemons except for the arbiter\n (defaults to arbiter-master). If several arbiters are existing in the\n configuration this will allow to determine which one is the master/spare.\n The spare arbiter must be launched with this parameter!\n\n '-e', '--environment': Alignak environment file - the most important and mandatory\n parameter to define the name of the alignak.ini configuration file\n\n '-c', '--config': Daemon configuration file (ini file) - deprecated!\n '-d', '--daemon': Run as a daemon\n '-r', '--replace': Replace previous running daemon\n '-f', '--debugfile': File to dump debug logs.\n\n These parameters allow to override the one defined in the Alignak configuration file:\n '-o', '--host': interface the daemon will listen to\n '-p', '--port': port the daemon will listen to\n\n '-l', '--log_file': set the daemon log file name\n '-i', '--pid_file': set the daemon pid file name\n\n Arbiter only:\n \"-a\", \"--arbiter\": Monitored configuration file(s),\n (multiple -a can be used, and they will be concatenated to make a global configuration\n file) - Note that this parameter is not necessary anymore\n \"-V\", \"--verify-config\": Verify configuration file(s) and exit\n\n\n\n :param arbiter: Do we parse args for arbiter?\n :type arbiter: bool\n :return: args\n " ]
Please provide a description of the function:
def manage_signal(self, sig, frame):  # pylint: disable=unused-argument
    logger.info("worker '%s' (pid=%d) received a signal: %s",
                self._id, os.getpid(), SIGNALS_TO_NAMES_DICT[sig])
    # Do not do anything... our master daemon is managing our termination.
    self.interrupted = True
[ "Manage signals caught by the process but I do not do anything...\n our master daemon is managing our termination.\n\n :param sig: signal caught by daemon\n :type sig: str\n :param frame: current stack frame\n :type frame:\n :return: None\n " ]
Please provide a description of the function:
def set_exit_handler(self):
    signal.signal(signal.SIGINT, self.manage_signal)
    signal.signal(signal.SIGTERM, self.manage_signal)
    signal.signal(signal.SIGHUP, self.manage_signal)
    signal.signal(signal.SIGQUIT, self.manage_signal)
[ "Set the signal handler to manage_signal (defined in this class)\n Only set handlers for signal.SIGTERM, signal.SIGINT, signal.SIGUSR1, signal.SIGUSR2\n\n :return: None\n " ]
Please provide a description of the function:
def get_new_checks(self, queue, return_queue):
    try:
        logger.debug("get_new_checks: %s / %s", len(self.checks), self.processes_by_worker)
        while len(self.checks) < self.processes_by_worker:
            msg = queue.get_nowait()
            if msg is None:
                time.sleep(0.01)
                continue

            logger.debug("Got a message: %s", msg)
            if msg.get_type() == 'Do':
                logger.debug("Got an action: %s", msg.get_data())
                self.checks.append(msg.get_data())
                self.actions_got += 1
            elif msg.get_type() == 'ping':
                msg = Message(_type='pong', data='pong!', source=self._id)
                logger.debug("Queuing message: %s", msg)
                return_queue.put_nowait(msg)
                logger.debug("Queued")
            else:
                logger.warning("Ignoring message of type: %s", msg.get_type())
    except Full:
        logger.warning("Actions queue is full")
    except Empty:
        logger.debug("Actions queue is empty")
        if not self.checks:
            self._idletime += 1
    # Maybe the Queue() has been deleted by our master ?
    except (IOError, EOFError) as exp:
        logger.warning("My actions queue is no more available: %s", str(exp))
        self.interrupted = True
    except Exception as exp:  # pylint: disable=broad-except
        logger.error("Failed getting messages in actions queue: %s", str(exp))

    logger.debug("get_new_checks exit")
[ "Get new checks if less than nb_checks_max\n If no new checks got and no check in queue, sleep for 1 sec\n REF: doc/alignak-action-queues.png (3)\n\n :return: None\n " ]
Please provide a description of the function:
def launch_new_checks(self):
    # queue
    for chk in self.checks:
        if chk.status not in [ACT_STATUS_QUEUED]:
            continue
        logger.debug("Launch check: %s", chk.uuid)
        self._idletime = 0
        self.actions_launched += 1
        process = chk.execute()
        # Maybe we got a true big problem in the action launching
        if process == 'toomanyopenfiles':
            # We should die as soon as we return all checks
            logger.error("I am dying because of too many open files: %s", chk)
            self.i_am_dying = True
        else:
            if not isinstance(process, string_types):
                logger.debug("Launched check: %s, pid=%d", chk.uuid, process.pid)
[ "Launch checks that are in status\n REF: doc/alignak-action-queues.png (4)\n\n :return: None\n " ]
Please provide a description of the function:
def manage_finished_checks(self, queue):
    to_del = []
    wait_time = 1.0
    now = time.time()
    logger.debug("--- manage finished checks")
    for action in self.checks:
        logger.debug("--- checking: last poll: %s, now: %s, wait_time: %s, action: %s",
                     action.last_poll, now, action.wait_time, action)
        if action.status == ACT_STATUS_LAUNCHED and action.last_poll < now - action.wait_time:
            action.check_finished(self.max_plugins_output_length)
            wait_time = min(wait_time, action.wait_time)

        # If action done, we can launch a new one
        if action.status in [ACT_STATUS_DONE, ACT_STATUS_TIMEOUT]:
            logger.debug("--- check done/timeout: %s", action.uuid)
            self.actions_finished += 1
            to_del.append(action)
            # We answer to our master
            try:
                msg = Message(_type='Done', data=action, source=self._id)
                logger.debug("Queuing message: %s", msg)
                queue.put_nowait(msg)
            except Exception as exp:  # pylint: disable=broad-except
                logger.error("Failed putting messages in returns queue: %s", str(exp))

    for chk in to_del:
        logger.debug("--- delete check: %s", chk.uuid)
        self.checks.remove(chk)

    # Little sleep
    logger.debug("--- manage finished checks terminated, I will wait: %s", wait_time)
    time.sleep(wait_time)
[ "Check the status of checks\n if done, return message finished :)\n REF: doc/alignak-action-queues.png (5)\n\n :return: None\n " ]
Please provide a description of the function:def check_for_system_time_change(self): # pragma: no cover, hardly testable with unit tests... now = time.time() difference = now - self.t_each_loop # Now set the new value for the tick loop self.t_each_loop = now # If we have more than 15 min time change, we need to compensate it # todo: confirm that 15 minutes is a good choice... if abs(difference) > 900: # pragma: no cover, not with unit tests... return difference return 0
[ "Check if our system time change. If so, change our\n\n :return: 0 if the difference < 900, difference else\n :rtype: int\n " ]
Please provide a description of the function:def work(self, actions_queue, returns_queue, control_queue=None): # pragma: no cover try: logger.info("[%s] (pid=%d) starting my job...", self._id, os.getpid()) self.do_work(actions_queue, returns_queue, control_queue) logger.info("[%s] (pid=%d) stopped", self._id, os.getpid()) except ActionError as exp: logger.error("[%s] exited with an ActionError exception : %s", self._id, str(exp)) logger.exception(exp) raise # Catch any exception, log the exception and exit anyway except Exception as exp: # pragma: no cover, this should never happen indeed ;) logger.error("[%s] exited with an unmanaged exception : %s", self._id, str(exp)) logger.exception(exp) raise
[ "Wrapper function for do_work in order to catch the exception\n to see the real work, look at do_work\n\n :param actions_queue: Global Queue Master->Slave\n :type actions_queue: Queue.Queue\n :param returns_queue: queue managed by manager\n :type returns_queue: Queue.Queue\n :return: None\n " ]
Please provide a description of the function:def do_work(self, actions_queue, returns_queue, control_queue=None): # pragma: no cover # restore default signal handler for the workers: # signal.signal(signal.SIGTERM, signal.SIG_DFL) self.interrupted = False self.set_exit_handler() setproctitle("alignak-%s worker %s" % (self.loaded_into, self._id)) timeout = 1.0 self.checks = [] self.t_each_loop = time.time() while True: begin = time.time() logger.debug("--- loop begin: %s", begin) # If we are dying (big problem!) we do not # take new jobs, we just finished the current one if not self.i_am_dying: # REF: doc/alignak-action-queues.png (3) self.get_new_checks(actions_queue, returns_queue) # REF: doc/alignak-action-queues.png (4) self.launch_new_checks() # REF: doc/alignak-action-queues.png (5) self.manage_finished_checks(returns_queue) logger.debug("loop middle, %d checks", len(self.checks)) # Now get order from master, if any... if control_queue: try: control_message = control_queue.get_nowait() logger.info("[%s] Got a message: %s", self._id, control_message) if control_message and control_message.get_type() == 'Die': logger.info("[%s] The master said we must die... :(", self._id) break except Full: logger.warning("Worker control queue is full") except Empty: pass except Exception as exp: # pylint: disable=broad-except logger.error("Exception when getting master orders: %s. ", str(exp)) # Maybe someone asked us to die, if so, do it :) if self.interrupted: logger.info("I die because someone asked ;)") break # Look if we are dying, and if we finish all current checks # if so, we really die, our master poller will launch a new # worker because we were too weak to manage our job :( if not self.checks and self.i_am_dying: logger.warning("I die because I cannot do my job as I should " "(too many open files?)... forgive me please.") break # Manage a possible time change (our 'begin' will be shifted by the diff) diff = self.check_for_system_time_change() begin += diff logger.debug("loop check timechange: %s", diff) timeout -= time.time() - begin if timeout < 0: timeout = 1.0 else: time.sleep(0.1) logger.debug("+++ loop end: timeout = %s, idle: %s, checks: %d, " "actions (got: %d, launched: %d, finished: %d)", timeout, self._idletime, len(self.checks), self.actions_got, self.actions_launched, self.actions_finished)
[ "Main function of the worker.\n * Get checks\n * Launch new checks\n * Manage finished checks\n\n :param actions_queue: Global Queue Master->Slave\n :type actions_queue: Queue.Queue\n :param returns_queue: queue managed by manager\n :type returns_queue: Queue.Queue\n :return: None\n " ]
Please provide a description of the function:def read_requirements(filename='requirements.txt'): # allow for some leeway with the argument if not filename.startswith('requirements'): filename = 'requirements-' + filename if not os.path.splitext(filename)[1]: filename += '.txt' # no extension, add default def valid_line(line): line = line.strip() return line and not any(line.startswith(p) for p in ('#', '-')) def extract_requirement(line): egg_eq = '#egg=' if egg_eq in line: _, requirement = line.split(egg_eq, 1) return requirement return line with open(filename) as f: lines = f.readlines() return list(map(extract_requirement, filter(valid_line, lines)))
[ "Reads the list of requirements from given file.\n\n :param filename: Filename to read the requirements from.\n Uses ``'requirements.txt'`` by default.\n\n :return: Requirments as list of strings.\n " ]
Please provide a description of the function:def init_running_properties(self): for prop, entry in list(self.__class__.running_properties.items()): val = entry.default # Make a copy of the value for complex iterable types # As such, each instance has its own copy and not a simple reference setattr(self, prop, copy(val) if isinstance(val, (set, list, dict)) else val)
[ "\n Initialize the running_properties.\n Each instance have own property.\n\n :return: None\n " ]
Please provide a description of the function:def copy(self): # New dummy item with it's own running properties copied_item = self.__class__({}) # Now, copy the properties for prop in self.__class__.properties: if prop in ['uuid']: continue val = getattr(self, prop, None) if val is not None: setattr(copied_item, prop, val) # Also copy some running properties # The custom variables if hasattr(self, "customs"): copied_item.customs = copy(self.customs) # And tags/templates if hasattr(self, "tags"): copied_item.tags = copy(self.tags) if hasattr(self, "templates"): copied_item.templates = copy(self.templates) return copied_item
[ "\n Get a copy of this item but with a new id\n\n :return: copy of this object with a new id\n :rtype: object\n " ]
Please provide a description of the function:def clean(self): for prop in ('imported_from', 'use', 'plus', 'templates', 'register'): try: delattr(self, prop) except AttributeError: pass for prop in ('configuration_warnings', 'configuration_errors'): try: if getattr(self, prop, None) is not None and not getattr(self, prop): delattr(self, prop) except AttributeError: pass
[ "\n Clean properties only needed for initialization and configuration\n\n :return: None\n " ]
Please provide a description of the function:def serialize(self): cls = self.__class__ # id is not in *_properties res = {'uuid': self.uuid} for prop in cls.properties: if hasattr(self, prop) and getattr(self, prop, None) is not None: res[prop] = serialize(getattr(self, prop), True) for prop in cls.running_properties: if hasattr(self, prop) and getattr(self, prop, None) is not None: res[prop] = serialize(getattr(self, prop), True) return res
[ "This function serialize into a simple dict object.\n It is used when transferring data to other daemons over the network (http)\n\n Here is the generic function that simply export attributes declared in the\n properties dictionary and the running_properties of the object.\n\n :return: Dictionary containing key and value from properties and running_properties\n :rtype: dict\n " ]
Please provide a description of the function:def load_global_conf(cls, global_configuration): logger.debug("Propagate global parameter for %s:", cls) for prop, entry in global_configuration.properties.items(): # If some global managed configuration properties have a class_inherit clause, if not entry.managed or not getattr(entry, 'class_inherit'): continue for (cls_dest, change_name) in entry.class_inherit: if cls_dest == cls: # ok, we've got something to get value = getattr(global_configuration, prop) logger.debug("- global parameter %s=%s -> %s=%s", prop, getattr(global_configuration, prop), change_name, value) if change_name is None: setattr(cls, prop, value) else: setattr(cls, change_name, value)
[ "\n Apply global Alignak configuration.\n\n Some objects inherit some properties from the global configuration if they do not\n define their own value. E.g. the global 'accept_passive_service_checks' is inherited\n by the services as 'accept_passive_checks'\n\n :param cls: parent object\n :type cls: object\n :param global_configuration: current object (child)\n :type global_configuration: object\n :return: None\n " ]
Please provide a description of the function:def get_templates(self): use = getattr(self, 'use', '') if isinstance(use, list): return [n.strip() for n in use if n.strip()] return [n.strip() for n in use.split(',') if n.strip()]
[ "\n Get list of templates this object use\n\n :return: list of templates\n :rtype: list\n " ]
Please provide a description of the function:def get_all_plus_and_delete(self): res = {} props = list(self.plus.keys()) # we delete entries, so no for ... in ... for prop in props: res[prop] = self.get_plus_and_delete(prop) return res
[ "\n Get all self.plus items of list. We copy it, delete the original and return the copy list\n\n :return: list of self.plus\n :rtype: list\n " ]
Please provide a description of the function:def get_plus_and_delete(self, prop): val = self.plus[prop] del self.plus[prop] return val
[ "\n get a copy of the property (parameter) in self.plus, delete the original and return the\n value of copy\n\n :param prop: a property\n :type prop: str\n :return: return the value of the property\n :rtype: str\n " ]
Please provide a description of the function:def add_error(self, txt): self.configuration_errors.append(txt) self.conf_is_correct = False
[ "Add a message in the configuration errors list so we can print them\n all in one place\n\n Set the object configuration as not correct\n\n :param txt: error message\n :type txt: str\n :return: None\n " ]
Please provide a description of the function:def is_correct(self): state = self.conf_is_correct properties = self.__class__.properties for prop, entry in list(properties.items()): if hasattr(self, 'special_properties') and prop in getattr(self, 'special_properties'): continue if not hasattr(self, prop) and entry.required: msg = "[%s::%s] %s property is missing" % (self.my_type, self.get_name(), prop) self.add_error(msg) state = state & self.conf_is_correct return state
[ "\n Check if this object is correct\n\n This function:\n - checks if the required properties are defined, ignoring special_properties if some exist\n - logs the previously found warnings and errors\n\n :return: True if it's correct, otherwise False\n :rtype: bool\n " ]
Please provide a description of the function:def old_properties_names_to_new(self): old_properties = getattr(self.__class__, "old_properties", {}) for old_name, new_name in list(old_properties.items()): # Ok, if we got old_name and NO new name, # we switch the name if hasattr(self, old_name) and not hasattr(self, new_name): value = getattr(self, old_name) setattr(self, new_name, value) delattr(self, old_name)
[ "\n This function is used by service and hosts to transform Nagios2 parameters to Nagios3\n ones, like normal_check_interval to check_interval. There is a old_parameters tab\n in Classes that give such modifications to do.\n\n :return: None\n " ]
Please provide a description of the function:def get_raw_import_values(self): # pragma: no cover, never used res = {} properties = list(self.__class__.properties.keys()) # Register is not by default in the properties if 'register' not in properties: properties.append('register') for prop in properties: if hasattr(self, prop): val = getattr(self, prop) res[prop] = val return res
[ "\n Get properties => values of this object\n\n TODO: never called anywhere, still useful?\n\n :return: dictionary of properties => values\n :rtype: dict\n " ]
Please provide a description of the function:def del_downtime(self, downtime_id): if downtime_id in self.downtimes: self.downtimes[downtime_id].can_be_deleted = True del self.downtimes[downtime_id]
[ "\n Delete a downtime in this object\n\n :param downtime_id: id of the downtime to delete\n :type downtime_id: int\n :return: None\n " ]
Please provide a description of the function:def get_property_value_for_brok(self, prop, tab): entry = tab[prop] # Get the current value, or the default if need value = getattr(self, prop, entry.default) # Apply brok_transformation if need # Look if we must preprocess the value first pre_op = entry.brok_transformation if pre_op is not None: value = pre_op(self, value) return value
[ "\n Get the property of an object and brok_transformation if needed and return the value\n\n :param prop: property name\n :type prop: str\n :param tab: object with all properties of an object\n :type tab: object\n :return: value of the property original or brok converted\n :rtype: str\n " ]
Please provide a description of the function:def fill_data_brok_from(self, data, brok_type): cls = self.__class__ # Configuration properties for prop, entry in list(cls.properties.items()): # Is this property intended for broking? if brok_type in entry.fill_brok: data[prop] = self.get_property_value_for_brok(prop, cls.properties) # And the running properties if hasattr(cls, 'running_properties'): # We've got prop in running_properties too for prop, entry in list(cls.running_properties.items()): # if 'fill_brok' in cls.running_properties[prop]: if brok_type in entry.fill_brok: data[prop] = self.get_property_value_for_brok(prop, cls.running_properties)
[ "\n Add properties to 'data' parameter with properties of this object when 'brok_type'\n parameter is defined in fill_brok of these properties\n\n :param data: object to fill\n :type data: object\n :param brok_type: name of brok_type\n :type brok_type: var\n :return: None\n " ]
Please provide a description of the function:def get_initial_status_brok(self, extra=None): data = {'uuid': self.uuid} self.fill_data_brok_from(data, 'full_status') if extra: data.update(extra) return Brok({'type': 'initial_' + self.my_type + '_status', 'data': data})
[ "\n Create an initial status brok\n\n :param extra: some extra information to be added in the brok data\n :type extra: dict\n :return: Brok object\n :rtype: alignak.Brok\n " ]
Please provide a description of the function:def get_update_status_brok(self): data = {'uuid': self.uuid} self.fill_data_brok_from(data, 'full_status') return Brok({'type': 'update_' + self.my_type + '_status', 'data': data})
[ "\n Create an update item brok\n\n :return: Brok object\n :rtype: alignak.Brok\n " ]
Please provide a description of the function:def get_check_result_brok(self): data = {'uuid': self.uuid} self.fill_data_brok_from(data, 'check_result') return Brok({'type': self.my_type + '_check_result', 'data': data})
[ "\n Create check_result brok\n\n :return: Brok object\n :rtype: alignak.Brok\n " ]
Please provide a description of the function:def get_next_schedule_brok(self): data = {'uuid': self.uuid} self.fill_data_brok_from(data, 'next_schedule') return Brok({'type': self.my_type + '_next_schedule', 'data': data})
[ "\n Create next_schedule (next check) brok\n\n :return: Brok object\n :rtype: alignak.Brok\n " ]
Please provide a description of the function:def get_snapshot_brok(self, snap_output, exit_status): data = { 'uuid': self.uuid, 'snapshot_output': snap_output, 'snapshot_time': int(time.time()), 'snapshot_exit_status': exit_status, } self.fill_data_brok_from(data, 'check_result') return Brok({'type': self.my_type + '_snapshot', 'data': data})
[ "\n Create snapshot (check_result type) brok\n\n :param snap_output: value of output\n :type snap_output: str\n :param exit_status: status of exit\n :type exit_status: integer\n :return: Brok object\n :rtype: alignak.Brok\n " ]
Please provide a description of the function:def dump(self, dump_file_name=None): # pragma: no cover, never called # pylint: disable=unused-argument dump = {} for prop in self.properties: if not hasattr(self, prop): continue attr = getattr(self, prop) if isinstance(attr, list) and attr and isinstance(attr[0], Item): dump[prop] = [i.dump() for i in attr] elif isinstance(attr, Item): dump[prop] = attr.dump() elif attr: dump[prop] = getattr(self, prop) return dump
[ "\n Dump Item object properties\n\n :return: dictionary with properties\n :rtype: dict\n " ]
Please provide a description of the function:def add_items(self, items, index_items): count_templates = 0 count_items = 0 generated_items = [] for item in items: if item.is_tpl(): self.add_template(item) count_templates = count_templates + 1 else: new_items = self.add_item(item, index_items) count_items = count_items + max(1, len(new_items)) if new_items: generated_items.extend(new_items) if count_templates: logger.info(' indexed %d template(s)', count_templates) if count_items: logger.info(' created %d %s(s).', count_items, self.inner_class.my_type)
[ "\n Add items to template if is template, else add in item list\n\n :param items: items list to add\n :type items: alignak.objects.item.Items\n :param index_items: Flag indicating if the items should be indexed on the fly.\n :type index_items: bool\n :return: None\n " ]
Please provide a description of the function:def manage_conflict(self, item, name): if item.is_tpl(): existing = self.name_to_template[name] else: existing = self.name_to_item[name] if existing == item: return item existing_prio = getattr( existing, "definition_order", existing.properties["definition_order"].default) item_prio = getattr( item, "definition_order", item.properties["definition_order"].default) if existing_prio < item_prio: # Existing item has lower priority, so it has precedence. return existing if existing_prio > item_prio: # New item has lower priority, so it has precedence. # Existing item will be deleted below pass else: # Don't know which one to keep, lastly defined has precedence objcls = getattr(self.inner_class, "my_type", "[unknown]") mesg = "duplicate %s '%s', from: '%s' and '%s', using lastly defined. " \ "You may manually set the definition_order parameter to avoid this message." \ % (objcls, name, item.imported_from, existing.imported_from) item.configuration_warnings.append(mesg) if item.is_tpl(): self.remove_template(existing) else: self.remove_item(existing) return item
[ "\n Checks if an object holding the same name already exists in the index.\n\n If so, it compares their definition order: the lowest definition order\n is kept. If definition order equal, an error is risen.Item\n\n The method returns the item that should be added after it has decided\n which one should be kept.\n\n If the new item has precedence over the New existing one, the\n existing is removed for the new to replace it.\n\n :param item: object to check for conflict\n :type item: alignak.objects.item.Item\n :param name: name of the object\n :type name: str\n :return: 'item' parameter modified\n :rtype: object\n " ]
Please provide a description of the function:def add_template(self, tpl): tpl = self.index_template(tpl) self.templates[tpl.uuid] = tpl
[ "\n Add and index a template into the `templates` container.\n\n :param tpl: The template to add\n :type tpl: alignak.objects.item.Item\n :return: None\n " ]
Please provide a description of the function:def index_template(self, tpl): objcls = self.inner_class.my_type name = getattr(tpl, 'name', '') if not name: mesg = "a %s template has been defined without name, from: %s" % \ (objcls, tpl.imported_from) tpl.add_error(mesg) elif name in self.name_to_template: tpl = self.manage_conflict(tpl, name) self.name_to_template[name] = tpl logger.debug("Indexed a %s template: %s, uses: %s", tpl.my_type, name, getattr(tpl, 'use', 'Nothing')) return tpl
[ "\n Indexes a template by `name` into the `name_to_template` dictionary.\n\n :param tpl: The template to index\n :type tpl: alignak.objects.item.Item\n :return: None\n " ]
Please provide a description of the function:def remove_template(self, tpl): try: del self.templates[tpl.uuid] except KeyError: # pragma: no cover, simple protection pass self.unindex_template(tpl)
[ "\n Removes and un-index a template from the `templates` container.\n\n :param tpl: The template to remove\n :type tpl: alignak.objects.item.Item\n :return: None\n " ]
Please provide a description of the function:def unindex_template(self, tpl): name = getattr(tpl, 'name', '') try: del self.name_to_template[name] except KeyError: # pragma: no cover, simple protection pass
[ "\n Unindex a template from the `templates` container.\n\n :param tpl: The template to un-index\n :type tpl: alignak.objects.item.Item\n :return: None\n " ]
Please provide a description of the function:def add_item(self, item, index=True): # pylint: disable=too-many-branches, too-many-locals, too-many-nested-blocks name_property = getattr(self.__class__, "name_property", None) # Check if some hosts are to be self-generated... generated_hosts = [] if name_property: name = getattr(item, name_property, None) if name and '[' in name and ']' in name: # We can create several objects from the same configuration! pattern = name[name.find("[")+1:name.find("]")] if '-' in pattern: logger.debug("Found an host with a patterned name: %s", pattern) # pattern is format-min-max # format is optional limits = pattern.split('-') fmt = "%d" min_v = 1 max_v = 1 if len(limits) == 3: fmt = limits[2] new_name = name.replace('[%s-%s-%s]' % (limits[0], limits[1], fmt), '***') else: new_name = name.replace('[%s-%s]' % (limits[0], limits[1]), '***') try: min_v = int(limits[0]) except ValueError: pass try: max_v = int(limits[1]) except ValueError: pass for idx in range(min_v, max_v + 1): logger.debug("- cloning host: %s", new_name.replace('***', fmt % idx)) new_host = deepcopy(item) new_host.uuid = get_a_new_object_id() new_host.host_name = new_name.replace('***', fmt % idx) # Update some fields with the newly generated host name for prop in ['display_name', 'alias', 'notes', 'notes_url', 'action_url']: if getattr(new_host, prop, None) is None: continue value = getattr(new_host, prop) if '$HOSTNAME$' in value: setattr(new_host, prop, value.replace('$HOSTNAME$', new_host.host_name)) generated_hosts.append(new_host) if generated_hosts: for new_host in generated_hosts: if index is True: new_host = self.index_item(new_host) self.items[new_host.uuid] = new_host logger.info(" cloned %d hosts from %s", len(generated_hosts), item.get_name()) else: if index is True and name_property: item = self.index_item(item) self.items[item.uuid] = item return generated_hosts
[ "\n Add an item into our containers, and index it depending on the `index` flag.\n\n :param item: object to add\n :type item: alignak.objects.item.Item\n :param index: Flag indicating if the item should be indexed\n :type index: bool\n :return: the new items created\n :rtype list\n " ]
Please provide a description of the function:def remove_item(self, item): self.unindex_item(item) self.items.pop(item.uuid, None)
[ "\n Remove (and un-index) an object\n\n :param item: object to remove\n :type item: alignak.objects.item.Item\n :return: None\n " ]
Please provide a description of the function:def index_item(self, item): name_property = getattr(self.__class__, "name_property", None) if name_property is None: return None name = getattr(item, name_property, None) if name is None: item.add_error("a %s item has been defined without %s, from: %s" % (self.inner_class.my_type, name_property, getattr(item, 'imported_from', 'Unknown importation source!'))) elif name in self.name_to_item: item = self.manage_conflict(item, name) self.name_to_item[name] = item return item
[ "\n Index an item into our `name_to_item` dictionary.\n If an object holding the same item's name/key already exists in the index\n then the conflict is managed by the `manage_conflict` method.\n\n :param item: item to index\n :type item: alignak.objects.item.Item\n :return: item modified\n :rtype: object\n " ]
Please provide a description of the function:def unindex_item(self, item): name_property = getattr(self.__class__, "name_property", None) if name_property is None: return name = getattr(item, name_property, None) if name is None: return self.name_to_item.pop(name, None)
[ "\n Un-index an item from our name_to_item dict.\n :param item: the item to un-index\n :type item: alignak.objects.item.Item\n :return: None\n " ]
Please provide a description of the function:def old_properties_names_to_new(self): # pragma: no cover, never called for i in itertools.chain(iter(list(self.items.values())), iter(list(self.templates.values()))): i.old_properties_names_to_new()
[ "Convert old Nagios2 names to Nagios3 new names\n\n TODO: still useful?\n\n :return: None\n " ]
Please provide a description of the function:def get_all_tags(self, item): all_tags = item.get_templates() for template_id in item.templates: template = self.templates[template_id] all_tags.append(template.name) all_tags.extend(self.get_all_tags(template)) return list(set(all_tags))
[ "\n Get all tags of an item\n\n :param item: an item\n :type item: Item\n :return: list of tags\n :rtype: list\n " ]
Please provide a description of the function:def linkify_item_templates(self, item): tpls = [] tpl_names = item.get_templates() for name in tpl_names: template = self.find_tpl_by_name(name) if not template: # TODO: Check if this should not be better to report as an error ? self.add_warning("%s %s use/inherit from an unknown template: %s ! from: %s" % (type(item).__name__, item.get_name(), name, item.imported_from)) else: if template is item: self.add_error("%s %s use/inherits from itself ! from: %s" % (type(item).__name__, item.get_name(), item.imported_from)) else: tpls.append(template.uuid) item.templates = tpls
[ "\n Link templates\n\n :param item: an item\n :type item: alignak.objects.item.Item\n :return: None\n " ]