repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, 1 class) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|
Alignak-monitoring/alignak
|
alignak/objects/servicedependency.py
|
Servicedependencies.linkify_sd_by_tp
|
def linkify_sd_by_tp(self, timeperiods):
    """Replace dependency_period by a real object in service dependency
    :param timeperiods: list of timeperiod, used to look for a specific one
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :return: None
    """
    for servicedep in self:
        try:
            tp_name = servicedep.dependency_period
            timeperiod = timeperiods.find_by_name(tp_name)
            if timeperiod:
                servicedep.dependency_period = timeperiod.uuid
            else:
                servicedep.dependency_period = ''
        except AttributeError as exp:
            logger.error("[servicedependency] fail to linkify by timeperiods: %s", exp)
|
python
|
def linkify_sd_by_tp(self, timeperiods):
    """Replace dependency_period by a real object in service dependency
    :param timeperiods: list of timeperiod, used to look for a specific one
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :return: None
    """
    for servicedep in self:
        try:
            tp_name = servicedep.dependency_period
            timeperiod = timeperiods.find_by_name(tp_name)
            if timeperiod:
                servicedep.dependency_period = timeperiod.uuid
            else:
                servicedep.dependency_period = ''
        except AttributeError as exp:
            logger.error("[servicedependency] fail to linkify by timeperiods: %s", exp)
|
[
"def",
"linkify_sd_by_tp",
"(",
"self",
",",
"timeperiods",
")",
":",
"for",
"servicedep",
"in",
"self",
":",
"try",
":",
"tp_name",
"=",
"servicedep",
".",
"dependency_period",
"timeperiod",
"=",
"timeperiods",
".",
"find_by_name",
"(",
"tp_name",
")",
"if",
"timeperiod",
":",
"servicedep",
".",
"dependency_period",
"=",
"timeperiod",
".",
"uuid",
"else",
":",
"servicedep",
".",
"dependency_period",
"=",
"''",
"except",
"AttributeError",
"as",
"exp",
":",
"logger",
".",
"error",
"(",
"\"[servicedependency] fail to linkify by timeperiods: %s\"",
",",
"exp",
")"
] |
Replace dependency_period by a real object in service dependency
:param timeperiods: list of timeperiod, used to look for a specific one
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
|
[
"Replace",
"dependency_period",
"by",
"a",
"real",
"object",
"in",
"service",
"dependency"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/servicedependency.py#L381-L397
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/servicedependency.py
|
Servicedependencies.linkify_s_by_sd
|
def linkify_s_by_sd(self, services):
    """Add dependency in service objects
    :return: None
    """
    for servicedep in self:
        # Only used for debugging purpose when loops are detected
        setattr(servicedep, "service_description_string", "undefined")
        setattr(servicedep, "dependent_service_description_string", "undefined")
        if getattr(servicedep, 'service_description', None) is None or\
                getattr(servicedep, 'dependent_service_description', None) is None:
            continue
        services.add_act_dependency(servicedep.dependent_service_description,
                                    servicedep.service_description,
                                    servicedep.notification_failure_criteria,
                                    getattr(servicedep, 'dependency_period', ''),
                                    servicedep.inherits_parent)
        services.add_chk_dependency(servicedep.dependent_service_description,
                                    servicedep.service_description,
                                    servicedep.execution_failure_criteria,
                                    getattr(servicedep, 'dependency_period', ''),
                                    servicedep.inherits_parent)
        # Only used for debugging purpose when loops are detected
        setattr(servicedep, "service_description_string",
                services[servicedep.service_description].get_name())
        setattr(servicedep, "dependent_service_description_string",
                services[servicedep.dependent_service_description].get_name())
|
python
|
def linkify_s_by_sd(self, services):
    """Add dependency in service objects
    :return: None
    """
    for servicedep in self:
        # Only used for debugging purpose when loops are detected
        setattr(servicedep, "service_description_string", "undefined")
        setattr(servicedep, "dependent_service_description_string", "undefined")
        if getattr(servicedep, 'service_description', None) is None or\
                getattr(servicedep, 'dependent_service_description', None) is None:
            continue
        services.add_act_dependency(servicedep.dependent_service_description,
                                    servicedep.service_description,
                                    servicedep.notification_failure_criteria,
                                    getattr(servicedep, 'dependency_period', ''),
                                    servicedep.inherits_parent)
        services.add_chk_dependency(servicedep.dependent_service_description,
                                    servicedep.service_description,
                                    servicedep.execution_failure_criteria,
                                    getattr(servicedep, 'dependency_period', ''),
                                    servicedep.inherits_parent)
        # Only used for debugging purpose when loops are detected
        setattr(servicedep, "service_description_string",
                services[servicedep.service_description].get_name())
        setattr(servicedep, "dependent_service_description_string",
                services[servicedep.dependent_service_description].get_name())
|
[
"def",
"linkify_s_by_sd",
"(",
"self",
",",
"services",
")",
":",
"for",
"servicedep",
"in",
"self",
":",
"# Only used for debugging purpose when loops are detected",
"setattr",
"(",
"servicedep",
",",
"\"service_description_string\"",
",",
"\"undefined\"",
")",
"setattr",
"(",
"servicedep",
",",
"\"dependent_service_description_string\"",
",",
"\"undefined\"",
")",
"if",
"getattr",
"(",
"servicedep",
",",
"'service_description'",
",",
"None",
")",
"is",
"None",
"or",
"getattr",
"(",
"servicedep",
",",
"'dependent_service_description'",
",",
"None",
")",
"is",
"None",
":",
"continue",
"services",
".",
"add_act_dependency",
"(",
"servicedep",
".",
"dependent_service_description",
",",
"servicedep",
".",
"service_description",
",",
"servicedep",
".",
"notification_failure_criteria",
",",
"getattr",
"(",
"servicedep",
",",
"'dependency_period'",
",",
"''",
")",
",",
"servicedep",
".",
"inherits_parent",
")",
"services",
".",
"add_chk_dependency",
"(",
"servicedep",
".",
"dependent_service_description",
",",
"servicedep",
".",
"service_description",
",",
"servicedep",
".",
"execution_failure_criteria",
",",
"getattr",
"(",
"servicedep",
",",
"'dependency_period'",
",",
"''",
")",
",",
"servicedep",
".",
"inherits_parent",
")",
"# Only used for debugging purpose when loops are detected",
"setattr",
"(",
"servicedep",
",",
"\"service_description_string\"",
",",
"services",
"[",
"servicedep",
".",
"service_description",
"]",
".",
"get_name",
"(",
")",
")",
"setattr",
"(",
"servicedep",
",",
"\"dependent_service_description_string\"",
",",
"services",
"[",
"servicedep",
".",
"dependent_service_description",
"]",
".",
"get_name",
"(",
")",
")"
] |
Add dependency in service objects
:return: None
|
[
"Add",
"dependency",
"in",
"service",
"objects"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/servicedependency.py#L399-L429
|
train
|
Alignak-monitoring/alignak
|
alignak/modules/inner_metrics.py
|
InnerMetrics.init
|
def init(self): # pylint: disable=too-many-branches
    """Called by the daemon broker to initialize the module"""
    if not self.enabled:
        logger.info(" the module is disabled.")
        return True
    try:
        connections = self.test_connection()
    except Exception as exp: # pylint: disable=broad-except
        logger.error("initialization, test connection failed. Error: %s", str(exp))
    if self.influxdb_enabled:
        try:
            # Check that configured TSDB is existing, else creates...
            dbs = self.influx.get_list_database()
            for db in dbs:
                if db.get('name') == self.influxdb_database:
                    logger.info("the database %s is existing.", self.influxdb_database)
                    break
            else:
                # Create the database
                logger.info("creating database %s...", self.influxdb_database)
                self.influx.create_database(self.influxdb_database)
            # Check that configured TSDB retention is existing, else creates...
            if self.influxdb_retention_name:
                rps = self.influx.get_list_retention_policies()
                for rp in rps:
                    if rp.get('name') == self.influxdb_retention_name:
                        logger.info("the retention policy %s is existing.",
                                    self.influxdb_retention_name)
                        break
                else:
                    # Create a retention policy for this database
                    logger.info("creating database retention policy: %s - %s - %s...",
                                self.influxdb_retention_name, self.influxdb_retention_duration,
                                self.influxdb_retention_replication)
                    self.influx.create_retention_policy(
                        self.influxdb_retention_name, self.influxdb_retention_duration,
                        self.influxdb_retention_replication, database=self.influxdb_database)
            # Check that configured TSDB user is existing, else creates...
            if self.influxdb_username:
                users = self.influx.get_list_users()
                for user in users:
                    if user.get('user') == self.influxdb_username:
                        logger.info("the user %s is existing.",
                                    self.influxdb_username)
                        break
                else:
                    # Create a retention policy for this database
                    logger.info("creating user: %s...", self.influxdb_username)
                    self.influx.create_user(self.influxdb_username, self.influxdb_password,
                                            admin=False)
            connections = connections or True
        except Exception as exp: # pylint: disable=broad-except
            logger.error("InfluxDB, DB initialization failed. Error: %s", str(exp))
    return connections
|
python
|
def init(self): # pylint: disable=too-many-branches
    """Called by the daemon broker to initialize the module"""
    if not self.enabled:
        logger.info(" the module is disabled.")
        return True
    try:
        connections = self.test_connection()
    except Exception as exp: # pylint: disable=broad-except
        logger.error("initialization, test connection failed. Error: %s", str(exp))
    if self.influxdb_enabled:
        try:
            # Check that configured TSDB is existing, else creates...
            dbs = self.influx.get_list_database()
            for db in dbs:
                if db.get('name') == self.influxdb_database:
                    logger.info("the database %s is existing.", self.influxdb_database)
                    break
            else:
                # Create the database
                logger.info("creating database %s...", self.influxdb_database)
                self.influx.create_database(self.influxdb_database)
            # Check that configured TSDB retention is existing, else creates...
            if self.influxdb_retention_name:
                rps = self.influx.get_list_retention_policies()
                for rp in rps:
                    if rp.get('name') == self.influxdb_retention_name:
                        logger.info("the retention policy %s is existing.",
                                    self.influxdb_retention_name)
                        break
                else:
                    # Create a retention policy for this database
                    logger.info("creating database retention policy: %s - %s - %s...",
                                self.influxdb_retention_name, self.influxdb_retention_duration,
                                self.influxdb_retention_replication)
                    self.influx.create_retention_policy(
                        self.influxdb_retention_name, self.influxdb_retention_duration,
                        self.influxdb_retention_replication, database=self.influxdb_database)
            # Check that configured TSDB user is existing, else creates...
            if self.influxdb_username:
                users = self.influx.get_list_users()
                for user in users:
                    if user.get('user') == self.influxdb_username:
                        logger.info("the user %s is existing.",
                                    self.influxdb_username)
                        break
                else:
                    # Create a retention policy for this database
                    logger.info("creating user: %s...", self.influxdb_username)
                    self.influx.create_user(self.influxdb_username, self.influxdb_password,
                                            admin=False)
            connections = connections or True
        except Exception as exp: # pylint: disable=broad-except
            logger.error("InfluxDB, DB initialization failed. Error: %s", str(exp))
    return connections
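The three existence checks above all rely on Python's for/else idiom: the else branch only runs when the loop completes without hitting break, i.e. when nothing matched. A small self-contained illustration of that "scan, then create only if not found" pattern (the names below are made up for the example, not part of the module):

def ensure_exists(existing_names, wanted):
    # Mirrors the pattern used above for databases, retention policies and users:
    # scan the current items and only create when the loop found no match.
    for name in existing_names:
        if name == wanted:
            print("%s is existing." % wanted)
            break
    else:
        # Reached only when the for loop never executed 'break'
        print("creating %s..." % wanted)
        existing_names.append(wanted)

names = ["alignak"]
ensure_exists(names, "alignak")   # -> alignak is existing.
ensure_exists(names, "metrics")   # -> creating metrics...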
|
[
"def",
"init",
"(",
"self",
")",
":",
"# pylint: disable=too-many-branches",
"if",
"not",
"self",
".",
"enabled",
":",
"logger",
".",
"info",
"(",
"\" the module is disabled.\"",
")",
"return",
"True",
"try",
":",
"connections",
"=",
"self",
".",
"test_connection",
"(",
")",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"logger",
".",
"error",
"(",
"\"initialization, test connection failed. Error: %s\"",
",",
"str",
"(",
"exp",
")",
")",
"if",
"self",
".",
"influxdb_enabled",
":",
"try",
":",
"# Check that configured TSDB is existing, else creates...",
"dbs",
"=",
"self",
".",
"influx",
".",
"get_list_database",
"(",
")",
"for",
"db",
"in",
"dbs",
":",
"if",
"db",
".",
"get",
"(",
"'name'",
")",
"==",
"self",
".",
"influxdb_database",
":",
"logger",
".",
"info",
"(",
"\"the database %s is existing.\"",
",",
"self",
".",
"influxdb_database",
")",
"break",
"else",
":",
"# Create the database",
"logger",
".",
"info",
"(",
"\"creating database %s...\"",
",",
"self",
".",
"influxdb_database",
")",
"self",
".",
"influx",
".",
"create_database",
"(",
"self",
".",
"influxdb_database",
")",
"# Check that configured TSDB retention is existing, else creates...",
"if",
"self",
".",
"influxdb_retention_name",
":",
"rps",
"=",
"self",
".",
"influx",
".",
"get_list_retention_policies",
"(",
")",
"for",
"rp",
"in",
"rps",
":",
"if",
"rp",
".",
"get",
"(",
"'name'",
")",
"==",
"self",
".",
"influxdb_retention_name",
":",
"logger",
".",
"info",
"(",
"\"the retention policy %s is existing.\"",
",",
"self",
".",
"influxdb_retention_name",
")",
"break",
"else",
":",
"# Create a retention policy for this database",
"logger",
".",
"info",
"(",
"\"creating database retention policy: %s - %s - %s...\"",
",",
"self",
".",
"influxdb_retention_name",
",",
"self",
".",
"influxdb_retention_duration",
",",
"self",
".",
"influxdb_retention_replication",
")",
"self",
".",
"influx",
".",
"create_retention_policy",
"(",
"self",
".",
"influxdb_retention_name",
",",
"self",
".",
"influxdb_retention_duration",
",",
"self",
".",
"influxdb_retention_replication",
",",
"database",
"=",
"self",
".",
"influxdb_database",
")",
"# Check that configured TSDB user is existing, else creates...",
"if",
"self",
".",
"influxdb_username",
":",
"users",
"=",
"self",
".",
"influx",
".",
"get_list_users",
"(",
")",
"for",
"user",
"in",
"users",
":",
"if",
"user",
".",
"get",
"(",
"'user'",
")",
"==",
"self",
".",
"influxdb_username",
":",
"logger",
".",
"info",
"(",
"\"the user %s is existing.\"",
",",
"self",
".",
"influxdb_username",
")",
"break",
"else",
":",
"# Create a retention policy for this database",
"logger",
".",
"info",
"(",
"\"creating user: %s...\"",
",",
"self",
".",
"influxdb_username",
")",
"self",
".",
"influx",
".",
"create_user",
"(",
"self",
".",
"influxdb_username",
",",
"self",
".",
"influxdb_password",
",",
"admin",
"=",
"False",
")",
"connections",
"=",
"connections",
"or",
"True",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"logger",
".",
"error",
"(",
"\"InfluxDB, DB initialization failed. Error: %s\"",
",",
"str",
"(",
"exp",
")",
")",
"return",
"connections"
] |
Called by the daemon broker to initialize the module
|
[
"Called",
"by",
"the",
"daemon",
"broker",
"to",
"initialize",
"the",
"module"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/modules/inner_metrics.py#L235-L294
|
train
|
Alignak-monitoring/alignak
|
alignak/modules/inner_metrics.py
|
InnerMetrics.get_metrics_from_perfdata
|
def get_metrics_from_perfdata(self, service, perf_data):
    """Decode the performance data to build a metrics list"""
    result = []
    metrics = PerfDatas(perf_data)
    for metric in metrics:
        logger.debug("service: %s, metric: %s (%s)", service, metric, metric.__dict__)
        if metric.name in ['time']:
            metric.name = "duration"
        name = sanitize_name(metric.name)
        name = self.multiple_values.sub(r'.\1', name)
        if not name:
            continue
        # get metric value and its thresholds values if they exist
        name_value = {
            name: metric.value,
            'uom_' + name: metric.uom
        }
        # Get or ignore extra values depending upon module configuration
        if metric.warning and self.send_warning:
            name_value[name + '_warn'] = metric.warning
        if metric.critical and self.send_critical:
            name_value[name + '_crit'] = metric.critical
        if metric.min and self.send_min:
            name_value[name + '_min'] = metric.min
        if metric.max and self.send_max:
            name_value[name + '_max'] = metric.max
        for key, value in name_value.items():
            result.append((key, value, metric.uom))
    logger.debug("Metrics: %s - %s", service, result)
    return result
|
python
|
def get_metrics_from_perfdata(self, service, perf_data):
    """Decode the performance data to build a metrics list"""
    result = []
    metrics = PerfDatas(perf_data)
    for metric in metrics:
        logger.debug("service: %s, metric: %s (%s)", service, metric, metric.__dict__)
        if metric.name in ['time']:
            metric.name = "duration"
        name = sanitize_name(metric.name)
        name = self.multiple_values.sub(r'.\1', name)
        if not name:
            continue
        # get metric value and its thresholds values if they exist
        name_value = {
            name: metric.value,
            'uom_' + name: metric.uom
        }
        # Get or ignore extra values depending upon module configuration
        if metric.warning and self.send_warning:
            name_value[name + '_warn'] = metric.warning
        if metric.critical and self.send_critical:
            name_value[name + '_crit'] = metric.critical
        if metric.min and self.send_min:
            name_value[name + '_min'] = metric.min
        if metric.max and self.send_max:
            name_value[name + '_max'] = metric.max
        for key, value in name_value.items():
            result.append((key, value, metric.uom))
    logger.debug("Metrics: %s - %s", service, result)
    return result
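For context, perf_data follows the Nagios plugin performance data format, 'label'=value[UOM];[warn];[crit];[min];[max]. The rough stand-in parser below (not Alignak's PerfDatas class, just an illustration) shows the kind of (name, value, uom) tuples the method ends up returning; with thresholds enabled, extra name_warn / name_crit entries are added the same way.

import re

PERF_RE = re.compile(r"(?P<name>[^=\s]+)=(?P<value>[\d.]+)(?P<uom>[a-zA-Z%]*)")

def rough_metrics(perf_data):
    # Very simplified: the real parser also handles quoted labels, thresholds, min/max
    result = []
    for match in PERF_RE.finditer(perf_data):
        name = match.group("name")
        value = float(match.group("value"))
        uom = match.group("uom")
        result.append((name, value, uom))
        result.append(("uom_" + name, uom, uom))
    return result

print(rough_metrics("rta=0.066ms;100;200;0 pl=0%;40;80;0"))
# [('rta', 0.066, 'ms'), ('uom_rta', 'ms', 'ms'), ('pl', 0.0, '%'), ('uom_pl', '%', '%')]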
|
[
"def",
"get_metrics_from_perfdata",
"(",
"self",
",",
"service",
",",
"perf_data",
")",
":",
"result",
"=",
"[",
"]",
"metrics",
"=",
"PerfDatas",
"(",
"perf_data",
")",
"for",
"metric",
"in",
"metrics",
":",
"logger",
".",
"debug",
"(",
"\"service: %s, metric: %s (%s)\"",
",",
"service",
",",
"metric",
",",
"metric",
".",
"__dict__",
")",
"if",
"metric",
".",
"name",
"in",
"[",
"'time'",
"]",
":",
"metric",
".",
"name",
"=",
"\"duration\"",
"name",
"=",
"sanitize_name",
"(",
"metric",
".",
"name",
")",
"name",
"=",
"self",
".",
"multiple_values",
".",
"sub",
"(",
"r'.\\1'",
",",
"name",
")",
"if",
"not",
"name",
":",
"continue",
"# get metric value and its thresholds values if they exist",
"name_value",
"=",
"{",
"name",
":",
"metric",
".",
"value",
",",
"'uom_'",
"+",
"name",
":",
"metric",
".",
"uom",
"}",
"# Get or ignore extra values depending upon module configuration",
"if",
"metric",
".",
"warning",
"and",
"self",
".",
"send_warning",
":",
"name_value",
"[",
"name",
"+",
"'_warn'",
"]",
"=",
"metric",
".",
"warning",
"if",
"metric",
".",
"critical",
"and",
"self",
".",
"send_critical",
":",
"name_value",
"[",
"name",
"+",
"'_crit'",
"]",
"=",
"metric",
".",
"critical",
"if",
"metric",
".",
"min",
"and",
"self",
".",
"send_min",
":",
"name_value",
"[",
"name",
"+",
"'_min'",
"]",
"=",
"metric",
".",
"min",
"if",
"metric",
".",
"max",
"and",
"self",
".",
"send_max",
":",
"name_value",
"[",
"name",
"+",
"'_max'",
"]",
"=",
"metric",
".",
"max",
"for",
"key",
",",
"value",
"in",
"name_value",
".",
"items",
"(",
")",
":",
"result",
".",
"append",
"(",
"(",
"key",
",",
"value",
",",
"metric",
".",
"uom",
")",
")",
"logger",
".",
"debug",
"(",
"\"Metrics: %s - %s\"",
",",
"service",
",",
"result",
")",
"return",
"result"
] |
Decode the performance data to build a metrics list
|
[
"Decode",
"the",
"performance",
"data",
"to",
"build",
"a",
"metrics",
"list"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/modules/inner_metrics.py#L364-L402
|
train
|
Alignak-monitoring/alignak
|
alignak/modules/inner_metrics.py
|
InnerMetrics.send_to_tsdb
|
def send_to_tsdb(self, realm, host, service, metrics, ts, path):
    """Send performance data to time series database
    Indeed this function stores metrics in the internal cache and checks if the flushing
    is necessary and then flushes.
    :param realm: concerned realm
    :type: string
    :param host: concerned host
    :type: string
    :param service: concerned service
    :type: string
    :param metrics: list of metrics couple (name, value)
    :type: list
    :param ts: timestamp
    :type: int
    :param path: full path (eg. Graphite) for the received metrics
    :type: string
    """
    if ts is None:
        ts = int(time.time())
    data = {
        "measurement": service,
        "tags": {
            "host": host,
            "service": service,
            "realm": '.'.join(realm) if isinstance(realm, list) else realm,
            "path": path
        },
        "time": ts,
        "fields": {}
    }
    if path is not None:
        data['tags'].update({"path": path})
    for metric, value, _ in metrics:
        data['fields'].update({metric: value})
    # Flush if necessary
    logger.debug("Data: %s", data)
    self.my_metrics.append(data)
    if self.metrics_count >= self.metrics_flush_count:
        # self.carbon.add_data_list(self.my_metrics)
        self.flush()
|
python
|
def send_to_tsdb(self, realm, host, service, metrics, ts, path):
    """Send performance data to time series database
    Indeed this function stores metrics in the internal cache and checks if the flushing
    is necessary and then flushes.
    :param realm: concerned realm
    :type: string
    :param host: concerned host
    :type: string
    :param service: concerned service
    :type: string
    :param metrics: list of metrics couple (name, value)
    :type: list
    :param ts: timestamp
    :type: int
    :param path: full path (eg. Graphite) for the received metrics
    :type: string
    """
    if ts is None:
        ts = int(time.time())
    data = {
        "measurement": service,
        "tags": {
            "host": host,
            "service": service,
            "realm": '.'.join(realm) if isinstance(realm, list) else realm,
            "path": path
        },
        "time": ts,
        "fields": {}
    }
    if path is not None:
        data['tags'].update({"path": path})
    for metric, value, _ in metrics:
        data['fields'].update({metric: value})
    # Flush if necessary
    logger.debug("Data: %s", data)
    self.my_metrics.append(data)
    if self.metrics_count >= self.metrics_flush_count:
        # self.carbon.add_data_list(self.my_metrics)
        self.flush()
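A sketch of one buffered point as this method would build it, written as a plain dict; the field and tag keys are the ones set above, while the concrete host, service and metric values are invented for the example.

import time

# What one buffered entry roughly looks like for
# send_to_tsdb('All', 'srv01', 'Ping', [('rta', 0.066, 'ms')], None, 'All.srv01.Ping')
point = {
    "measurement": "Ping",
    "tags": {
        "host": "srv01",
        "service": "Ping",
        "realm": "All",
        "path": "All.srv01.Ping",
    },
    "time": int(time.time()),   # ts defaults to "now" when None is passed
    "fields": {"rta": 0.066},   # one field per (metric, value, uom) tuple
}
print(point)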
|
[
"def",
"send_to_tsdb",
"(",
"self",
",",
"realm",
",",
"host",
",",
"service",
",",
"metrics",
",",
"ts",
",",
"path",
")",
":",
"if",
"ts",
"is",
"None",
":",
"ts",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"data",
"=",
"{",
"\"measurement\"",
":",
"service",
",",
"\"tags\"",
":",
"{",
"\"host\"",
":",
"host",
",",
"\"service\"",
":",
"service",
",",
"\"realm\"",
":",
"'.'",
".",
"join",
"(",
"realm",
")",
"if",
"isinstance",
"(",
"realm",
",",
"list",
")",
"else",
"realm",
",",
"\"path\"",
":",
"path",
"}",
",",
"\"time\"",
":",
"ts",
",",
"\"fields\"",
":",
"{",
"}",
"}",
"if",
"path",
"is",
"not",
"None",
":",
"data",
"[",
"'tags'",
"]",
".",
"update",
"(",
"{",
"\"path\"",
":",
"path",
"}",
")",
"for",
"metric",
",",
"value",
",",
"_",
"in",
"metrics",
":",
"data",
"[",
"'fields'",
"]",
".",
"update",
"(",
"{",
"metric",
":",
"value",
"}",
")",
"# Flush if necessary",
"logger",
".",
"debug",
"(",
"\"Data: %s\"",
",",
"data",
")",
"self",
".",
"my_metrics",
".",
"append",
"(",
"data",
")",
"if",
"self",
".",
"metrics_count",
">=",
"self",
".",
"metrics_flush_count",
":",
"# self.carbon.add_data_list(self.my_metrics)",
"self",
".",
"flush",
"(",
")"
] |
Send performance data to time series database
Indeed this function stores metrics in the internal cache and checks if the flushing
is necessary and then flushes.
:param realm: concerned realm
:type: string
:param host: concerned host
:type: string
:param service: concerned service
:type: string
:param metrics: list of metrics couple (name, value)
:type: list
:param ts: timestamp
:type: int
:param path: full path (eg. Graphite) for the received metrics
:type: string
|
[
"Send",
"performance",
"data",
"to",
"time",
"series",
"database"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/modules/inner_metrics.py#L546-L592
|
train
|
Alignak-monitoring/alignak
|
alignak/modules/inner_metrics.py
|
InnerMetrics.manage_initial_service_status_brok
|
def manage_initial_service_status_brok(self, b):
    """Prepare the known services cache"""
    host_name = b.data['host_name']
    service_description = b.data['service_description']
    service_id = host_name+"/"+service_description
    logger.debug("got initial service status: %s", service_id)
    if host_name not in self.hosts_cache:
        logger.error("initial service status, host is unknown: %s.", service_id)
        return
    self.services_cache[service_id] = {
    }
    if 'customs' in b.data:
        self.services_cache[service_id]['_GRAPHITE_POST'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_POST', None))
    logger.debug("initial service status received: %s", service_id)
|
python
|
def manage_initial_service_status_brok(self, b):
    """Prepare the known services cache"""
    host_name = b.data['host_name']
    service_description = b.data['service_description']
    service_id = host_name+"/"+service_description
    logger.debug("got initial service status: %s", service_id)
    if host_name not in self.hosts_cache:
        logger.error("initial service status, host is unknown: %s.", service_id)
        return
    self.services_cache[service_id] = {
    }
    if 'customs' in b.data:
        self.services_cache[service_id]['_GRAPHITE_POST'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_POST', None))
    logger.debug("initial service status received: %s", service_id)
|
[
"def",
"manage_initial_service_status_brok",
"(",
"self",
",",
"b",
")",
":",
"host_name",
"=",
"b",
".",
"data",
"[",
"'host_name'",
"]",
"service_description",
"=",
"b",
".",
"data",
"[",
"'service_description'",
"]",
"service_id",
"=",
"host_name",
"+",
"\"/\"",
"+",
"service_description",
"logger",
".",
"debug",
"(",
"\"got initial service status: %s\"",
",",
"service_id",
")",
"if",
"host_name",
"not",
"in",
"self",
".",
"hosts_cache",
":",
"logger",
".",
"error",
"(",
"\"initial service status, host is unknown: %s.\"",
",",
"service_id",
")",
"return",
"self",
".",
"services_cache",
"[",
"service_id",
"]",
"=",
"{",
"}",
"if",
"'customs'",
"in",
"b",
".",
"data",
":",
"self",
".",
"services_cache",
"[",
"service_id",
"]",
"[",
"'_GRAPHITE_POST'",
"]",
"=",
"sanitize_name",
"(",
"b",
".",
"data",
"[",
"'customs'",
"]",
".",
"get",
"(",
"'_GRAPHITE_POST'",
",",
"None",
")",
")",
"logger",
".",
"debug",
"(",
"\"initial service status received: %s\"",
",",
"service_id",
")"
] |
Prepare the known services cache
|
[
"Prepare",
"the",
"known",
"services",
"cache"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/modules/inner_metrics.py#L594-L611
|
train
|
Alignak-monitoring/alignak
|
alignak/modules/inner_metrics.py
|
InnerMetrics.manage_initial_host_status_brok
|
def manage_initial_host_status_brok(self, b):
    """Prepare the known hosts cache"""
    host_name = b.data['host_name']
    logger.debug("got initial host status: %s", host_name)
    self.hosts_cache[host_name] = {
        'realm_name':
            sanitize_name(b.data.get('realm_name', b.data.get('realm', 'All'))),
    }
    if 'customs' in b.data:
        self.hosts_cache[host_name]['_GRAPHITE_PRE'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_PRE', None))
        self.hosts_cache[host_name]['_GRAPHITE_GROUP'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_GROUP', None))
    logger.debug("initial host status received: %s", host_name)
|
python
|
def manage_initial_host_status_brok(self, b):
    """Prepare the known hosts cache"""
    host_name = b.data['host_name']
    logger.debug("got initial host status: %s", host_name)
    self.hosts_cache[host_name] = {
        'realm_name':
            sanitize_name(b.data.get('realm_name', b.data.get('realm', 'All'))),
    }
    if 'customs' in b.data:
        self.hosts_cache[host_name]['_GRAPHITE_PRE'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_PRE', None))
        self.hosts_cache[host_name]['_GRAPHITE_GROUP'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_GROUP', None))
    logger.debug("initial host status received: %s", host_name)
|
[
"def",
"manage_initial_host_status_brok",
"(",
"self",
",",
"b",
")",
":",
"host_name",
"=",
"b",
".",
"data",
"[",
"'host_name'",
"]",
"logger",
".",
"debug",
"(",
"\"got initial host status: %s\"",
",",
"host_name",
")",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
"=",
"{",
"'realm_name'",
":",
"sanitize_name",
"(",
"b",
".",
"data",
".",
"get",
"(",
"'realm_name'",
",",
"b",
".",
"data",
".",
"get",
"(",
"'realm'",
",",
"'All'",
")",
")",
")",
",",
"}",
"if",
"'customs'",
"in",
"b",
".",
"data",
":",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
"[",
"'_GRAPHITE_PRE'",
"]",
"=",
"sanitize_name",
"(",
"b",
".",
"data",
"[",
"'customs'",
"]",
".",
"get",
"(",
"'_GRAPHITE_PRE'",
",",
"None",
")",
")",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
"[",
"'_GRAPHITE_GROUP'",
"]",
"=",
"sanitize_name",
"(",
"b",
".",
"data",
"[",
"'customs'",
"]",
".",
"get",
"(",
"'_GRAPHITE_GROUP'",
",",
"None",
")",
")",
"logger",
".",
"debug",
"(",
"\"initial host status received: %s\"",
",",
"host_name",
")"
] |
Prepare the known hosts cache
|
[
"Prepare",
"the",
"known",
"hosts",
"cache"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/modules/inner_metrics.py#L613-L627
|
train
|
Alignak-monitoring/alignak
|
alignak/modules/inner_metrics.py
|
InnerMetrics.manage_service_check_result_brok
|
def manage_service_check_result_brok(self, b): # pylint: disable=too-many-branches
    """A service check result brok has just arrived ..."""
    host_name = b.data.get('host_name', None)
    service_description = b.data.get('service_description', None)
    if not host_name or not service_description:
        return
    service_id = host_name+"/"+service_description
    logger.debug("service check result: %s", service_id)
    # If host and service initial status broks have not been received, ignore ...
    if not self.ignore_unknown and host_name not in self.hosts_cache:
        logger.warning("received service check result for an unknown host: %s", service_id)
        return
    if service_id not in self.services_cache and not self.ignore_unknown:
        logger.warning("received service check result for an unknown service: %s", service_id)
        return
    # Decode received metrics
    metrics = self.get_metrics_from_perfdata(service_description, b.data['perf_data'])
    if not metrics:
        logger.debug("no metrics to send ...")
        return
    # If checks latency is ignored
    if self.ignore_latency_limit >= b.data['latency'] > 0:
        check_time = int(b.data['last_chk']) - int(b.data['latency'])
    else:
        check_time = int(b.data['last_chk'])
    # Custom hosts variables
    hname = sanitize_name(host_name)
    if host_name in self.hosts_cache:
        if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
        if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))
    # Custom services variables
    desc = sanitize_name(service_description)
    if service_id in self.services_cache:
        if self.services_cache[service_id].get('_GRAPHITE_POST', None):
            desc = ".".join((desc, self.services_cache[service_id].get('_GRAPHITE_POST', None)))
    # Graphite data source
    if self.graphite_data_source:
        path = '.'.join((hname, self.graphite_data_source, desc))
    else:
        path = '.'.join((hname, desc))
    # Realm as a prefix
    if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
        path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))
    realm_name = None
    if host_name in self.hosts_cache:
        realm_name = self.hosts_cache[host_name].get('realm_name', None)
    # Send metrics
    self.send_to_tsdb(realm_name, host_name, service_description, metrics, check_time, path)
|
python
|
def manage_service_check_result_brok(self, b): # pylint: disable=too-many-branches
    """A service check result brok has just arrived ..."""
    host_name = b.data.get('host_name', None)
    service_description = b.data.get('service_description', None)
    if not host_name or not service_description:
        return
    service_id = host_name+"/"+service_description
    logger.debug("service check result: %s", service_id)
    # If host and service initial status broks have not been received, ignore ...
    if not self.ignore_unknown and host_name not in self.hosts_cache:
        logger.warning("received service check result for an unknown host: %s", service_id)
        return
    if service_id not in self.services_cache and not self.ignore_unknown:
        logger.warning("received service check result for an unknown service: %s", service_id)
        return
    # Decode received metrics
    metrics = self.get_metrics_from_perfdata(service_description, b.data['perf_data'])
    if not metrics:
        logger.debug("no metrics to send ...")
        return
    # If checks latency is ignored
    if self.ignore_latency_limit >= b.data['latency'] > 0:
        check_time = int(b.data['last_chk']) - int(b.data['latency'])
    else:
        check_time = int(b.data['last_chk'])
    # Custom hosts variables
    hname = sanitize_name(host_name)
    if host_name in self.hosts_cache:
        if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
        if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))
    # Custom services variables
    desc = sanitize_name(service_description)
    if service_id in self.services_cache:
        if self.services_cache[service_id].get('_GRAPHITE_POST', None):
            desc = ".".join((desc, self.services_cache[service_id].get('_GRAPHITE_POST', None)))
    # Graphite data source
    if self.graphite_data_source:
        path = '.'.join((hname, self.graphite_data_source, desc))
    else:
        path = '.'.join((hname, desc))
    # Realm as a prefix
    if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
        path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))
    realm_name = None
    if host_name in self.hosts_cache:
        realm_name = self.hosts_cache[host_name].get('realm_name', None)
    # Send metrics
    self.send_to_tsdb(realm_name, host_name, service_description, metrics, check_time, path)
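The path assembly above reduces to prefixing and suffixing a dotted string. A condensed sketch of that logic with made-up values (only the _GRAPHITE_PRE prefix is shown; _GRAPHITE_GROUP is prepended the same way in the real method):

def build_path(host, service, realm=None, graphite_pre=None,
               data_source=None, graphite_post=None, realms_prefix=False):
    # Same ordering as above: [realm.][_GRAPHITE_PRE.]host[.data_source].service[._GRAPHITE_POST]
    hname = host if not graphite_pre else ".".join((graphite_pre, host))
    desc = service if not graphite_post else ".".join((service, graphite_post))
    path = ".".join((hname, data_source, desc)) if data_source else ".".join((hname, desc))
    if realms_prefix and realm:
        path = ".".join((realm, path))
    return path

print(build_path("srv01", "Ping"))                                   # srv01.Ping
print(build_path("srv01", "Ping", realm="All", data_source="alignak",
                 realms_prefix=True))                                # All.srv01.alignak.Ping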
|
[
"def",
"manage_service_check_result_brok",
"(",
"self",
",",
"b",
")",
":",
"# pylint: disable=too-many-branches",
"host_name",
"=",
"b",
".",
"data",
".",
"get",
"(",
"'host_name'",
",",
"None",
")",
"service_description",
"=",
"b",
".",
"data",
".",
"get",
"(",
"'service_description'",
",",
"None",
")",
"if",
"not",
"host_name",
"or",
"not",
"service_description",
":",
"return",
"service_id",
"=",
"host_name",
"+",
"\"/\"",
"+",
"service_description",
"logger",
".",
"debug",
"(",
"\"service check result: %s\"",
",",
"service_id",
")",
"# If host and service initial status broks have not been received, ignore ...",
"if",
"not",
"self",
".",
"ignore_unknown",
"and",
"host_name",
"not",
"in",
"self",
".",
"hosts_cache",
":",
"logger",
".",
"warning",
"(",
"\"received service check result for an unknown host: %s\"",
",",
"service_id",
")",
"return",
"if",
"service_id",
"not",
"in",
"self",
".",
"services_cache",
"and",
"not",
"self",
".",
"ignore_unknown",
":",
"logger",
".",
"warning",
"(",
"\"received service check result for an unknown service: %s\"",
",",
"service_id",
")",
"return",
"# Decode received metrics",
"metrics",
"=",
"self",
".",
"get_metrics_from_perfdata",
"(",
"service_description",
",",
"b",
".",
"data",
"[",
"'perf_data'",
"]",
")",
"if",
"not",
"metrics",
":",
"logger",
".",
"debug",
"(",
"\"no metrics to send ...\"",
")",
"return",
"# If checks latency is ignored",
"if",
"self",
".",
"ignore_latency_limit",
">=",
"b",
".",
"data",
"[",
"'latency'",
"]",
">",
"0",
":",
"check_time",
"=",
"int",
"(",
"b",
".",
"data",
"[",
"'last_chk'",
"]",
")",
"-",
"int",
"(",
"b",
".",
"data",
"[",
"'latency'",
"]",
")",
"else",
":",
"check_time",
"=",
"int",
"(",
"b",
".",
"data",
"[",
"'last_chk'",
"]",
")",
"# Custom hosts variables",
"hname",
"=",
"sanitize_name",
"(",
"host_name",
")",
"if",
"host_name",
"in",
"self",
".",
"hosts_cache",
":",
"if",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'_GRAPHITE_GROUP'",
",",
"None",
")",
":",
"hname",
"=",
"\".\"",
".",
"join",
"(",
"(",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'_GRAPHITE_GROUP'",
")",
",",
"hname",
")",
")",
"if",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'_GRAPHITE_PRE'",
",",
"None",
")",
":",
"hname",
"=",
"\".\"",
".",
"join",
"(",
"(",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'_GRAPHITE_PRE'",
")",
",",
"hname",
")",
")",
"# Custom services variables",
"desc",
"=",
"sanitize_name",
"(",
"service_description",
")",
"if",
"service_id",
"in",
"self",
".",
"services_cache",
":",
"if",
"self",
".",
"services_cache",
"[",
"service_id",
"]",
".",
"get",
"(",
"'_GRAPHITE_POST'",
",",
"None",
")",
":",
"desc",
"=",
"\".\"",
".",
"join",
"(",
"(",
"desc",
",",
"self",
".",
"services_cache",
"[",
"service_id",
"]",
".",
"get",
"(",
"'_GRAPHITE_POST'",
",",
"None",
")",
")",
")",
"# Graphite data source",
"if",
"self",
".",
"graphite_data_source",
":",
"path",
"=",
"'.'",
".",
"join",
"(",
"(",
"hname",
",",
"self",
".",
"graphite_data_source",
",",
"desc",
")",
")",
"else",
":",
"path",
"=",
"'.'",
".",
"join",
"(",
"(",
"hname",
",",
"desc",
")",
")",
"# Realm as a prefix",
"if",
"self",
".",
"realms_prefix",
"and",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'realm_name'",
",",
"None",
")",
":",
"path",
"=",
"'.'",
".",
"join",
"(",
"(",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'realm_name'",
")",
",",
"path",
")",
")",
"realm_name",
"=",
"None",
"if",
"host_name",
"in",
"self",
".",
"hosts_cache",
":",
"realm_name",
"=",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'realm_name'",
",",
"None",
")",
"# Send metrics",
"self",
".",
"send_to_tsdb",
"(",
"realm_name",
",",
"host_name",
",",
"service_description",
",",
"metrics",
",",
"check_time",
",",
"path",
")"
] |
A service check result brok has just arrived ...
|
[
"A",
"service",
"check",
"result",
"brok",
"has",
"just",
"arrived",
"..."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/modules/inner_metrics.py#L629-L688
|
train
|
Alignak-monitoring/alignak
|
alignak/modules/inner_metrics.py
|
InnerMetrics.manage_host_check_result_brok
|
def manage_host_check_result_brok(self, b): # pylint: disable=too-many-branches
    """An host check result brok has just arrived..."""
    host_name = b.data.get('host_name', None)
    if not host_name:
        return
    logger.debug("host check result: %s", host_name)
    # If host initial status brok has not been received, ignore ...
    if host_name not in self.hosts_cache and not self.ignore_unknown:
        logger.warning("received host check result for an unknown host: %s", host_name)
        return
    # Decode received metrics
    metrics = self.get_metrics_from_perfdata('host_check', b.data['perf_data'])
    if not metrics:
        logger.debug("no metrics to send ...")
        return
    # If checks latency is ignored
    if self.ignore_latency_limit >= b.data['latency'] > 0:
        check_time = int(b.data['last_chk']) - int(b.data['latency'])
    else:
        check_time = int(b.data['last_chk'])
    # Custom hosts variables
    hname = sanitize_name(host_name)
    if host_name in self.hosts_cache:
        if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
        if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))
    # Graphite data source
    if self.graphite_data_source:
        path = '.'.join((hname, self.graphite_data_source))
        if self.hostcheck:
            path = '.'.join((hname, self.graphite_data_source, self.hostcheck))
    else:
        path = '.'.join((hname, self.hostcheck))
    # Realm as a prefix
    if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
        path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))
    realm_name = None
    if host_name in self.hosts_cache:
        realm_name = self.hosts_cache[host_name].get('realm_name', None)
    # Send metrics
    self.send_to_tsdb(realm_name, host_name, self.hostcheck, metrics, check_time, path)
|
python
|
def manage_host_check_result_brok(self, b): # pylint: disable=too-many-branches
    """An host check result brok has just arrived..."""
    host_name = b.data.get('host_name', None)
    if not host_name:
        return
    logger.debug("host check result: %s", host_name)
    # If host initial status brok has not been received, ignore ...
    if host_name not in self.hosts_cache and not self.ignore_unknown:
        logger.warning("received host check result for an unknown host: %s", host_name)
        return
    # Decode received metrics
    metrics = self.get_metrics_from_perfdata('host_check', b.data['perf_data'])
    if not metrics:
        logger.debug("no metrics to send ...")
        return
    # If checks latency is ignored
    if self.ignore_latency_limit >= b.data['latency'] > 0:
        check_time = int(b.data['last_chk']) - int(b.data['latency'])
    else:
        check_time = int(b.data['last_chk'])
    # Custom hosts variables
    hname = sanitize_name(host_name)
    if host_name in self.hosts_cache:
        if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
        if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))
    # Graphite data source
    if self.graphite_data_source:
        path = '.'.join((hname, self.graphite_data_source))
        if self.hostcheck:
            path = '.'.join((hname, self.graphite_data_source, self.hostcheck))
    else:
        path = '.'.join((hname, self.hostcheck))
    # Realm as a prefix
    if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
        path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))
    realm_name = None
    if host_name in self.hosts_cache:
        realm_name = self.hosts_cache[host_name].get('realm_name', None)
    # Send metrics
    self.send_to_tsdb(realm_name, host_name, self.hostcheck, metrics, check_time, path)
|
[
"def",
"manage_host_check_result_brok",
"(",
"self",
",",
"b",
")",
":",
"# pylint: disable=too-many-branches",
"host_name",
"=",
"b",
".",
"data",
".",
"get",
"(",
"'host_name'",
",",
"None",
")",
"if",
"not",
"host_name",
":",
"return",
"logger",
".",
"debug",
"(",
"\"host check result: %s\"",
",",
"host_name",
")",
"# If host initial status brok has not been received, ignore ...",
"if",
"host_name",
"not",
"in",
"self",
".",
"hosts_cache",
"and",
"not",
"self",
".",
"ignore_unknown",
":",
"logger",
".",
"warning",
"(",
"\"received host check result for an unknown host: %s\"",
",",
"host_name",
")",
"return",
"# Decode received metrics",
"metrics",
"=",
"self",
".",
"get_metrics_from_perfdata",
"(",
"'host_check'",
",",
"b",
".",
"data",
"[",
"'perf_data'",
"]",
")",
"if",
"not",
"metrics",
":",
"logger",
".",
"debug",
"(",
"\"no metrics to send ...\"",
")",
"return",
"# If checks latency is ignored",
"if",
"self",
".",
"ignore_latency_limit",
">=",
"b",
".",
"data",
"[",
"'latency'",
"]",
">",
"0",
":",
"check_time",
"=",
"int",
"(",
"b",
".",
"data",
"[",
"'last_chk'",
"]",
")",
"-",
"int",
"(",
"b",
".",
"data",
"[",
"'latency'",
"]",
")",
"else",
":",
"check_time",
"=",
"int",
"(",
"b",
".",
"data",
"[",
"'last_chk'",
"]",
")",
"# Custom hosts variables",
"hname",
"=",
"sanitize_name",
"(",
"host_name",
")",
"if",
"host_name",
"in",
"self",
".",
"hosts_cache",
":",
"if",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'_GRAPHITE_GROUP'",
",",
"None",
")",
":",
"hname",
"=",
"\".\"",
".",
"join",
"(",
"(",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'_GRAPHITE_GROUP'",
")",
",",
"hname",
")",
")",
"if",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'_GRAPHITE_PRE'",
",",
"None",
")",
":",
"hname",
"=",
"\".\"",
".",
"join",
"(",
"(",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'_GRAPHITE_PRE'",
")",
",",
"hname",
")",
")",
"# Graphite data source",
"if",
"self",
".",
"graphite_data_source",
":",
"path",
"=",
"'.'",
".",
"join",
"(",
"(",
"hname",
",",
"self",
".",
"graphite_data_source",
")",
")",
"if",
"self",
".",
"hostcheck",
":",
"path",
"=",
"'.'",
".",
"join",
"(",
"(",
"hname",
",",
"self",
".",
"graphite_data_source",
",",
"self",
".",
"hostcheck",
")",
")",
"else",
":",
"path",
"=",
"'.'",
".",
"join",
"(",
"(",
"hname",
",",
"self",
".",
"hostcheck",
")",
")",
"# Realm as a prefix",
"if",
"self",
".",
"realms_prefix",
"and",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'realm_name'",
",",
"None",
")",
":",
"path",
"=",
"'.'",
".",
"join",
"(",
"(",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'realm_name'",
")",
",",
"path",
")",
")",
"realm_name",
"=",
"None",
"if",
"host_name",
"in",
"self",
".",
"hosts_cache",
":",
"realm_name",
"=",
"self",
".",
"hosts_cache",
"[",
"host_name",
"]",
".",
"get",
"(",
"'realm_name'",
",",
"None",
")",
"# Send metrics",
"self",
".",
"send_to_tsdb",
"(",
"realm_name",
",",
"host_name",
",",
"self",
".",
"hostcheck",
",",
"metrics",
",",
"check_time",
",",
"path",
")"
] |
An host check result brok has just arrived...
|
[
"An",
"host",
"check",
"result",
"brok",
"has",
"just",
"arrived",
"..."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/modules/inner_metrics.py#L690-L740
|
train
|
Alignak-monitoring/alignak
|
alignak/comment.py
|
Comment.get_comment_brok
|
def get_comment_brok(self, host_name, service_name=''):
    """Get a comment brok
    :param host_name:
    :param service_name:
    :return: brok with wanted data
    :rtype: alignak.brok.Brok
    """
    data = self.serialize()
    data['host'] = host_name
    if service_name:
        data['service'] = service_name
    return Brok({'type': 'comment', 'data': data})
|
python
|
def get_comment_brok(self, host_name, service_name=''):
    """Get a comment brok
    :param host_name:
    :param service_name:
    :return: brok with wanted data
    :rtype: alignak.brok.Brok
    """
    data = self.serialize()
    data['host'] = host_name
    if service_name:
        data['service'] = service_name
    return Brok({'type': 'comment', 'data': data})
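A sketch of the resulting brok payload, using a plain dict in place of the serialized comment (the 'host' and 'service' keys are the ones set above; the comment fields themselves are invented for the example):

# data starts from the serialized comment and gains 'host' (and optionally 'service')
data = {"comment": "Planned maintenance", "author": "admin"}  # invented comment fields
data['host'] = "srv01"
data['service'] = "Ping"   # only set when service_name is not empty
brok_payload = {'type': 'comment', 'data': data}
print(brok_payload)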
|
[
"def",
"get_comment_brok",
"(",
"self",
",",
"host_name",
",",
"service_name",
"=",
"''",
")",
":",
"data",
"=",
"self",
".",
"serialize",
"(",
")",
"data",
"[",
"'host'",
"]",
"=",
"host_name",
"if",
"service_name",
":",
"data",
"[",
"'service'",
"]",
"=",
"service_name",
"return",
"Brok",
"(",
"{",
"'type'",
":",
"'comment'",
",",
"'data'",
":",
"data",
"}",
")"
] |
Get a comment brok
:param host_name:
:param service_name:
:return: brok with wanted data
:rtype: alignak.brok.Brok
|
[
"Get",
"a",
"comment",
"brok"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/comment.py#L124-L137
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/notificationway.py
|
NotificationWays.new_inner_member
|
def new_inner_member(self, name, params):
    """Create new instance of NotificationWay with given name and parameters
    and add it to the item list
    :param name: notification way name
    :type name: str
    :param params: notification wat parameters
    :type params: dict
    :return: None
    """
    params['notificationway_name'] = name
    self.add_item(NotificationWay(params))
|
python
|
def new_inner_member(self, name, params):
    """Create new instance of NotificationWay with given name and parameters
    and add it to the item list
    :param name: notification way name
    :type name: str
    :param params: notification wat parameters
    :type params: dict
    :return: None
    """
    params['notificationway_name'] = name
    self.add_item(NotificationWay(params))
|
[
"def",
"new_inner_member",
"(",
"self",
",",
"name",
",",
"params",
")",
":",
"params",
"[",
"'notificationway_name'",
"]",
"=",
"name",
"self",
".",
"add_item",
"(",
"NotificationWay",
"(",
"params",
")",
")"
] |
Create new instance of NotificationWay with given name and parameters
and add it to the item list
:param name: notification way name
:type name: str
:param params: notification wat parameters
:type params: dict
:return: None
|
[
"Create",
"new",
"instance",
"of",
"NotificationWay",
"with",
"given",
"name",
"and",
"parameters",
"and",
"add",
"it",
"to",
"the",
"item",
"list"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/notificationway.py#L389-L400
|
train
|
Alignak-monitoring/alignak
|
alignak/misc/serialization.py
|
serialize
|
def serialize(obj, no_dump=False):
    """
    Serialize an object.
    Returns a dict containing an `_error` property if a MemoryError happens during the
    object serialization. See #369.
    :param obj: the object to serialize
    :type obj: alignak.objects.item.Item | dict | list | str
    :param no_dump: if True return dict, otherwise return a json
    :type no_dump: bool
    :return: dict or json dumps dict with the following structure ::
       {'__sys_python_module__': "%s.%s" % (o_cls.__module__, o_cls.__name__)
       'content' : obj.serialize()}
    :rtype: dict | str
    """
    # print("Serialize (%s): %s" % (no_dump, obj))
    if hasattr(obj, "serialize") and isinstance(obj.serialize, collections.Callable):
        o_dict = {
            '__sys_python_module__': "%s.%s" % (obj.__class__.__module__, obj.__class__.__name__),
            'content': obj.serialize()
        }
    elif isinstance(obj, dict):
        o_dict = {}
        for key, value in list(obj.items()):
            o_dict[key] = serialize(value, True)
    elif isinstance(obj, (list, set)):
        o_dict = [serialize(item, True) for item in obj]
    else:
        o_dict = obj
    if no_dump:
        return o_dict
    result = None
    try:
        result = json.dumps(o_dict, ensure_ascii=False)
    except MemoryError:
        return {'_error': 'Not enough memory on this computer to correctly manage Alignak '
                          'objects serialization! '
                          'Sorry for this, please log an issue in the project repository.'}
    return result
|
python
|
def serialize(obj, no_dump=False):
    """
    Serialize an object.
    Returns a dict containing an `_error` property if a MemoryError happens during the
    object serialization. See #369.
    :param obj: the object to serialize
    :type obj: alignak.objects.item.Item | dict | list | str
    :param no_dump: if True return dict, otherwise return a json
    :type no_dump: bool
    :return: dict or json dumps dict with the following structure ::
       {'__sys_python_module__': "%s.%s" % (o_cls.__module__, o_cls.__name__)
       'content' : obj.serialize()}
    :rtype: dict | str
    """
    # print("Serialize (%s): %s" % (no_dump, obj))
    if hasattr(obj, "serialize") and isinstance(obj.serialize, collections.Callable):
        o_dict = {
            '__sys_python_module__': "%s.%s" % (obj.__class__.__module__, obj.__class__.__name__),
            'content': obj.serialize()
        }
    elif isinstance(obj, dict):
        o_dict = {}
        for key, value in list(obj.items()):
            o_dict[key] = serialize(value, True)
    elif isinstance(obj, (list, set)):
        o_dict = [serialize(item, True) for item in obj]
    else:
        o_dict = obj
    if no_dump:
        return o_dict
    result = None
    try:
        result = json.dumps(o_dict, ensure_ascii=False)
    except MemoryError:
        return {'_error': 'Not enough memory on this computer to correctly manage Alignak '
                          'objects serialization! '
                          'Sorry for this, please log an issue in the project repository.'}
    return result
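The interesting part is the envelope written for objects exposing a serialize() method. A minimal stand-in class (not an actual Alignak Item) showing the resulting structure is below. As an aside, the collections.Callable lookup used above only works on Python versions where that alias still exists; since Python 3.10 the equivalent lives in collections.abc.

import json

class FakeItem:
    # Stand-in for an Alignak object exposing serialize()
    def __init__(self, name):
        self.name = name
    def serialize(self):
        return {'name': self.name}

obj = FakeItem("srv01")
envelope = {
    '__sys_python_module__': "%s.%s" % (obj.__class__.__module__, obj.__class__.__name__),
    'content': obj.serialize()
}
print(json.dumps(envelope, ensure_ascii=False))
# {"__sys_python_module__": "__main__.FakeItem", "content": {"name": "srv01"}}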
|
[
"def",
"serialize",
"(",
"obj",
",",
"no_dump",
"=",
"False",
")",
":",
"# print(\"Serialize (%s): %s\" % (no_dump, obj))",
"if",
"hasattr",
"(",
"obj",
",",
"\"serialize\"",
")",
"and",
"isinstance",
"(",
"obj",
".",
"serialize",
",",
"collections",
".",
"Callable",
")",
":",
"o_dict",
"=",
"{",
"'__sys_python_module__'",
":",
"\"%s.%s\"",
"%",
"(",
"obj",
".",
"__class__",
".",
"__module__",
",",
"obj",
".",
"__class__",
".",
"__name__",
")",
",",
"'content'",
":",
"obj",
".",
"serialize",
"(",
")",
"}",
"elif",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"o_dict",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"list",
"(",
"obj",
".",
"items",
"(",
")",
")",
":",
"o_dict",
"[",
"key",
"]",
"=",
"serialize",
"(",
"value",
",",
"True",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"set",
")",
")",
":",
"o_dict",
"=",
"[",
"serialize",
"(",
"item",
",",
"True",
")",
"for",
"item",
"in",
"obj",
"]",
"else",
":",
"o_dict",
"=",
"obj",
"if",
"no_dump",
":",
"return",
"o_dict",
"result",
"=",
"None",
"try",
":",
"result",
"=",
"json",
".",
"dumps",
"(",
"o_dict",
",",
"ensure_ascii",
"=",
"False",
")",
"except",
"MemoryError",
":",
"return",
"{",
"'_error'",
":",
"'Not enough memory on this computer to correctly manage Alignak '",
"'objects serialization! '",
"'Sorry for this, please log an issue in the project repository.'",
"}",
"return",
"result"
] |
Serialize an object.
Returns a dict containing an `_error` property if a MemoryError happens during the
object serialization. See #369.
:param obj: the object to serialize
:type obj: alignak.objects.item.Item | dict | list | str
:param no_dump: if True return dict, otherwise return a json
:type no_dump: bool
:return: dict or json dumps dict with the following structure ::
{'__sys_python_module__': "%s.%s" % (o_cls.__module__, o_cls.__name__)
'content' : obj.serialize()}
:rtype: dict | str
|
[
"Serialize",
"an",
"object",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/misc/serialization.py#L31-L78
|
train
|
Alignak-monitoring/alignak
|
alignak/brok.py
|
Brok.get_event
|
def get_event(self):
    """This function returns an Event from a Brok
    If the type is monitoring_log then the Brok contains a monitoring event
    (alert, notification, ...) information. This function will return a tuple
    with the creation time, the level and message information
    :return: tuple with date, level and message
    :rtype: tuple
    """
    self.prepare()
    return (self.creation_time, self.data['level'], self.data['message'])
|
python
|
def get_event(self):
    """This function returns an Event from a Brok
    If the type is monitoring_log then the Brok contains a monitoring event
    (alert, notification, ...) information. This function will return a tuple
    with the creation time, the level and message information
    :return: tuple with date, level and message
    :rtype: tuple
    """
    self.prepare()
    return (self.creation_time, self.data['level'], self.data['message'])
|
[
"def",
"get_event",
"(",
"self",
")",
":",
"self",
".",
"prepare",
"(",
")",
"return",
"(",
"self",
".",
"creation_time",
",",
"self",
".",
"data",
"[",
"'level'",
"]",
",",
"self",
".",
"data",
"[",
"'message'",
"]",
")"
] |
This function returns an Event from a Brok
If the type is monitoring_log then the Brok contains a monitoring event
(alert, notification, ...) information. This function will return a tuple
with the creation time, the level and message information
:return: tuple with date, level and message
:rtype: tuple
|
[
"This",
"function",
"returns",
"an",
"Event",
"from",
"a",
"Brok"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/brok.py#L116-L127
|
train
|
Alignak-monitoring/alignak
|
alignak/brok.py
|
Brok.prepare
|
def prepare(self):
    """Un-serialize data from data attribute and add instance_id key if necessary
    :return: None
    """
    # Maybe the Brok is a old daemon one or was already prepared
    # if so, the data is already ok
    if hasattr(self, 'prepared') and not self.prepared:
        self.data = unserialize(self.data)
        if self.instance_id:
            self.data['instance_id'] = self.instance_id
    self.prepared = True
|
python
|
def prepare(self):
    """Un-serialize data from data attribute and add instance_id key if necessary
    :return: None
    """
    # Maybe the Brok is a old daemon one or was already prepared
    # if so, the data is already ok
    if hasattr(self, 'prepared') and not self.prepared:
        self.data = unserialize(self.data)
        if self.instance_id:
            self.data['instance_id'] = self.instance_id
    self.prepared = True
|
[
"def",
"prepare",
"(",
"self",
")",
":",
"# Maybe the Brok is a old daemon one or was already prepared",
"# if so, the data is already ok",
"if",
"hasattr",
"(",
"self",
",",
"'prepared'",
")",
"and",
"not",
"self",
".",
"prepared",
":",
"self",
".",
"data",
"=",
"unserialize",
"(",
"self",
".",
"data",
")",
"if",
"self",
".",
"instance_id",
":",
"self",
".",
"data",
"[",
"'instance_id'",
"]",
"=",
"self",
".",
"instance_id",
"self",
".",
"prepared",
"=",
"True"
] |
Un-serialize data from data attribute and add instance_id key if necessary
:return: None
|
[
"Un",
"-",
"serialize",
"data",
"from",
"data",
"attribute",
"and",
"add",
"instance_id",
"key",
"if",
"necessary"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/brok.py#L144-L155
|
train
|
Alignak-monitoring/alignak
|
alignak/complexexpression.py
|
ComplexExpressionNode.resolve_elements
|
def resolve_elements(self):
    """Get element of this node recursively
    Compute rules with OR or AND rule then NOT rules.
    :return: set of element
    :rtype: set
    """
    # If it's a leaf, we just need to dump a set with the content of the node
    if self.leaf:
        if not self.content:
            return set()
        return set(self.content)
    # first got the not ones in a list, and the other in the other list
    not_nodes = [s for s in self.sons if s.not_value]
    positiv_nodes = [s for s in self.sons if not s.not_value] # ok a not not is hard to read..
    # By default we are using a OR rule
    if not self.operand:
        self.operand = '|'
    res = set()
    # The operand will change the positiv loop only
    i = 0
    for node in positiv_nodes:
        node_members = node.resolve_elements()
        if self.operand == '|':
            res = res.union(node_members)
        elif self.operand == '&':
            # The first elements of an AND rule should be used
            if i == 0:
                res = node_members
            else:
                res = res.intersection(node_members)
        i += 1
    # And we finally remove all NOT elements from the result
    for node in not_nodes:
        node_members = node.resolve_elements()
        res = res.difference(node_members)
    return res
|
python
|
def resolve_elements(self):
    """Get element of this node recursively
    Compute rules with OR or AND rule then NOT rules.
    :return: set of element
    :rtype: set
    """
    # If it's a leaf, we just need to dump a set with the content of the node
    if self.leaf:
        if not self.content:
            return set()
        return set(self.content)
    # first got the not ones in a list, and the other in the other list
    not_nodes = [s for s in self.sons if s.not_value]
    positiv_nodes = [s for s in self.sons if not s.not_value] # ok a not not is hard to read..
    # By default we are using a OR rule
    if not self.operand:
        self.operand = '|'
    res = set()
    # The operand will change the positiv loop only
    i = 0
    for node in positiv_nodes:
        node_members = node.resolve_elements()
        if self.operand == '|':
            res = res.union(node_members)
        elif self.operand == '&':
            # The first elements of an AND rule should be used
            if i == 0:
                res = node_members
            else:
                res = res.intersection(node_members)
        i += 1
    # And we finally remove all NOT elements from the result
    for node in not_nodes:
        node_members = node.resolve_elements()
        res = res.difference(node_members)
    return res
|
[
"def",
"resolve_elements",
"(",
"self",
")",
":",
"# If it's a leaf, we just need to dump a set with the content of the node",
"if",
"self",
".",
"leaf",
":",
"if",
"not",
"self",
".",
"content",
":",
"return",
"set",
"(",
")",
"return",
"set",
"(",
"self",
".",
"content",
")",
"# first got the not ones in a list, and the other in the other list",
"not_nodes",
"=",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"sons",
"if",
"s",
".",
"not_value",
"]",
"positiv_nodes",
"=",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"sons",
"if",
"not",
"s",
".",
"not_value",
"]",
"# ok a not not is hard to read..",
"# By default we are using a OR rule",
"if",
"not",
"self",
".",
"operand",
":",
"self",
".",
"operand",
"=",
"'|'",
"res",
"=",
"set",
"(",
")",
"# The operand will change the positiv loop only",
"i",
"=",
"0",
"for",
"node",
"in",
"positiv_nodes",
":",
"node_members",
"=",
"node",
".",
"resolve_elements",
"(",
")",
"if",
"self",
".",
"operand",
"==",
"'|'",
":",
"res",
"=",
"res",
".",
"union",
"(",
"node_members",
")",
"elif",
"self",
".",
"operand",
"==",
"'&'",
":",
"# The first elements of an AND rule should be used",
"if",
"i",
"==",
"0",
":",
"res",
"=",
"node_members",
"else",
":",
"res",
"=",
"res",
".",
"intersection",
"(",
"node_members",
")",
"i",
"+=",
"1",
"# And we finally remove all NOT elements from the result",
"for",
"node",
"in",
"not_nodes",
":",
"node_members",
"=",
"node",
".",
"resolve_elements",
"(",
")",
"res",
"=",
"res",
".",
"difference",
"(",
"node_members",
")",
"return",
"res"
] |
Get element of this node recursively
Compute rules with OR or AND rule then NOT rules.
:return: set of element
:rtype: set
|
[
"Get",
"element",
"of",
"this",
"node",
"recursively",
"Compute",
"rules",
"with",
"OR",
"or",
"AND",
"rule",
"then",
"NOT",
"rules",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/complexexpression.py#L72-L114
|
train
|
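The OR/AND/NOT evaluation above reduces to set algebra; the following self-contained sketch reproduces the same rule on invented hostgroup contents:

def combine(operand, positive_sets, negative_sets):
    """Union for '|', intersection for '&', then subtract the negated sets."""
    res = set()
    for i, members in enumerate(positive_sets):
        if operand == '|':
            res |= members
        elif operand == '&':
            # The first set of an AND rule seeds the result
            res = set(members) if i == 0 else res & members
    for members in negative_sets:
        res -= members
    return res

linux = {'srv01', 'srv02', 'srv03'}
production = {'srv02', 'srv03', 'srv04'}
windows = {'srv03'}
# equivalent of the expression "linux & production & !windows"
print(sorted(combine('&', [linux, production], [windows])))  # ['srv02']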
Alignak-monitoring/alignak
|
alignak/complexexpression.py
|
ComplexExpressionFactory.eval_cor_pattern
|
def eval_cor_pattern(self, pattern): # pylint:disable=too-many-branches
"""Parse and build recursively a tree of ComplexExpressionNode from pattern
:param pattern: pattern to parse
:type pattern: str
:return: root node of parsed tree
:type: alignak.complexexpression.ComplexExpressionNode
"""
pattern = pattern.strip()
complex_node = False
# Look if it's a complex pattern (with rule) or
# if it's a leaf of it, like a host/service
for char in '()+&|,':
if char in pattern:
complex_node = True
node = ComplexExpressionNode()
# if it's a single expression like !linux or production
# (where "linux" and "production" are hostgroup names)
# we will get the objects from it and return a leaf node
if not complex_node:
# If it's a not value, tag the node and find
# the name without this ! operator
if pattern.startswith('!'):
node.not_value = True
pattern = pattern[1:]
node.operand = self.ctx
node.leaf = True
obj, error = self.find_object(pattern)
if obj is not None:
node.content = obj
else:
node.configuration_errors.append(error)
return node
in_par = False
tmp = ''
stacked_par = 0
for char in pattern:
if char in (',', '|'):
# Maybe we are in a par, if so, just stack it
if in_par:
tmp += char
else:
# Oh we got a real cut in an expression, if so, cut it
tmp = tmp.strip()
node.operand = '|'
if tmp != '':
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
tmp = ''
elif char in ('&', '+'):
# Maybe we are in a par, if so, just stack it
if in_par:
tmp += char
else:
# Oh we got a real cut in an expression, if so, cut it
tmp = tmp.strip()
node.operand = '&'
if tmp != '':
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
tmp = ''
elif char == '(':
stacked_par += 1
in_par = True
tmp = tmp.strip()
# Maybe we just start a par, but we got some things in tmp
# that should not be good in fact !
if stacked_par == 1 and tmp != '':
# TODO : real error
print("ERROR : bad expression near", tmp)
continue
# If we are already in a par, add this (
# but not if it's the first one so
if stacked_par > 1:
tmp += char
elif char == ')':
stacked_par -= 1
if stacked_par < 0:
# TODO : real error
print("Error : bad expression near", tmp, "too much ')'")
continue
if stacked_par == 0:
tmp = tmp.strip()
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
in_par = False
# OK now clean the tmp so we start clean
tmp = ''
continue
# ok here we are still in a huge par, we just close one sub one
tmp += char
# Maybe it's a classic character, if so, continue
else:
tmp += char
# Be sure to manage the trailing part when the line is done
tmp = tmp.strip()
if tmp != '':
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
return node
|
python
|
def eval_cor_pattern(self, pattern): # pylint:disable=too-many-branches
"""Parse and build recursively a tree of ComplexExpressionNode from pattern
:param pattern: pattern to parse
:type pattern: str
:return: root node of parsed tree
:type: alignak.complexexpression.ComplexExpressionNode
"""
pattern = pattern.strip()
complex_node = False
# Look if it's a complex pattern (with rule) or
# if it's a leaf of it, like a host/service
for char in '()+&|,':
if char in pattern:
complex_node = True
node = ComplexExpressionNode()
# if it's a single expression like !linux or production
# (where "linux" and "production" are hostgroup names)
# we will get the objects from it and return a leaf node
if not complex_node:
# If it's a not value, tag the node and find
# the name without this ! operator
if pattern.startswith('!'):
node.not_value = True
pattern = pattern[1:]
node.operand = self.ctx
node.leaf = True
obj, error = self.find_object(pattern)
if obj is not None:
node.content = obj
else:
node.configuration_errors.append(error)
return node
in_par = False
tmp = ''
stacked_par = 0
for char in pattern:
if char in (',', '|'):
# Maybe we are in a par, if so, just stack it
if in_par:
tmp += char
else:
# Oh we got a real cut in an expression, if so, cut it
tmp = tmp.strip()
node.operand = '|'
if tmp != '':
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
tmp = ''
elif char in ('&', '+'):
# Maybe we are in a par, if so, just stack it
if in_par:
tmp += char
else:
# Oh we got a real cut in an expression, if so, cut it
tmp = tmp.strip()
node.operand = '&'
if tmp != '':
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
tmp = ''
elif char == '(':
stacked_par += 1
in_par = True
tmp = tmp.strip()
# Maybe we just start a par, but we got some things in tmp
# that should not be good in fact !
if stacked_par == 1 and tmp != '':
# TODO : real error
print("ERROR : bad expression near", tmp)
continue
# If we are already in a par, add this (
# but not if it's the first one so
if stacked_par > 1:
tmp += char
elif char == ')':
stacked_par -= 1
if stacked_par < 0:
# TODO : real error
print("Error : bad expression near", tmp, "too much ')'")
continue
if stacked_par == 0:
tmp = tmp.strip()
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
in_par = False
# OK now clean the tmp so we start clean
tmp = ''
continue
# ok here we are still in a huge par, we just close one sub one
tmp += char
# Maybe it's a classic character, if so, continue
else:
tmp += char
# Be sure to manage the trailing part when the line is done
tmp = tmp.strip()
if tmp != '':
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
return node
|
[
"def",
"eval_cor_pattern",
"(",
"self",
",",
"pattern",
")",
":",
"# pylint:disable=too-many-branches",
"pattern",
"=",
"pattern",
".",
"strip",
"(",
")",
"complex_node",
"=",
"False",
"# Look if it's a complex pattern (with rule) or",
"# if it's a leaf of it, like a host/service",
"for",
"char",
"in",
"'()+&|,'",
":",
"if",
"char",
"in",
"pattern",
":",
"complex_node",
"=",
"True",
"node",
"=",
"ComplexExpressionNode",
"(",
")",
"# if it's a single expression like !linux or production",
"# (where \"linux\" and \"production\" are hostgroup names)",
"# we will get the objects from it and return a leaf node",
"if",
"not",
"complex_node",
":",
"# If it's a not value, tag the node and find",
"# the name without this ! operator",
"if",
"pattern",
".",
"startswith",
"(",
"'!'",
")",
":",
"node",
".",
"not_value",
"=",
"True",
"pattern",
"=",
"pattern",
"[",
"1",
":",
"]",
"node",
".",
"operand",
"=",
"self",
".",
"ctx",
"node",
".",
"leaf",
"=",
"True",
"obj",
",",
"error",
"=",
"self",
".",
"find_object",
"(",
"pattern",
")",
"if",
"obj",
"is",
"not",
"None",
":",
"node",
".",
"content",
"=",
"obj",
"else",
":",
"node",
".",
"configuration_errors",
".",
"append",
"(",
"error",
")",
"return",
"node",
"in_par",
"=",
"False",
"tmp",
"=",
"''",
"stacked_par",
"=",
"0",
"for",
"char",
"in",
"pattern",
":",
"if",
"char",
"in",
"(",
"','",
",",
"'|'",
")",
":",
"# Maybe we are in a par, if so, just stack it",
"if",
"in_par",
":",
"tmp",
"+=",
"char",
"else",
":",
"# Oh we got a real cut in an expression, if so, cut it",
"tmp",
"=",
"tmp",
".",
"strip",
"(",
")",
"node",
".",
"operand",
"=",
"'|'",
"if",
"tmp",
"!=",
"''",
":",
"son",
"=",
"self",
".",
"eval_cor_pattern",
"(",
"tmp",
")",
"node",
".",
"sons",
".",
"append",
"(",
"son",
")",
"tmp",
"=",
"''",
"elif",
"char",
"in",
"(",
"'&'",
",",
"'+'",
")",
":",
"# Maybe we are in a par, if so, just stack it",
"if",
"in_par",
":",
"tmp",
"+=",
"char",
"else",
":",
"# Oh we got a real cut in an expression, if so, cut it",
"tmp",
"=",
"tmp",
".",
"strip",
"(",
")",
"node",
".",
"operand",
"=",
"'&'",
"if",
"tmp",
"!=",
"''",
":",
"son",
"=",
"self",
".",
"eval_cor_pattern",
"(",
"tmp",
")",
"node",
".",
"sons",
".",
"append",
"(",
"son",
")",
"tmp",
"=",
"''",
"elif",
"char",
"==",
"'('",
":",
"stacked_par",
"+=",
"1",
"in_par",
"=",
"True",
"tmp",
"=",
"tmp",
".",
"strip",
"(",
")",
"# Maybe we just start a par, but we got some things in tmp",
"# that should not be good in fact !",
"if",
"stacked_par",
"==",
"1",
"and",
"tmp",
"!=",
"''",
":",
"# TODO : real error",
"print",
"(",
"\"ERROR : bad expression near\"",
",",
"tmp",
")",
"continue",
"# If we are already in a par, add this (",
"# but not if it's the first one so",
"if",
"stacked_par",
">",
"1",
":",
"tmp",
"+=",
"char",
"elif",
"char",
"==",
"')'",
":",
"stacked_par",
"-=",
"1",
"if",
"stacked_par",
"<",
"0",
":",
"# TODO : real error",
"print",
"(",
"\"Error : bad expression near\"",
",",
"tmp",
",",
"\"too much ')'\"",
")",
"continue",
"if",
"stacked_par",
"==",
"0",
":",
"tmp",
"=",
"tmp",
".",
"strip",
"(",
")",
"son",
"=",
"self",
".",
"eval_cor_pattern",
"(",
"tmp",
")",
"node",
".",
"sons",
".",
"append",
"(",
"son",
")",
"in_par",
"=",
"False",
"# OK now clean the tmp so we start clean",
"tmp",
"=",
"''",
"continue",
"# ok here we are still in a huge par, we just close one sub one",
"tmp",
"+=",
"char",
"# Maybe it's a classic character, if so, continue",
"else",
":",
"tmp",
"+=",
"char",
"# Be sure to manage the trainling part when the line is done",
"tmp",
"=",
"tmp",
".",
"strip",
"(",
")",
"if",
"tmp",
"!=",
"''",
":",
"son",
"=",
"self",
".",
"eval_cor_pattern",
"(",
"tmp",
")",
"node",
".",
"sons",
".",
"append",
"(",
"son",
")",
"return",
"node"
] |
Parse and build recursively a tree of ComplexExpressionNode from pattern
:param pattern: pattern to parse
:type pattern: str
:return: root node of parsed tree
:type: alignak.complexexpression.ComplexExpressionNode
|
[
"Parse",
"and",
"build",
"recursively",
"a",
"tree",
"of",
"ComplexExpressionNode",
"from",
"pattern"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/complexexpression.py#L147-L261
|
train
|
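The parser above splits an expression on top-level operators while respecting parentheses; the helper below is a simplified, standalone sketch of that splitting step (it covers only part of the real grammar):

def split_top_level(expression):
    """Split on top-level ',' '|' '&' '+' while keeping parenthesised
    sub-expressions intact."""
    parts, tmp, depth = [], '', 0
    for char in expression:
        if char == '(':
            depth += 1
            tmp += char
        elif char == ')':
            depth -= 1
            tmp += char
        elif char in ',|&+' and depth == 0:
            if tmp.strip():
                parts.append(tmp.strip())
            tmp = ''
        else:
            tmp += char
    if tmp.strip():
        parts.append(tmp.strip())
    return parts

print(split_top_level('linux&(production|qualification)&!test'))
# ['linux', '(production|qualification)', '!test']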
Alignak-monitoring/alignak
|
alignak/complexexpression.py
|
ComplexExpressionFactory.find_object
|
def find_object(self, pattern):
"""Get a list of host corresponding to the pattern regarding the context
:param pattern: pattern to find
:type pattern: str
:return: Host list matching pattern (hostgroup name, template, all)
:rtype: list[alignak.objects.host.Host]
"""
obj = None
error = None
pattern = pattern.strip()
if pattern == '*':
obj = [h.host_name for h in list(self.all_elements.items.values())
if getattr(h, 'host_name', '') != '' and not h.is_tpl()]
return obj, error
# Ok a more classic way
if self.ctx == 'hostgroups':
# Ok try to find this hostgroup
hgr = self.grps.find_by_name(pattern)
# Maybe it's a known one?
if not hgr:
error = "Error : cannot find the %s of the expression '%s'" % (self.ctx, pattern)
return hgr, error
# Ok the group is found, get the elements!
elts = hgr.get_hosts()
elts = strip_and_uniq(elts)
# Maybe the hostgroup members contain '*', if so expand with all hosts
if '*' in elts:
elts.extend([h.host_name for h in list(self.all_elements.items.values())
if getattr(h, 'host_name', '') != '' and not h.is_tpl()])
# And remove this strange hostname too :)
elts.remove('*')
return elts, error
obj = self.grps.find_hosts_that_use_template(pattern)
return obj, error
|
python
|
def find_object(self, pattern):
"""Get a list of host corresponding to the pattern regarding the context
:param pattern: pattern to find
:type pattern: str
:return: Host list matching pattern (hostgroup name, template, all)
:rtype: list[alignak.objects.host.Host]
"""
obj = None
error = None
pattern = pattern.strip()
if pattern == '*':
obj = [h.host_name for h in list(self.all_elements.items.values())
if getattr(h, 'host_name', '') != '' and not h.is_tpl()]
return obj, error
# Ok a more classic way
if self.ctx == 'hostgroups':
# Ok try to find this hostgroup
hgr = self.grps.find_by_name(pattern)
# Maybe it's a known one?
if not hgr:
error = "Error : cannot find the %s of the expression '%s'" % (self.ctx, pattern)
return hgr, error
# Ok the group is found, get the elements!
elts = hgr.get_hosts()
elts = strip_and_uniq(elts)
# Maybe the hostgroup members contain '*', if so expand with all hosts
if '*' in elts:
elts.extend([h.host_name for h in list(self.all_elements.items.values())
if getattr(h, 'host_name', '') != '' and not h.is_tpl()])
# And remove this strange hostname too :)
elts.remove('*')
return elts, error
obj = self.grps.find_hosts_that_use_template(pattern)
return obj, error
|
[
"def",
"find_object",
"(",
"self",
",",
"pattern",
")",
":",
"obj",
"=",
"None",
"error",
"=",
"None",
"pattern",
"=",
"pattern",
".",
"strip",
"(",
")",
"if",
"pattern",
"==",
"'*'",
":",
"obj",
"=",
"[",
"h",
".",
"host_name",
"for",
"h",
"in",
"list",
"(",
"self",
".",
"all_elements",
".",
"items",
".",
"values",
"(",
")",
")",
"if",
"getattr",
"(",
"h",
",",
"'host_name'",
",",
"''",
")",
"!=",
"''",
"and",
"not",
"h",
".",
"is_tpl",
"(",
")",
"]",
"return",
"obj",
",",
"error",
"# Ok a more classic way",
"if",
"self",
".",
"ctx",
"==",
"'hostgroups'",
":",
"# Ok try to find this hostgroup",
"hgr",
"=",
"self",
".",
"grps",
".",
"find_by_name",
"(",
"pattern",
")",
"# Maybe it's an known one?",
"if",
"not",
"hgr",
":",
"error",
"=",
"\"Error : cannot find the %s of the expression '%s'\"",
"%",
"(",
"self",
".",
"ctx",
",",
"pattern",
")",
"return",
"hgr",
",",
"error",
"# Ok the group is found, get the elements!",
"elts",
"=",
"hgr",
".",
"get_hosts",
"(",
")",
"elts",
"=",
"strip_and_uniq",
"(",
"elts",
")",
"# Maybe the hostgroup memebrs is '*', if so expand with all hosts",
"if",
"'*'",
"in",
"elts",
":",
"elts",
".",
"extend",
"(",
"[",
"h",
".",
"host_name",
"for",
"h",
"in",
"list",
"(",
"self",
".",
"all_elements",
".",
"items",
".",
"values",
"(",
")",
")",
"if",
"getattr",
"(",
"h",
",",
"'host_name'",
",",
"''",
")",
"!=",
"''",
"and",
"not",
"h",
".",
"is_tpl",
"(",
")",
"]",
")",
"# And remove this strange hostname too :)",
"elts",
".",
"remove",
"(",
"'*'",
")",
"return",
"elts",
",",
"error",
"obj",
"=",
"self",
".",
"grps",
".",
"find_hosts_that_use_template",
"(",
"pattern",
")",
"return",
"obj",
",",
"error"
] |
Get a list of host corresponding to the pattern regarding the context
:param pattern: pattern to find
:type pattern: str
:return: Host list matching pattern (hostgroup name, template, all)
:rtype: list[alignak.objects.host.Host]
|
[
"Get",
"a",
"list",
"of",
"host",
"corresponding",
"to",
"the",
"pattern",
"regarding",
"the",
"context"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/complexexpression.py#L263-L303
|
train
|
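A simplified illustration of the '*' expansion described above, with plain dictionaries standing in for the real Host and Hostgroup objects (all names are invented for the example):

hosts = {'srv01': {'is_template': False},
         'srv02': {'is_template': False},
         'generic-host': {'is_template': True}}
hostgroups = {'linux': ['srv01', '*']}

def expand_group(group_name):
    """Resolve a hostgroup, expanding a '*' member to every concrete host."""
    members = list(hostgroups.get(group_name, []))
    if '*' in members:
        members.remove('*')
        members.extend(name for name, host in hosts.items()
                       if not host['is_template'])
    # de-duplicate, templates stay excluded
    return sorted(set(members))

print(expand_group('linux'))  # ['srv01', 'srv02']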
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.all_my_hosts_and_services
|
def all_my_hosts_and_services(self):
"""Create an iterator for all my known hosts and services
:return: None
"""
for what in (self.hosts, self.services):
for item in what:
yield item
|
python
|
def all_my_hosts_and_services(self):
"""Create an iterator for all my known hosts and services
:return: None
"""
for what in (self.hosts, self.services):
for item in what:
yield item
|
[
"def",
"all_my_hosts_and_services",
"(",
"self",
")",
":",
"for",
"what",
"in",
"(",
"self",
".",
"hosts",
",",
"self",
".",
"services",
")",
":",
"for",
"item",
"in",
"what",
":",
"yield",
"item"
] |
Create an iterator for all my known hosts and services
:return: None
|
[
"Create",
"an",
"iterator",
"for",
"all",
"my",
"known",
"hosts",
"and",
"services"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L265-L272
|
train
|
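The iterator above is plain generator chaining; an equivalent standalone sketch, checked against itertools.chain:

import itertools

hosts = ['srv01', 'srv02']
services = ['srv01/HTTP', 'srv02/SSH']

def all_items(*collections):
    """Yield every element of every collection, in order."""
    for collection in collections:
        for item in collection:
            yield item

assert list(all_items(hosts, services)) == list(itertools.chain(hosts, services))
print(list(all_items(hosts, services)))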
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.load_conf
|
def load_conf(self, instance_id, instance_name, conf):
"""Load configuration received from Arbiter and pushed by our Scheduler daemon
:param instance_name: scheduler instance name
:type instance_name: str
:param instance_id: scheduler instance id
:type instance_id: str
:param conf: configuration to load
:type conf: alignak.objects.config.Config
:return: None
"""
self.pushed_conf = conf
logger.info("loading my configuration (%s / %s):",
instance_id, self.pushed_conf.instance_id)
logger.debug("Properties:")
for key in sorted(self.pushed_conf.properties):
logger.debug("- %s: %s", key, getattr(self.pushed_conf, key, []))
logger.debug("Macros:")
for key in sorted(self.pushed_conf.macros):
logger.debug("- %s: %s", key, getattr(self.pushed_conf.macros, key, []))
logger.debug("Objects types:")
for _, _, strclss, _, _ in list(self.pushed_conf.types_creations.values()):
if strclss in ['arbiters', 'schedulers', 'brokers',
'pollers', 'reactionners', 'receivers']:
continue
setattr(self, strclss, getattr(self.pushed_conf, strclss, []))
# Internal statistics
logger.debug("- %d %s", len(getattr(self, strclss)), strclss)
statsmgr.gauge('configuration.%s' % strclss, len(getattr(self, strclss)))
# We need reversed list for searching in the retention file read
# todo: check what it is about...
self.services.optimize_service_search(self.hosts)
# Just deprecated
# # Compile the triggers
# if getattr(self, 'triggers', None):
# logger.info("compiling the triggers...")
# self.triggers.compile()
# self.triggers.load_objects(self)
# else:
# logger.info("No triggers")
# From the Arbiter configuration. Used for satellites to differentiate the schedulers
self.alignak_name = self.pushed_conf.alignak_name
self.instance_id = instance_id
self.instance_name = instance_name
self.push_flavor = getattr(self.pushed_conf, 'push_flavor', 'None')
logger.info("Set my scheduler instance: %s - %s - %s",
self.instance_id, self.instance_name, self.push_flavor)
# Tag our monitored hosts/services with our instance_id
for item in self.all_my_hosts_and_services():
item.instance_id = self.instance_id
|
python
|
def load_conf(self, instance_id, instance_name, conf):
"""Load configuration received from Arbiter and pushed by our Scheduler daemon
:param instance_name: scheduler instance name
:type instance_name: str
:param instance_id: scheduler instance id
:type instance_id: str
:param conf: configuration to load
:type conf: alignak.objects.config.Config
:return: None
"""
self.pushed_conf = conf
logger.info("loading my configuration (%s / %s):",
instance_id, self.pushed_conf.instance_id)
logger.debug("Properties:")
for key in sorted(self.pushed_conf.properties):
logger.debug("- %s: %s", key, getattr(self.pushed_conf, key, []))
logger.debug("Macros:")
for key in sorted(self.pushed_conf.macros):
logger.debug("- %s: %s", key, getattr(self.pushed_conf.macros, key, []))
logger.debug("Objects types:")
for _, _, strclss, _, _ in list(self.pushed_conf.types_creations.values()):
if strclss in ['arbiters', 'schedulers', 'brokers',
'pollers', 'reactionners', 'receivers']:
continue
setattr(self, strclss, getattr(self.pushed_conf, strclss, []))
# Internal statistics
logger.debug("- %d %s", len(getattr(self, strclss)), strclss)
statsmgr.gauge('configuration.%s' % strclss, len(getattr(self, strclss)))
# We need reversed list for searching in the retention file read
# todo: check what it is about...
self.services.optimize_service_search(self.hosts)
# Just deprecated
# # Compile the triggers
# if getattr(self, 'triggers', None):
# logger.info("compiling the triggers...")
# self.triggers.compile()
# self.triggers.load_objects(self)
# else:
# logger.info("No triggers")
# From the Arbiter configuration. Used for satellites to differentiate the schedulers
self.alignak_name = self.pushed_conf.alignak_name
self.instance_id = instance_id
self.instance_name = instance_name
self.push_flavor = getattr(self.pushed_conf, 'push_flavor', 'None')
logger.info("Set my scheduler instance: %s - %s - %s",
self.instance_id, self.instance_name, self.push_flavor)
# Tag our monitored hosts/services with our instance_id
for item in self.all_my_hosts_and_services():
item.instance_id = self.instance_id
|
[
"def",
"load_conf",
"(",
"self",
",",
"instance_id",
",",
"instance_name",
",",
"conf",
")",
":",
"self",
".",
"pushed_conf",
"=",
"conf",
"logger",
".",
"info",
"(",
"\"loading my configuration (%s / %s):\"",
",",
"instance_id",
",",
"self",
".",
"pushed_conf",
".",
"instance_id",
")",
"logger",
".",
"debug",
"(",
"\"Properties:\"",
")",
"for",
"key",
"in",
"sorted",
"(",
"self",
".",
"pushed_conf",
".",
"properties",
")",
":",
"logger",
".",
"debug",
"(",
"\"- %s: %s\"",
",",
"key",
",",
"getattr",
"(",
"self",
".",
"pushed_conf",
",",
"key",
",",
"[",
"]",
")",
")",
"logger",
".",
"debug",
"(",
"\"Macros:\"",
")",
"for",
"key",
"in",
"sorted",
"(",
"self",
".",
"pushed_conf",
".",
"macros",
")",
":",
"logger",
".",
"debug",
"(",
"\"- %s: %s\"",
",",
"key",
",",
"getattr",
"(",
"self",
".",
"pushed_conf",
".",
"macros",
",",
"key",
",",
"[",
"]",
")",
")",
"logger",
".",
"debug",
"(",
"\"Objects types:\"",
")",
"for",
"_",
",",
"_",
",",
"strclss",
",",
"_",
",",
"_",
"in",
"list",
"(",
"self",
".",
"pushed_conf",
".",
"types_creations",
".",
"values",
"(",
")",
")",
":",
"if",
"strclss",
"in",
"[",
"'arbiters'",
",",
"'schedulers'",
",",
"'brokers'",
",",
"'pollers'",
",",
"'reactionners'",
",",
"'receivers'",
"]",
":",
"continue",
"setattr",
"(",
"self",
",",
"strclss",
",",
"getattr",
"(",
"self",
".",
"pushed_conf",
",",
"strclss",
",",
"[",
"]",
")",
")",
"# Internal statistics",
"logger",
".",
"debug",
"(",
"\"- %d %s\"",
",",
"len",
"(",
"getattr",
"(",
"self",
",",
"strclss",
")",
")",
",",
"strclss",
")",
"statsmgr",
".",
"gauge",
"(",
"'configuration.%s'",
"%",
"strclss",
",",
"len",
"(",
"getattr",
"(",
"self",
",",
"strclss",
")",
")",
")",
"# We need reversed list for searching in the retention file read",
"# todo: check what it is about...",
"self",
".",
"services",
".",
"optimize_service_search",
"(",
"self",
".",
"hosts",
")",
"# Just deprecated",
"# # Compile the triggers",
"# if getattr(self, 'triggers', None):",
"# logger.info(\"compiling the triggers...\")",
"# self.triggers.compile()",
"# self.triggers.load_objects(self)",
"# else:",
"# logger.info(\"No triggers\")",
"# From the Arbiter configuration. Used for satellites to differentiate the schedulers",
"self",
".",
"alignak_name",
"=",
"self",
".",
"pushed_conf",
".",
"alignak_name",
"self",
".",
"instance_id",
"=",
"instance_id",
"self",
".",
"instance_name",
"=",
"instance_name",
"self",
".",
"push_flavor",
"=",
"getattr",
"(",
"self",
".",
"pushed_conf",
",",
"'push_flavor'",
",",
"'None'",
")",
"logger",
".",
"info",
"(",
"\"Set my scheduler instance: %s - %s - %s\"",
",",
"self",
".",
"instance_id",
",",
"self",
".",
"instance_name",
",",
"self",
".",
"push_flavor",
")",
"# Tag our monitored hosts/services with our instance_id",
"for",
"item",
"in",
"self",
".",
"all_my_hosts_and_services",
"(",
")",
":",
"item",
".",
"instance_id",
"=",
"self",
".",
"instance_id"
] |
Load configuration received from Arbiter and pushed by our Scheduler daemon
:param instance_name: scheduler instance name
:type instance_name: str
:param instance_id: scheduler instance id
:type instance_id: str
:param conf: configuration to load
:type conf: alignak.objects.config.Config
:return: None
|
[
"Load",
"configuration",
"received",
"from",
"Arbiter",
"and",
"pushed",
"by",
"our",
"Scheduler",
"daemon"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L274-L329
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.update_recurrent_works_tick
|
def update_recurrent_works_tick(self, conf):
"""Modify the tick value for the scheduler recurrent work
A tick is a number of scheduler loops before executing the recurrent work
The provided configuration may contain some tick-function_name keys that contain
a tick value to be updated. Those parameters are defined in the alignak environment file.
Indeed this function is called with the Scheduler daemon object. Note that the ``conf``
parameter may also be a dictionary.
:param conf: the daemon link configuration to search in
:type conf: alignak.daemons.schedulerdaemon.Alignak
:return: None
"""
for key in self.recurrent_works:
(name, fun, _) = self.recurrent_works[key]
if isinstance(conf, dict):
new_tick = conf.get('tick_%s' % name, None)
else:
new_tick = getattr(conf, 'tick_%s' % name, None)
if new_tick is not None:
logger.debug("Requesting to change the default tick to %d for the action %s",
int(new_tick), name)
else:
continue
# Update the default scheduler tick for this function
try:
new_tick = int(new_tick)
logger.info("Changing the default tick to %d for the action %s", new_tick, name)
self.recurrent_works[key] = (name, fun, new_tick)
except ValueError:
logger.warning("Changing the default tick for '%s' to '%s' failed!", new_tick, name)
|
python
|
def update_recurrent_works_tick(self, conf):
"""Modify the tick value for the scheduler recurrent work
A tick is a number of scheduler loops before executing the recurrent work
The provided configuration may contain some tick-function_name keys that contain
a tick value to be updated. Those parameters are defined in the alignak environment file.
Indeed this function is called with the Scheduler daemon object. Note that the ``conf``
parameter may also be a dictionary.
:param conf: the daemon link configuration to search in
:type conf: alignak.daemons.schedulerdaemon.Alignak
:return: None
"""
for key in self.recurrent_works:
(name, fun, _) = self.recurrent_works[key]
if isinstance(conf, dict):
new_tick = conf.get('tick_%s' % name, None)
else:
new_tick = getattr(conf, 'tick_%s' % name, None)
if new_tick is not None:
logger.debug("Requesting to change the default tick to %d for the action %s",
int(new_tick), name)
else:
continue
# Update the default scheduler tick for this function
try:
new_tick = int(new_tick)
logger.info("Changing the default tick to %d for the action %s", new_tick, name)
self.recurrent_works[key] = (name, fun, new_tick)
except ValueError:
logger.warning("Changing the default tick for '%s' to '%s' failed!", new_tick, name)
|
[
"def",
"update_recurrent_works_tick",
"(",
"self",
",",
"conf",
")",
":",
"for",
"key",
"in",
"self",
".",
"recurrent_works",
":",
"(",
"name",
",",
"fun",
",",
"_",
")",
"=",
"self",
".",
"recurrent_works",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"conf",
",",
"dict",
")",
":",
"new_tick",
"=",
"conf",
".",
"get",
"(",
"'tick_%s'",
"%",
"name",
",",
"None",
")",
"else",
":",
"new_tick",
"=",
"getattr",
"(",
"conf",
",",
"'tick_%s'",
"%",
"name",
",",
"None",
")",
"if",
"new_tick",
"is",
"not",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Requesting to change the default tick to %d for the action %s\"",
",",
"int",
"(",
"new_tick",
")",
",",
"name",
")",
"else",
":",
"continue",
"# Update the default scheduler tick for this function",
"try",
":",
"new_tick",
"=",
"int",
"(",
"new_tick",
")",
"logger",
".",
"info",
"(",
"\"Changing the default tick to %d for the action %s\"",
",",
"new_tick",
",",
"name",
")",
"self",
".",
"recurrent_works",
"[",
"key",
"]",
"=",
"(",
"name",
",",
"fun",
",",
"new_tick",
")",
"except",
"ValueError",
":",
"logger",
".",
"warning",
"(",
"\"Changing the default tick for '%s' to '%s' failed!\"",
",",
"new_tick",
",",
"name",
")"
] |
Modify the tick value for the scheduler recurrent work
A tick is a number of scheduler loops before executing the recurrent work
The provided configuration may contain some tick-function_name keys that contain
a tick value to be updated. Those parameters are defined in the alignak environment file.
Indeed this function is called with the Scheduler daemon object. Note that the ``conf``
parameter may also be a dictionary.
:param conf: the daemon link configuration to search in
:type conf: alignak.daemons.schedulerdaemon.Alignak
:return: None
|
[
"Modify",
"the",
"tick",
"value",
"for",
"the",
"scheduler",
"recurrent",
"work"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L331-L364
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.dump_config
|
def dump_config(self):
"""Dump scheduler configuration into a temporary file
The dumped content is JSON formatted
:return: None
"""
path = os.path.join(tempfile.gettempdir(),
'dump-cfg-scheduler-%s-%d.json' % (self.name, int(time.time())))
try:
self.pushed_conf.dump(path)
except (OSError, IndexError) as exp: # pragma: no cover, should never happen...
logger.critical("Error when writing the configuration dump file %s: %s",
path, str(exp))
|
python
|
def dump_config(self):
"""Dump scheduler configuration into a temporary file
The dumped content is JSON formatted
:return: None
"""
path = os.path.join(tempfile.gettempdir(),
'dump-cfg-scheduler-%s-%d.json' % (self.name, int(time.time())))
try:
self.pushed_conf.dump(path)
except (OSError, IndexError) as exp: # pragma: no cover, should never happen...
logger.critical("Error when writing the configuration dump file %s: %s",
path, str(exp))
|
[
"def",
"dump_config",
"(",
"self",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"'dump-cfg-scheduler-%s-%d.json'",
"%",
"(",
"self",
".",
"name",
",",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
")",
"try",
":",
"self",
".",
"pushed_conf",
".",
"dump",
"(",
"path",
")",
"except",
"(",
"OSError",
",",
"IndexError",
")",
"as",
"exp",
":",
"# pragma: no cover, should never happen...",
"logger",
".",
"critical",
"(",
"\"Error when writing the configuration dump file %s: %s\"",
",",
"path",
",",
"str",
"(",
"exp",
")",
")"
] |
Dump scheduler configuration into a temporary file
The dumped content is JSON formatted
:return: None
|
[
"Dump",
"scheduler",
"configuration",
"into",
"a",
"temporary",
"file"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L421-L435
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.add_notification
|
def add_notification(self, notification):
"""Add a notification into actions list
:param notification: notification to add
:type notification: alignak.notification.Notification
:return: None
"""
if notification.uuid in self.actions:
logger.warning("Already existing notification: %s", notification)
return
logger.debug("Adding a notification: %s", notification)
self.actions[notification.uuid] = notification
self.nb_notifications += 1
# A notification which is not a master one asks for a brok
if notification.contact is not None:
self.add(notification.get_initial_status_brok())
|
python
|
def add_notification(self, notification):
"""Add a notification into actions list
:param notification: notification to add
:type notification: alignak.notification.Notification
:return: None
"""
if notification.uuid in self.actions:
logger.warning("Already existing notification: %s", notification)
return
logger.debug("Adding a notification: %s", notification)
self.actions[notification.uuid] = notification
self.nb_notifications += 1
# A notification which is not a master one asks for a brok
if notification.contact is not None:
self.add(notification.get_initial_status_brok())
|
[
"def",
"add_notification",
"(",
"self",
",",
"notification",
")",
":",
"if",
"notification",
".",
"uuid",
"in",
"self",
".",
"actions",
":",
"logger",
".",
"warning",
"(",
"\"Already existing notification: %s\"",
",",
"notification",
")",
"return",
"logger",
".",
"debug",
"(",
"\"Adding a notification: %s\"",
",",
"notification",
")",
"self",
".",
"actions",
"[",
"notification",
".",
"uuid",
"]",
"=",
"notification",
"self",
".",
"nb_notifications",
"+=",
"1",
"# A notification which is not a master one asks for a brok",
"if",
"notification",
".",
"contact",
"is",
"not",
"None",
":",
"self",
".",
"add",
"(",
"notification",
".",
"get_initial_status_brok",
"(",
")",
")"
] |
Add a notification into actions list
:param notification: notification to add
:type notification: alignak.notification.Notification
:return: None
|
[
"Add",
"a",
"notification",
"into",
"actions",
"list"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L503-L520
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.add_check
|
def add_check(self, check):
"""Add a check into the scheduler checks list
:param check: check to add
:type check: alignak.check.Check
:return: None
"""
if check is None:
return
if check.uuid in self.checks:
logger.debug("Already existing check: %s", check)
return
logger.debug("Adding a check: %s", check)
# Add a new check to the scheduler checks list
self.checks[check.uuid] = check
self.nb_checks += 1
# Raise a brok to inform about a next check is to come ...
# but only for items that are actively checked
item = self.find_item_by_id(check.ref)
if item.active_checks_enabled:
self.add(item.get_next_schedule_brok())
|
python
|
def add_check(self, check):
"""Add a check into the scheduler checks list
:param check: check to add
:type check: alignak.check.Check
:return: None
"""
if check is None:
return
if check.uuid in self.checks:
logger.debug("Already existing check: %s", check)
return
logger.debug("Adding a check: %s", check)
# Add a new check to the scheduler checks list
self.checks[check.uuid] = check
self.nb_checks += 1
# Raise a brok to inform about a next check is to come ...
# but only for items that are actively checked
item = self.find_item_by_id(check.ref)
if item.active_checks_enabled:
self.add(item.get_next_schedule_brok())
|
[
"def",
"add_check",
"(",
"self",
",",
"check",
")",
":",
"if",
"check",
"is",
"None",
":",
"return",
"if",
"check",
".",
"uuid",
"in",
"self",
".",
"checks",
":",
"logger",
".",
"debug",
"(",
"\"Already existing check: %s\"",
",",
"check",
")",
"return",
"logger",
".",
"debug",
"(",
"\"Adding a check: %s\"",
",",
"check",
")",
"# Add a new check to the scheduler checks list",
"self",
".",
"checks",
"[",
"check",
".",
"uuid",
"]",
"=",
"check",
"self",
".",
"nb_checks",
"+=",
"1",
"# Raise a brok to inform about a next check is to come ...",
"# but only for items that are actively checked",
"item",
"=",
"self",
".",
"find_item_by_id",
"(",
"check",
".",
"ref",
")",
"if",
"item",
".",
"active_checks_enabled",
":",
"self",
".",
"add",
"(",
"item",
".",
"get_next_schedule_brok",
"(",
")",
")"
] |
Add a check into the scheduler checks list
:param check: check to add
:type check: alignak.check.Check
:return: None
|
[
"Add",
"a",
"check",
"into",
"the",
"scheduler",
"checks",
"list"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L522-L544
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.add_event_handler
|
def add_event_handler(self, action):
"""Add a event handler into actions list
:param action: event handler to add
:type action: alignak.eventhandler.EventHandler
:return: None
"""
if action.uuid in self.actions:
logger.info("Already existing event handler: %s", action)
return
self.actions[action.uuid] = action
self.nb_event_handlers += 1
|
python
|
def add_event_handler(self, action):
"""Add a event handler into actions list
:param action: event handler to add
:type action: alignak.eventhandler.EventHandler
:return: None
"""
if action.uuid in self.actions:
logger.info("Already existing event handler: %s", action)
return
self.actions[action.uuid] = action
self.nb_event_handlers += 1
|
[
"def",
"add_event_handler",
"(",
"self",
",",
"action",
")",
":",
"if",
"action",
".",
"uuid",
"in",
"self",
".",
"actions",
":",
"logger",
".",
"info",
"(",
"\"Already existing event handler: %s\"",
",",
"action",
")",
"return",
"self",
".",
"actions",
"[",
"action",
".",
"uuid",
"]",
"=",
"action",
"self",
".",
"nb_event_handlers",
"+=",
"1"
] |
Add an event handler into actions list
:param action: event handler to add
:type action: alignak.eventhandler.EventHandler
:return: None
|
[
"Add",
"a",
"event",
"handler",
"into",
"actions",
"list"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L546-L558
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.hook_point
|
def hook_point(self, hook_name):
"""Generic function to call modules methods if such method is avalaible
:param hook_name: function name to call
:type hook_name: str
:return:None
"""
self.my_daemon.hook_point(hook_name=hook_name, handle=self)
|
python
|
def hook_point(self, hook_name):
"""Generic function to call modules methods if such method is avalaible
:param hook_name: function name to call
:type hook_name: str
:return:None
"""
self.my_daemon.hook_point(hook_name=hook_name, handle=self)
|
[
"def",
"hook_point",
"(",
"self",
",",
"hook_name",
")",
":",
"self",
".",
"my_daemon",
".",
"hook_point",
"(",
"hook_name",
"=",
"hook_name",
",",
"handle",
"=",
"self",
")"
] |
Generic function to call modules methods if such method is available
:param hook_name: function name to call
:type hook_name: str
:return:None
|
[
"Generic",
"function",
"to",
"call",
"modules",
"methods",
"if",
"such",
"method",
"is",
"avalaible"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L600-L607
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.clean_queues
|
def clean_queues(self):
# pylint: disable=too-many-locals
"""Reduces internal list size to max allowed
* checks and broks : 5 * length of hosts + services
* actions : 5 * length of hosts + services + contacts
:return: None
"""
# If we set the interval at 0, we bail out
if getattr(self.pushed_conf, 'tick_clean_queues', 0) == 0:
logger.debug("No queues cleaning...")
return
max_checks = MULTIPLIER_MAX_CHECKS * (len(self.hosts) + len(self.services))
max_broks = MULTIPLIER_MAX_BROKS * (len(self.hosts) + len(self.services))
max_actions = MULTIPLIER_MAX_ACTIONS * len(self.contacts) * (len(self.hosts) +
len(self.services))
# For checks, it's not very simple:
# For checks, they may be referred to their host/service
# We do not just del them in the check list, but also in their service/host
# We want ids lower than max_id - 2*max_checks
self.nb_checks_dropped = 0
if max_checks and len(self.checks) > max_checks:
# keys does not ensure sorted keys. Max is slow but we have no other way.
to_del_checks = [c for c in list(self.checks.values())]
to_del_checks.sort(key=lambda x: x.creation_time)
to_del_checks = to_del_checks[:-max_checks]
self.nb_checks_dropped = len(to_del_checks)
if to_del_checks:
logger.warning("I have to drop some checks (%d)..., sorry :(",
self.nb_checks_dropped)
for chk in to_del_checks:
c_id = chk.uuid
items = getattr(self, chk.ref_type + 's')
elt = items[chk.ref]
# First remove the link in host/service
elt.remove_in_progress_check(chk)
# Then in dependent checks (I depend on, or check
# depend on me)
for dependent_checks in chk.depend_on_me:
dependent_checks.depend_on.remove(chk.uuid)
for c_temp in chk.depend_on:
c_temp.depend_on_me.remove(chk)
del self.checks[c_id] # Final Bye bye ...
# For broks and actions, it's more simple
# For broks, manage global but also all brokers
self.nb_broks_dropped = 0
for broker_link in list(self.my_daemon.brokers.values()):
if max_broks and len(broker_link.broks) > max_broks:
logger.warning("I have to drop some broks (%d > %d) for the broker %s "
"..., sorry :(", len(broker_link.broks), max_broks, broker_link)
kept_broks = sorted(broker_link.broks, key=lambda x: x.creation_time)
# Delete the oldest broks to keep the max_broks most recent...
# todo: is it a good choice !
broker_link.broks = kept_broks[0:max_broks]
self.nb_actions_dropped = 0
if max_actions and len(self.actions) > max_actions:
logger.warning("I have to del some actions (currently: %d, max: %d)..., sorry :(",
len(self.actions), max_actions)
to_del_actions = [c for c in list(self.actions.values())]
to_del_actions.sort(key=lambda x: x.creation_time)
to_del_actions = to_del_actions[:-max_actions]
self.nb_actions_dropped = len(to_del_actions)
for act in to_del_actions:
if act.is_a == 'notification':
self.find_item_by_id(act.ref).remove_in_progress_notification(act)
del self.actions[act.uuid]
|
python
|
def clean_queues(self):
# pylint: disable=too-many-locals
"""Reduces internal list size to max allowed
* checks and broks : 5 * length of hosts + services
* actions : 5 * length of hosts + services + contacts
:return: None
"""
# If we set the interval at 0, we bail out
if getattr(self.pushed_conf, 'tick_clean_queues', 0) == 0:
logger.debug("No queues cleaning...")
return
max_checks = MULTIPLIER_MAX_CHECKS * (len(self.hosts) + len(self.services))
max_broks = MULTIPLIER_MAX_BROKS * (len(self.hosts) + len(self.services))
max_actions = MULTIPLIER_MAX_ACTIONS * len(self.contacts) * (len(self.hosts) +
len(self.services))
# For checks, it's not very simple:
# For checks, they may be referred to their host/service
# We do not just del them in the check list, but also in their service/host
# We want ids lower than max_id - 2*max_checks
self.nb_checks_dropped = 0
if max_checks and len(self.checks) > max_checks:
# keys does not ensure sorted keys. Max is slow but we have no other way.
to_del_checks = [c for c in list(self.checks.values())]
to_del_checks.sort(key=lambda x: x.creation_time)
to_del_checks = to_del_checks[:-max_checks]
self.nb_checks_dropped = len(to_del_checks)
if to_del_checks:
logger.warning("I have to drop some checks (%d)..., sorry :(",
self.nb_checks_dropped)
for chk in to_del_checks:
c_id = chk.uuid
items = getattr(self, chk.ref_type + 's')
elt = items[chk.ref]
# First remove the link in host/service
elt.remove_in_progress_check(chk)
# Then in dependent checks (I depend on, or check
# depend on me)
for dependent_checks in chk.depend_on_me:
dependent_checks.depend_on.remove(chk.uuid)
for c_temp in chk.depend_on:
c_temp.depend_on_me.remove(chk)
del self.checks[c_id] # Final Bye bye ...
# For broks and actions, it's more simple
# For broks, manage global but also all brokers
self.nb_broks_dropped = 0
for broker_link in list(self.my_daemon.brokers.values()):
if max_broks and len(broker_link.broks) > max_broks:
logger.warning("I have to drop some broks (%d > %d) for the broker %s "
"..., sorry :(", len(broker_link.broks), max_broks, broker_link)
kept_broks = sorted(broker_link.broks, key=lambda x: x.creation_time)
# Delete the oldest broks to keep the max_broks most recent...
# todo: is it a good choice !
broker_link.broks = kept_broks[0:max_broks]
self.nb_actions_dropped = 0
if max_actions and len(self.actions) > max_actions:
logger.warning("I have to del some actions (currently: %d, max: %d)..., sorry :(",
len(self.actions), max_actions)
to_del_actions = [c for c in list(self.actions.values())]
to_del_actions.sort(key=lambda x: x.creation_time)
to_del_actions = to_del_actions[:-max_actions]
self.nb_actions_dropped = len(to_del_actions)
for act in to_del_actions:
if act.is_a == 'notification':
self.find_item_by_id(act.ref).remove_in_progress_notification(act)
del self.actions[act.uuid]
|
[
"def",
"clean_queues",
"(",
"self",
")",
":",
"# pylint: disable=too-many-locals",
"# If we set the interval at 0, we bail out",
"if",
"getattr",
"(",
"self",
".",
"pushed_conf",
",",
"'tick_clean_queues'",
",",
"0",
")",
"==",
"0",
":",
"logger",
".",
"debug",
"(",
"\"No queues cleaning...\"",
")",
"return",
"max_checks",
"=",
"MULTIPLIER_MAX_CHECKS",
"*",
"(",
"len",
"(",
"self",
".",
"hosts",
")",
"+",
"len",
"(",
"self",
".",
"services",
")",
")",
"max_broks",
"=",
"MULTIPLIER_MAX_BROKS",
"*",
"(",
"len",
"(",
"self",
".",
"hosts",
")",
"+",
"len",
"(",
"self",
".",
"services",
")",
")",
"max_actions",
"=",
"MULTIPLIER_MAX_ACTIONS",
"*",
"len",
"(",
"self",
".",
"contacts",
")",
"*",
"(",
"len",
"(",
"self",
".",
"hosts",
")",
"+",
"len",
"(",
"self",
".",
"services",
")",
")",
"# For checks, it's not very simple:",
"# For checks, they may be referred to their host/service",
"# We do not just del them in the check list, but also in their service/host",
"# We want id of lower than max_id - 2*max_checks",
"self",
".",
"nb_checks_dropped",
"=",
"0",
"if",
"max_checks",
"and",
"len",
"(",
"self",
".",
"checks",
")",
">",
"max_checks",
":",
"# keys does not ensure sorted keys. Max is slow but we have no other way.",
"to_del_checks",
"=",
"[",
"c",
"for",
"c",
"in",
"list",
"(",
"self",
".",
"checks",
".",
"values",
"(",
")",
")",
"]",
"to_del_checks",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"creation_time",
")",
"to_del_checks",
"=",
"to_del_checks",
"[",
":",
"-",
"max_checks",
"]",
"self",
".",
"nb_checks_dropped",
"=",
"len",
"(",
"to_del_checks",
")",
"if",
"to_del_checks",
":",
"logger",
".",
"warning",
"(",
"\"I have to drop some checks (%d)..., sorry :(\"",
",",
"self",
".",
"nb_checks_dropped",
")",
"for",
"chk",
"in",
"to_del_checks",
":",
"c_id",
"=",
"chk",
".",
"uuid",
"items",
"=",
"getattr",
"(",
"self",
",",
"chk",
".",
"ref_type",
"+",
"'s'",
")",
"elt",
"=",
"items",
"[",
"chk",
".",
"ref",
"]",
"# First remove the link in host/service",
"elt",
".",
"remove_in_progress_check",
"(",
"chk",
")",
"# Then in dependent checks (I depend on, or check",
"# depend on me)",
"for",
"dependent_checks",
"in",
"chk",
".",
"depend_on_me",
":",
"dependent_checks",
".",
"depend_on",
".",
"remove",
"(",
"chk",
".",
"uuid",
")",
"for",
"c_temp",
"in",
"chk",
".",
"depend_on",
":",
"c_temp",
".",
"depend_on_me",
".",
"remove",
"(",
"chk",
")",
"del",
"self",
".",
"checks",
"[",
"c_id",
"]",
"# Final Bye bye ...",
"# For broks and actions, it's more simple",
"# or broks, manage global but also all brokers",
"self",
".",
"nb_broks_dropped",
"=",
"0",
"for",
"broker_link",
"in",
"list",
"(",
"self",
".",
"my_daemon",
".",
"brokers",
".",
"values",
"(",
")",
")",
":",
"if",
"max_broks",
"and",
"len",
"(",
"broker_link",
".",
"broks",
")",
">",
"max_broks",
":",
"logger",
".",
"warning",
"(",
"\"I have to drop some broks (%d > %d) for the broker %s \"",
"\"..., sorry :(\"",
",",
"len",
"(",
"broker_link",
".",
"broks",
")",
",",
"max_broks",
",",
"broker_link",
")",
"kept_broks",
"=",
"sorted",
"(",
"broker_link",
".",
"broks",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"creation_time",
")",
"# Delete the oldest broks to keep the max_broks most recent...",
"# todo: is it a good choice !",
"broker_link",
".",
"broks",
"=",
"kept_broks",
"[",
"0",
":",
"max_broks",
"]",
"self",
".",
"nb_actions_dropped",
"=",
"0",
"if",
"max_actions",
"and",
"len",
"(",
"self",
".",
"actions",
")",
">",
"max_actions",
":",
"logger",
".",
"warning",
"(",
"\"I have to del some actions (currently: %d, max: %d)..., sorry :(\"",
",",
"len",
"(",
"self",
".",
"actions",
")",
",",
"max_actions",
")",
"to_del_actions",
"=",
"[",
"c",
"for",
"c",
"in",
"list",
"(",
"self",
".",
"actions",
".",
"values",
"(",
")",
")",
"]",
"to_del_actions",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"creation_time",
")",
"to_del_actions",
"=",
"to_del_actions",
"[",
":",
"-",
"max_actions",
"]",
"self",
".",
"nb_actions_dropped",
"=",
"len",
"(",
"to_del_actions",
")",
"for",
"act",
"in",
"to_del_actions",
":",
"if",
"act",
".",
"is_a",
"==",
"'notification'",
":",
"self",
".",
"find_item_by_id",
"(",
"act",
".",
"ref",
")",
".",
"remove_in_progress_notification",
"(",
"act",
")",
"del",
"self",
".",
"actions",
"[",
"act",
".",
"uuid",
"]"
] |
Reduces internal list size to max allowed
* checks and broks : 5 * length of hosts + services
* actions : 5 * length of hosts + services + contacts
:return: None
|
[
"Reduces",
"internal",
"list",
"size",
"to",
"max",
"allowed"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L609-L680
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.update_business_values
|
def update_business_values(self):
"""Iter over host and service and update business_impact
:return: None
"""
for elt in self.all_my_hosts_and_services():
if not elt.is_problem:
was = elt.business_impact
elt.update_business_impact_value(self.hosts, self.services,
self.timeperiods, self.businessimpactmodulations)
new = elt.business_impact
# Ok, the business_impact change, we can update the broks
if new != was:
self.get_and_register_status_brok(elt)
# When all impacts and classic elements are updated,
# we can update problems (their value depend on impacts, so
# they must be done after)
for elt in self.all_my_hosts_and_services():
# We first update impacts and classic elements
if elt.is_problem:
was = elt.business_impact
elt.update_business_impact_value(self.hosts, self.services,
self.timeperiods, self.businessimpactmodulations)
new = elt.business_impact
# Maybe one of the impacts changes its business_impact to a high value
# and so ask for the problem to raise too
if new != was:
self.get_and_register_status_brok(elt)
|
python
|
def update_business_values(self):
"""Iter over host and service and update business_impact
:return: None
"""
for elt in self.all_my_hosts_and_services():
if not elt.is_problem:
was = elt.business_impact
elt.update_business_impact_value(self.hosts, self.services,
self.timeperiods, self.businessimpactmodulations)
new = elt.business_impact
# Ok, the business_impact change, we can update the broks
if new != was:
self.get_and_register_status_brok(elt)
# When all impacts and classic elements are updated,
# we can update problems (their value depend on impacts, so
# they must be done after)
for elt in self.all_my_hosts_and_services():
# We first update impacts and classic elements
if elt.is_problem:
was = elt.business_impact
elt.update_business_impact_value(self.hosts, self.services,
self.timeperiods, self.businessimpactmodulations)
new = elt.business_impact
# Maybe one of the impacts changes its business_impact to a high value
# and so ask for the problem to raise too
if new != was:
self.get_and_register_status_brok(elt)
|
[
"def",
"update_business_values",
"(",
"self",
")",
":",
"for",
"elt",
"in",
"self",
".",
"all_my_hosts_and_services",
"(",
")",
":",
"if",
"not",
"elt",
".",
"is_problem",
":",
"was",
"=",
"elt",
".",
"business_impact",
"elt",
".",
"update_business_impact_value",
"(",
"self",
".",
"hosts",
",",
"self",
".",
"services",
",",
"self",
".",
"timeperiods",
",",
"self",
".",
"businessimpactmodulations",
")",
"new",
"=",
"elt",
".",
"business_impact",
"# Ok, the business_impact change, we can update the broks",
"if",
"new",
"!=",
"was",
":",
"self",
".",
"get_and_register_status_brok",
"(",
"elt",
")",
"# When all impacts and classic elements are updated,",
"# we can update problems (their value depend on impacts, so",
"# they must be done after)",
"for",
"elt",
"in",
"self",
".",
"all_my_hosts_and_services",
"(",
")",
":",
"# We first update impacts and classic elements",
"if",
"elt",
".",
"is_problem",
":",
"was",
"=",
"elt",
".",
"business_impact",
"elt",
".",
"update_business_impact_value",
"(",
"self",
".",
"hosts",
",",
"self",
".",
"services",
",",
"self",
".",
"timeperiods",
",",
"self",
".",
"businessimpactmodulations",
")",
"new",
"=",
"elt",
".",
"business_impact",
"# Maybe one of the impacts change it's business_impact to a high value",
"# and so ask for the problem to raise too",
"if",
"new",
"!=",
"was",
":",
"self",
".",
"get_and_register_status_brok",
"(",
"elt",
")"
] |
Iter over host and service and update business_impact
:return: None
|
[
"Iter",
"over",
"host",
"and",
"service",
"and",
"update",
"business_impact"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L716-L744
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.scatter_master_notifications
|
def scatter_master_notifications(self):
"""Generate children notifications from a master notification
Also update notification number
Master notifications are raised when a notification must be sent out. They are not
launched by reactionners (only children are) but they are used to build the
children notifications.
From one master notification, several children notifications may be built,
one per contact...
:return: None
"""
now = time.time()
# We only want the master scheduled notifications that are immediately launchable
notifications = [a for a in self.actions.values()
if a.is_a == u'notification' and a.status == ACT_STATUS_SCHEDULED
and not a.contact and a.is_launchable(now)]
if notifications:
logger.debug("Scatter master notification: %d notifications",
len(notifications))
for notification in notifications:
logger.debug("Scheduler got a master notification: %s", notification)
# This is a "master" notification created by an host/service.
# We use it to create children notifications (for the contacts and
# notification_commands) which are executed in the reactionner.
item = self.find_item_by_id(notification.ref)
children = []
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
if not item.is_blocking_notifications(notification_period,
self.hosts, self.services,
notification.type, now):
# If it is possible to send notifications
# of this type at the current time, then create
# a single notification for each contact of this item.
children = item.scatter_notification(
notification, self.contacts, self.notificationways, self.timeperiods,
self.macromodulations, self.escalations,
self.find_item_by_id(getattr(item, "host", None))
)
for notif in children:
logger.debug(" - child notification: %s", notif)
notif.status = ACT_STATUS_SCHEDULED
# Add the notification to the scheduler objects
self.add(notif)
# If we have notification_interval then schedule
# the next notification (problems only)
if notification.type == u'PROBLEM':
# Update the ref notif number after raise the one of the notification
if children:
# notif_nb of the master notification
# was already current_notification_number+1.
# If notifications were sent,
# then host/service-counter will also be incremented
item.current_notification_number = notification.notif_nb
if item.notification_interval and notification.t_to_go is not None:
# We must continue to send notifications.
# Just leave it in the actions list and set it to "scheduled"
# and it will be found again later
# Ask the service/host to compute the next notif time. It can be just
# a.t_to_go + item.notification_interval*item.__class__.interval_length
# or maybe before because we have an
# escalation that need to raise up before
notification.t_to_go = item.get_next_notification_time(notification,
self.escalations,
self.timeperiods)
notification.notif_nb = item.current_notification_number + 1
logger.debug("Repeat master notification: %s", notification)
else:
# Wipe out this master notification. It is a master one
item.remove_in_progress_notification(notification)
logger.debug("Remove master notification (no repeat): %s", notification)
else:
# Wipe out this master notification.
logger.debug("Remove master notification (no more a problem): %s", notification)
# We don't repeat recover/downtime/flap/etc...
item.remove_in_progress_notification(notification)
|
python
|
def scatter_master_notifications(self):
"""Generate children notifications from a master notification
Also update notification number
Master notifications are raised when a notification must be sent out. They are not
launched by reactionners (only the children are) but they are used to build the
children notifications.
From one master notification, several children notifications may be built,
one per contact...
:return: None
"""
now = time.time()
# We only want the master scheduled notifications that are immediately launchable
notifications = [a for a in self.actions.values()
if a.is_a == u'notification' and a.status == ACT_STATUS_SCHEDULED
and not a.contact and a.is_launchable(now)]
if notifications:
logger.debug("Scatter master notification: %d notifications",
len(notifications))
for notification in notifications:
logger.debug("Scheduler got a master notification: %s", notification)
# This is a "master" notification created by an host/service.
# We use it to create children notifications (for the contacts and
# notification_commands) which are executed in the reactionner.
item = self.find_item_by_id(notification.ref)
children = []
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
if not item.is_blocking_notifications(notification_period,
self.hosts, self.services,
notification.type, now):
# If it is possible to send notifications
# of this type at the current time, then create
# a single notification for each contact of this item.
children = item.scatter_notification(
notification, self.contacts, self.notificationways, self.timeperiods,
self.macromodulations, self.escalations,
self.find_item_by_id(getattr(item, "host", None))
)
for notif in children:
logger.debug(" - child notification: %s", notif)
notif.status = ACT_STATUS_SCHEDULED
# Add the notification to the scheduler objects
self.add(notif)
# If we have notification_interval then schedule
# the next notification (problems only)
if notification.type == u'PROBLEM':
# Update the ref notif number after raise the one of the notification
if children:
# notif_nb of the master notification
# was already current_notification_number+1.
# If notifications were sent,
# then host/service-counter will also be incremented
item.current_notification_number = notification.notif_nb
if item.notification_interval and notification.t_to_go is not None:
# We must continue to send notifications.
# Just leave it in the actions list and set it to "scheduled"
# and it will be found again later
# Ask the service/host to compute the next notif time. It can be just
# a.t_to_go + item.notification_interval*item.__class__.interval_length
# or maybe before because we have an
# escalation that need to raise up before
notification.t_to_go = item.get_next_notification_time(notification,
self.escalations,
self.timeperiods)
notification.notif_nb = item.current_notification_number + 1
logger.debug("Repeat master notification: %s", notification)
else:
# Wipe out this master notification. It is a master one
item.remove_in_progress_notification(notification)
logger.debug("Remove master notification (no repeat): %s", notification)
else:
# Wipe out this master notification.
logger.debug("Remove master notification (no more a problem): %s", notification)
# We don't repeat recover/downtime/flap/etc...
item.remove_in_progress_notification(notification)
|
[
"def",
"scatter_master_notifications",
"(",
"self",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"# We only want the master scheduled notifications that are immediately launchable",
"notifications",
"=",
"[",
"a",
"for",
"a",
"in",
"self",
".",
"actions",
".",
"values",
"(",
")",
"if",
"a",
".",
"is_a",
"==",
"u'notification'",
"and",
"a",
".",
"status",
"==",
"ACT_STATUS_SCHEDULED",
"and",
"not",
"a",
".",
"contact",
"and",
"a",
".",
"is_launchable",
"(",
"now",
")",
"]",
"if",
"notifications",
":",
"logger",
".",
"debug",
"(",
"\"Scatter master notification: %d notifications\"",
",",
"len",
"(",
"notifications",
")",
")",
"for",
"notification",
"in",
"notifications",
":",
"logger",
".",
"debug",
"(",
"\"Scheduler got a master notification: %s\"",
",",
"notification",
")",
"# This is a \"master\" notification created by an host/service.",
"# We use it to create children notifications (for the contacts and",
"# notification_commands) which are executed in the reactionner.",
"item",
"=",
"self",
".",
"find_item_by_id",
"(",
"notification",
".",
"ref",
")",
"children",
"=",
"[",
"]",
"notification_period",
"=",
"None",
"if",
"getattr",
"(",
"item",
",",
"'notification_period'",
",",
"None",
")",
"is",
"not",
"None",
":",
"notification_period",
"=",
"self",
".",
"timeperiods",
"[",
"item",
".",
"notification_period",
"]",
"if",
"not",
"item",
".",
"is_blocking_notifications",
"(",
"notification_period",
",",
"self",
".",
"hosts",
",",
"self",
".",
"services",
",",
"notification",
".",
"type",
",",
"now",
")",
":",
"# If it is possible to send notifications",
"# of this type at the current time, then create",
"# a single notification for each contact of this item.",
"children",
"=",
"item",
".",
"scatter_notification",
"(",
"notification",
",",
"self",
".",
"contacts",
",",
"self",
".",
"notificationways",
",",
"self",
".",
"timeperiods",
",",
"self",
".",
"macromodulations",
",",
"self",
".",
"escalations",
",",
"self",
".",
"find_item_by_id",
"(",
"getattr",
"(",
"item",
",",
"\"host\"",
",",
"None",
")",
")",
")",
"for",
"notif",
"in",
"children",
":",
"logger",
".",
"debug",
"(",
"\" - child notification: %s\"",
",",
"notif",
")",
"notif",
".",
"status",
"=",
"ACT_STATUS_SCHEDULED",
"# Add the notification to the scheduler objects",
"self",
".",
"add",
"(",
"notif",
")",
"# If we have notification_interval then schedule",
"# the next notification (problems only)",
"if",
"notification",
".",
"type",
"==",
"u'PROBLEM'",
":",
"# Update the ref notif number after raise the one of the notification",
"if",
"children",
":",
"# notif_nb of the master notification",
"# was already current_notification_number+1.",
"# If notifications were sent,",
"# then host/service-counter will also be incremented",
"item",
".",
"current_notification_number",
"=",
"notification",
".",
"notif_nb",
"if",
"item",
".",
"notification_interval",
"and",
"notification",
".",
"t_to_go",
"is",
"not",
"None",
":",
"# We must continue to send notifications.",
"# Just leave it in the actions list and set it to \"scheduled\"",
"# and it will be found again later",
"# Ask the service/host to compute the next notif time. It can be just",
"# a.t_to_go + item.notification_interval*item.__class__.interval_length",
"# or maybe before because we have an",
"# escalation that need to raise up before",
"notification",
".",
"t_to_go",
"=",
"item",
".",
"get_next_notification_time",
"(",
"notification",
",",
"self",
".",
"escalations",
",",
"self",
".",
"timeperiods",
")",
"notification",
".",
"notif_nb",
"=",
"item",
".",
"current_notification_number",
"+",
"1",
"logger",
".",
"debug",
"(",
"\"Repeat master notification: %s\"",
",",
"notification",
")",
"else",
":",
"# Wipe out this master notification. It is a master one",
"item",
".",
"remove_in_progress_notification",
"(",
"notification",
")",
"logger",
".",
"debug",
"(",
"\"Remove master notification (no repeat): %s\"",
",",
"notification",
")",
"else",
":",
"# Wipe out this master notification.",
"logger",
".",
"debug",
"(",
"\"Remove master notification (no more a problem): %s\"",
",",
"notification",
")",
"# We don't repeat recover/downtime/flap/etc...",
"item",
".",
"remove_in_progress_notification",
"(",
"notification",
")"
] |
Generate children notifications from a master notification
Also update notification number
Master notifications are raised when a notification must be sent out. They are not
launched by reactionners (only the children are) but they are used to build the
children notifications.
From one master notification, several children notifications may be built,
one per contact...
:return: None
|
[
"Generate",
"children",
"notifications",
"from",
"a",
"master",
"notification",
"Also",
"update",
"notification",
"number"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L746-L828
|
train
|
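Note: the record above shows how master notifications are filtered and scattered into per-contact children. The following is a minimal, self-contained sketch of that pattern; the Notification class, the contact names and the scatter helper are simplified stand-ins for illustration, not the Alignak API.

import time

class Notification:
    def __init__(self, ref, contact=None, status='scheduled', t_to_go=None):
        self.ref = ref
        self.contact = contact
        self.status = status
        self.t_to_go = t_to_go if t_to_go is not None else time.time()

    def is_launchable(self, now):
        return self.t_to_go <= now

def scatter(master, contacts):
    # Build one child notification per contact from the master one
    return [Notification(master.ref, contact=c) for c in contacts]

actions = {1: Notification('host1'), 2: Notification('host1', contact='admin')}
now = time.time()
# Keep only the contact-less (master) scheduled notifications that are launchable now
masters = [a for a in actions.values()
           if a.status == 'scheduled' and not a.contact and a.is_launchable(now)]
for master in masters:
    for child in scatter(master, ['alice', 'bob']):
        print("child notification for", child.contact)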
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.manage_internal_checks
|
def manage_internal_checks(self):
"""Run internal checks
:return: None
"""
if os.getenv('ALIGNAK_MANAGE_INTERNAL', '1') != '1':
return
now = time.time()
for chk in list(self.checks.values()):
if not chk.internal:
# Exclude checks that are not internal ones
continue
# Exclude checks that are not yet ready to launch
if not chk.is_launchable(now) or chk.status not in [ACT_STATUS_SCHEDULED]:
continue
item = self.find_item_by_id(chk.ref)
# Only if active checks are enabled
if not item or not item.active_checks_enabled:
# Ask to remove the check
chk.status = ACT_STATUS_ZOMBIE
continue
logger.debug("Run internal check for %s", item)
self.nb_internal_checks += 1
# Execute internal check
item.manage_internal_check(self.hosts, self.services, chk, self.hostgroups,
self.servicegroups, self.macromodulations,
self.timeperiods)
# Ask to consume the check result
chk.status = ACT_STATUS_WAIT_CONSUME
|
python
|
def manage_internal_checks(self):
"""Run internal checks
:return: None
"""
if os.getenv('ALIGNAK_MANAGE_INTERNAL', '1') != '1':
return
now = time.time()
for chk in list(self.checks.values()):
if not chk.internal:
# Exclude checks that are not internal ones
continue
# Exclude checks that are not yet ready to launch
if not chk.is_launchable(now) or chk.status not in [ACT_STATUS_SCHEDULED]:
continue
item = self.find_item_by_id(chk.ref)
# Only if active checks are enabled
if not item or not item.active_checks_enabled:
# Ask to remove the check
chk.status = ACT_STATUS_ZOMBIE
continue
logger.debug("Run internal check for %s", item)
self.nb_internal_checks += 1
# Execute internal check
item.manage_internal_check(self.hosts, self.services, chk, self.hostgroups,
self.servicegroups, self.macromodulations,
self.timeperiods)
# Ask to consume the check result
chk.status = ACT_STATUS_WAIT_CONSUME
|
[
"def",
"manage_internal_checks",
"(",
"self",
")",
":",
"if",
"os",
".",
"getenv",
"(",
"'ALIGNAK_MANAGE_INTERNAL'",
",",
"'1'",
")",
"!=",
"'1'",
":",
"return",
"now",
"=",
"time",
".",
"time",
"(",
")",
"for",
"chk",
"in",
"list",
"(",
"self",
".",
"checks",
".",
"values",
"(",
")",
")",
":",
"if",
"not",
"chk",
".",
"internal",
":",
"# Exclude checks that are not internal ones",
"continue",
"# Exclude checks that are not yet ready to launch",
"if",
"not",
"chk",
".",
"is_launchable",
"(",
"now",
")",
"or",
"chk",
".",
"status",
"not",
"in",
"[",
"ACT_STATUS_SCHEDULED",
"]",
":",
"continue",
"item",
"=",
"self",
".",
"find_item_by_id",
"(",
"chk",
".",
"ref",
")",
"# Only if active checks are enabled",
"if",
"not",
"item",
"or",
"not",
"item",
".",
"active_checks_enabled",
":",
"# Ask to remove the check",
"chk",
".",
"status",
"=",
"ACT_STATUS_ZOMBIE",
"continue",
"logger",
".",
"debug",
"(",
"\"Run internal check for %s\"",
",",
"item",
")",
"self",
".",
"nb_internal_checks",
"+=",
"1",
"# Execute internal check",
"item",
".",
"manage_internal_check",
"(",
"self",
".",
"hosts",
",",
"self",
".",
"services",
",",
"chk",
",",
"self",
".",
"hostgroups",
",",
"self",
".",
"servicegroups",
",",
"self",
".",
"macromodulations",
",",
"self",
".",
"timeperiods",
")",
"# Ask to consume the check result",
"chk",
".",
"status",
"=",
"ACT_STATUS_WAIT_CONSUME"
] |
Run internal checks
:return: None
|
[
"Run",
"internal",
"checks"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1190-L1222
|
train
|
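Note: the manage_internal_checks record above gates the whole loop on an environment variable and moves checks from 'scheduled' to 'waitconsume' or 'zombie'. Below is a hedged, simplified sketch of that flow; the Check class and the items dictionary are illustrative stand-ins, not the Alignak objects.

import os
import time

class Check:
    def __init__(self, internal, t_to_go, ref):
        self.internal = internal
        self.t_to_go = t_to_go
        self.ref = ref
        self.status = 'scheduled'

    def is_launchable(self, now):
        return self.t_to_go <= now

def manage_internal_checks(checks, items):
    if os.getenv('ALIGNAK_MANAGE_INTERNAL', '1') != '1':
        return
    now = time.time()
    for chk in checks:
        if not chk.internal or not chk.is_launchable(now) or chk.status != 'scheduled':
            continue
        item = items.get(chk.ref)
        if not item or not item.get('active_checks_enabled'):
            chk.status = 'zombie'      # will be cleaned up later
            continue
        print("running internal check for", chk.ref)
        chk.status = 'waitconsume'     # the result is consumed by a later loop

checks = [Check(True, time.time() - 1, 'host1'), Check(False, time.time() - 1, 'host2')]
items = {'host1': {'active_checks_enabled': True}}
manage_internal_checks(checks, items)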
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.reset_topology_change_flag
|
def reset_topology_change_flag(self):
"""Set topology_change attribute to False in all hosts and services
:return: None
"""
for i in self.hosts:
i.topology_change = False
for i in self.services:
i.topology_change = False
|
python
|
def reset_topology_change_flag(self):
"""Set topology_change attribute to False in all hosts and services
:return: None
"""
for i in self.hosts:
i.topology_change = False
for i in self.services:
i.topology_change = False
|
[
"def",
"reset_topology_change_flag",
"(",
"self",
")",
":",
"for",
"i",
"in",
"self",
".",
"hosts",
":",
"i",
".",
"topology_change",
"=",
"False",
"for",
"i",
"in",
"self",
".",
"services",
":",
"i",
".",
"topology_change",
"=",
"False"
] |
Set topology_change attribute to False in all hosts and services
:return: None
|
[
"Set",
"topology_change",
"attribute",
"to",
"False",
"in",
"all",
"hosts",
"and",
"services"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1224-L1232
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.log_initial_states
|
def log_initial_states(self):
"""Raise hosts and services initial status logs
First, raise the hosts status and then the services status. This allows the events log
to be roughly sorted.
:return: None
"""
# Raise hosts initial status broks
for elt in self.hosts:
elt.raise_initial_state()
# And then services initial status broks
for elt in self.services:
elt.raise_initial_state()
|
python
|
def log_initial_states(self):
"""Raise hosts and services initial status logs
First, raise the hosts status and then the services status. This allows the events log
to be roughly sorted.
:return: None
"""
# Raise hosts initial status broks
for elt in self.hosts:
elt.raise_initial_state()
# And then services initial status broks
for elt in self.services:
elt.raise_initial_state()
|
[
"def",
"log_initial_states",
"(",
"self",
")",
":",
"# Raise hosts initial status broks",
"for",
"elt",
"in",
"self",
".",
"hosts",
":",
"elt",
".",
"raise_initial_state",
"(",
")",
"# And then services initial status broks",
"for",
"elt",
"in",
"self",
".",
"services",
":",
"elt",
".",
"raise_initial_state",
"(",
")"
] |
Raise hosts and services initial status logs
First, raise the hosts status and then the services status. This allows the events log
to be roughly sorted.
:return: None
|
[
"Raise",
"hosts",
"and",
"services",
"initial",
"status",
"logs"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1276-L1290
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.get_retention_data
|
def get_retention_data(self): # pylint: disable=too-many-branches,too-many-statements
# pylint: disable=too-many-locals
"""Get all hosts and services data to be sent to the retention storage.
This function only prepares the data because a module is in charge of making
the data survive the scheduler restart.
todo: Alignak scheduler creates two separate dictionaries: hosts and services
It would be better to merge the services into the host dictionary!
:return: dict containing host and service data
:rtype: dict
"""
retention_data = {
'hosts': {}, 'services': {}
}
for host in self.hosts:
h_dict = {}
# Get the hosts properties and running properties
properties = host.__class__.properties
properties.update(host.__class__.running_properties)
for prop, entry in list(properties.items()):
if not entry.retention:
continue
val = getattr(host, prop)
# If a preparation function exists...
prepare_retention = entry.retention_preparation
if prepare_retention:
val = prepare_retention(host, val)
h_dict[prop] = val
retention_data['hosts'][host.host_name] = h_dict
logger.info('%d hosts sent to retention', len(retention_data['hosts']))
# Same for services
for service in self.services:
s_dict = {}
# Get the services properties and running properties
properties = service.__class__.properties
properties.update(service.__class__.running_properties)
for prop, entry in list(properties.items()):
if not entry.retention:
continue
val = getattr(service, prop)
# If a preparation function exists...
prepare_retention = entry.retention_preparation
if prepare_retention:
val = prepare_retention(service, val)
s_dict[prop] = val
retention_data['services'][(service.host_name, service.service_description)] = s_dict
logger.info('%d services sent to retention', len(retention_data['services']))
return retention_data
|
python
|
def get_retention_data(self): # pylint: disable=too-many-branches,too-many-statements
# pylint: disable=too-many-locals
"""Get all hosts and services data to be sent to the retention storage.
This function only prepares the data because a module is in charge of making
the data survive the scheduler restart.
todo: Alignak scheduler creates two separate dictionaries: hosts and services
It would be better to merge the services into the host dictionary!
:return: dict containing host and service data
:rtype: dict
"""
retention_data = {
'hosts': {}, 'services': {}
}
for host in self.hosts:
h_dict = {}
# Get the hosts properties and running properties
properties = host.__class__.properties
properties.update(host.__class__.running_properties)
for prop, entry in list(properties.items()):
if not entry.retention:
continue
val = getattr(host, prop)
# If a preparation function exists...
prepare_retention = entry.retention_preparation
if prepare_retention:
val = prepare_retention(host, val)
h_dict[prop] = val
retention_data['hosts'][host.host_name] = h_dict
logger.info('%d hosts sent to retention', len(retention_data['hosts']))
# Same for services
for service in self.services:
s_dict = {}
# Get the services properties and running properties
properties = service.__class__.properties
properties.update(service.__class__.running_properties)
for prop, entry in list(properties.items()):
if not entry.retention:
continue
val = getattr(service, prop)
# If a preparation function exists...
prepare_retention = entry.retention_preparation
if prepare_retention:
val = prepare_retention(service, val)
s_dict[prop] = val
retention_data['services'][(service.host_name, service.service_description)] = s_dict
logger.info('%d services sent to retention', len(retention_data['services']))
return retention_data
|
[
"def",
"get_retention_data",
"(",
"self",
")",
":",
"# pylint: disable=too-many-branches,too-many-statements",
"# pylint: disable=too-many-locals",
"retention_data",
"=",
"{",
"'hosts'",
":",
"{",
"}",
",",
"'services'",
":",
"{",
"}",
"}",
"for",
"host",
"in",
"self",
".",
"hosts",
":",
"h_dict",
"=",
"{",
"}",
"# Get the hosts properties and running properties",
"properties",
"=",
"host",
".",
"__class__",
".",
"properties",
"properties",
".",
"update",
"(",
"host",
".",
"__class__",
".",
"running_properties",
")",
"for",
"prop",
",",
"entry",
"in",
"list",
"(",
"properties",
".",
"items",
"(",
")",
")",
":",
"if",
"not",
"entry",
".",
"retention",
":",
"continue",
"val",
"=",
"getattr",
"(",
"host",
",",
"prop",
")",
"# If a preparation function exists...",
"prepare_retention",
"=",
"entry",
".",
"retention_preparation",
"if",
"prepare_retention",
":",
"val",
"=",
"prepare_retention",
"(",
"host",
",",
"val",
")",
"h_dict",
"[",
"prop",
"]",
"=",
"val",
"retention_data",
"[",
"'hosts'",
"]",
"[",
"host",
".",
"host_name",
"]",
"=",
"h_dict",
"logger",
".",
"info",
"(",
"'%d hosts sent to retention'",
",",
"len",
"(",
"retention_data",
"[",
"'hosts'",
"]",
")",
")",
"# Same for services",
"for",
"service",
"in",
"self",
".",
"services",
":",
"s_dict",
"=",
"{",
"}",
"# Get the services properties and running properties",
"properties",
"=",
"service",
".",
"__class__",
".",
"properties",
"properties",
".",
"update",
"(",
"service",
".",
"__class__",
".",
"running_properties",
")",
"for",
"prop",
",",
"entry",
"in",
"list",
"(",
"properties",
".",
"items",
"(",
")",
")",
":",
"if",
"not",
"entry",
".",
"retention",
":",
"continue",
"val",
"=",
"getattr",
"(",
"service",
",",
"prop",
")",
"# If a preparation function exists...",
"prepare_retention",
"=",
"entry",
".",
"retention_preparation",
"if",
"prepare_retention",
":",
"val",
"=",
"prepare_retention",
"(",
"service",
",",
"val",
")",
"s_dict",
"[",
"prop",
"]",
"=",
"val",
"retention_data",
"[",
"'services'",
"]",
"[",
"(",
"service",
".",
"host_name",
",",
"service",
".",
"service_description",
")",
"]",
"=",
"s_dict",
"logger",
".",
"info",
"(",
"'%d services sent to retention'",
",",
"len",
"(",
"retention_data",
"[",
"'services'",
"]",
")",
")",
"return",
"retention_data"
] |
Get all hosts and services data to be sent to the retention storage.
This function only prepares the data because a module is in charge of making
the data survive the scheduler restart.
todo: Alignak scheduler creates two separate dictionaries: hosts and services
It would be better to merge the services into the host dictionary!
:return: dict containing host and service data
:rtype: dict
|
[
"Get",
"all",
"hosts",
"and",
"services",
"data",
"to",
"be",
"sent",
"to",
"the",
"retention",
"storage",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1292-L1349
|
train
|
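Note: the get_retention_data record above walks each item's properties/running_properties dictionaries and keeps only the entries flagged for retention, applying an optional preparation callable. The sketch below illustrates that filtering with hypothetical, simplified property descriptors; Prop and Host here are not the Alignak classes.

class Prop:
    def __init__(self, retention=False, retention_preparation=None):
        self.retention = retention
        self.retention_preparation = retention_preparation

class Host:
    properties = {
        'host_name': Prop(retention=False),
        'state': Prop(retention=True),
        'last_check': Prop(retention=True,
                           retention_preparation=lambda host, val: int(val)),
    }
    def __init__(self, host_name, state, last_check):
        self.host_name, self.state, self.last_check = host_name, state, last_check

def host_retention_dict(host):
    h_dict = {}
    for prop, entry in host.properties.items():
        if not entry.retention:
            continue                      # only retention-flagged properties are saved
        val = getattr(host, prop)
        if entry.retention_preparation:
            val = entry.retention_preparation(host, val)
        h_dict[prop] = val
    return h_dict

print(host_retention_dict(Host('srv1', 'UP', 1700000000.5)))
# -> {'state': 'UP', 'last_check': 1700000000}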
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.restore_retention_data
|
def restore_retention_data(self, data):
"""Restore retention data
Data coming from retention will override data coming from the configuration.
It can be confusing when you modify an attribute (with an external command) and it gets saved
by retention.
:param data: data from retention
:type data: dict
:return: None
"""
if 'hosts' not in data:
logger.warning("Retention data are not correct, no 'hosts' property!")
return
for host_name in data['hosts']:
# We take the dict of our value to load
host = self.hosts.find_by_name(host_name)
if host is not None:
self.restore_retention_data_item(data['hosts'][host_name], host)
statsmgr.gauge('retention.hosts', len(data['hosts']))
logger.info('%d hosts restored from retention', len(data['hosts']))
# Same for services
for (host_name, service_description) in data['services']:
# We take our dict to load
service = self.services.find_srv_by_name_and_hostname(host_name, service_description)
if service is not None:
self.restore_retention_data_item(data['services'][(host_name, service_description)],
service)
statsmgr.gauge('retention.services', len(data['services']))
logger.info('%d services restored from retention', len(data['services']))
|
python
|
def restore_retention_data(self, data):
"""Restore retention data
Data coming from retention will override data coming from the configuration.
It can be confusing when you modify an attribute (with an external command) and it gets saved
by retention.
:param data: data from retention
:type data: dict
:return: None
"""
if 'hosts' not in data:
logger.warning("Retention data are not correct, no 'hosts' property!")
return
for host_name in data['hosts']:
# We take the dict of our value to load
host = self.hosts.find_by_name(host_name)
if host is not None:
self.restore_retention_data_item(data['hosts'][host_name], host)
statsmgr.gauge('retention.hosts', len(data['hosts']))
logger.info('%d hosts restored from retention', len(data['hosts']))
# Same for services
for (host_name, service_description) in data['services']:
# We take our dict to load
service = self.services.find_srv_by_name_and_hostname(host_name, service_description)
if service is not None:
self.restore_retention_data_item(data['services'][(host_name, service_description)],
service)
statsmgr.gauge('retention.services', len(data['services']))
logger.info('%d services restored from retention', len(data['services']))
|
[
"def",
"restore_retention_data",
"(",
"self",
",",
"data",
")",
":",
"if",
"'hosts'",
"not",
"in",
"data",
":",
"logger",
".",
"warning",
"(",
"\"Retention data are not correct, no 'hosts' property!\"",
")",
"return",
"for",
"host_name",
"in",
"data",
"[",
"'hosts'",
"]",
":",
"# We take the dict of our value to load",
"host",
"=",
"self",
".",
"hosts",
".",
"find_by_name",
"(",
"host_name",
")",
"if",
"host",
"is",
"not",
"None",
":",
"self",
".",
"restore_retention_data_item",
"(",
"data",
"[",
"'hosts'",
"]",
"[",
"host_name",
"]",
",",
"host",
")",
"statsmgr",
".",
"gauge",
"(",
"'retention.hosts'",
",",
"len",
"(",
"data",
"[",
"'hosts'",
"]",
")",
")",
"logger",
".",
"info",
"(",
"'%d hosts restored from retention'",
",",
"len",
"(",
"data",
"[",
"'hosts'",
"]",
")",
")",
"# Same for services",
"for",
"(",
"host_name",
",",
"service_description",
")",
"in",
"data",
"[",
"'services'",
"]",
":",
"# We take our dict to load",
"service",
"=",
"self",
".",
"services",
".",
"find_srv_by_name_and_hostname",
"(",
"host_name",
",",
"service_description",
")",
"if",
"service",
"is",
"not",
"None",
":",
"self",
".",
"restore_retention_data_item",
"(",
"data",
"[",
"'services'",
"]",
"[",
"(",
"host_name",
",",
"service_description",
")",
"]",
",",
"service",
")",
"statsmgr",
".",
"gauge",
"(",
"'retention.services'",
",",
"len",
"(",
"data",
"[",
"'services'",
"]",
")",
")",
"logger",
".",
"info",
"(",
"'%d services restored from retention'",
",",
"len",
"(",
"data",
"[",
"'services'",
"]",
")",
")"
] |
Restore retention data
Data coming from retention will override data coming from the configuration.
It can be confusing when you modify an attribute (with an external command) and it gets saved
by retention.
:param data: data from retention
:type data: dict
:return: None
|
[
"Restore",
"retention",
"data"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1351-L1382
|
train
|
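Note: the restore_retention_data record above keys restored hosts by host name and restored services by the (host_name, service_description) tuple, skipping anything not found in the live configuration. A minimal stand-alone sketch of that keying, with plain dictionaries standing in for the real objects:

retention = {
    'hosts': {'srv1': {'state': 'DOWN'}},
    'services': {('srv1', 'disk'): {'state': 'CRITICAL'}},
}

live_hosts = {'srv1': {}}
live_services = {('srv1', 'disk'): {}}

for host_name, data in retention['hosts'].items():
    host = live_hosts.get(host_name)
    if host is not None:
        host.update(data)          # retention overrides configuration values

for key, data in retention['services'].items():
    service = live_services.get(key)
    if service is not None:
        service.update(data)

print(live_hosts, live_services)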
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.restore_retention_data_item
|
def restore_retention_data_item(self, data, item):
# pylint: disable=too-many-branches, too-many-locals
"""
Restore data in item
:param data: retention data of the item
:type data: dict
:param item: host or service item
:type item: alignak.objects.host.Host | alignak.objects.service.Service
:return: None
"""
# Manage the properties and running properties
properties = item.__class__.properties
properties.update(item.__class__.running_properties)
for prop, entry in list(properties.items()):
if not entry.retention:
continue
if prop not in data:
continue
# If a restoration function exists...
restore_retention = entry.retention_restoration
if restore_retention:
setattr(item, prop, restore_retention(item, data[prop]))
else:
setattr(item, prop, data[prop])
# Now manage all linked objects load from/ previous run
for notification_uuid in item.notifications_in_progress:
notification = item.notifications_in_progress[notification_uuid]
# Update the notification referenced object
notification['ref'] = item.uuid
my_notification = Notification(params=notification)
item.notifications_in_progress[notification_uuid] = my_notification
# Add a notification in the scheduler actions
self.add(my_notification)
# todo: is it useful? We do not save/restore checks in the retention data...
item.update_in_checking()
# And also add downtimes and comments
# Downtimes are in a list..
for downtime_uuid in data['downtimes']:
downtime = data['downtimes'][downtime_uuid]
# Update the downtime referenced object
downtime['ref'] = item.uuid
my_downtime = Downtime(params=downtime)
if downtime['comment_id']:
if downtime['comment_id'] not in data['comments']:
downtime['comment_id'] = ''
# case comment_id has comment dict instead uuid
# todo: This should never happen! Why this code ?
if 'uuid' in downtime['comment_id']:
data['comments'].append(downtime['comment_id'])
downtime['comment_id'] = downtime['comment_id']['uuid']
item.add_downtime(my_downtime)
# Comments are in a list..
for comment_uuid in data['comments']:
comment = data['comments'][comment_uuid]
# Update the comment referenced object
comment['ref'] = item.uuid
item.add_comment(Comment(comment))
if item.acknowledgement is not None:
# Update the comment referenced object
item.acknowledgement['ref'] = item.uuid
item.acknowledgement = Acknowledge(item.acknowledgement)
# Relink the notified_contacts as a set() of true contacts objects
# if it was loaded from the retention, it's now a list of contacts
# names
new_notified_contacts = set()
new_notified_contacts_ids = set()
for contact_name in item.notified_contacts:
contact = self.contacts.find_by_name(contact_name)
if contact is not None:
new_notified_contacts.add(contact_name)
new_notified_contacts_ids.add(contact.uuid)
item.notified_contacts = new_notified_contacts
item.notified_contacts_ids = new_notified_contacts_ids
|
python
|
def restore_retention_data_item(self, data, item):
# pylint: disable=too-many-branches, too-many-locals
"""
Restore data in item
:param data: retention data of the item
:type data: dict
:param item: host or service item
:type item: alignak.objects.host.Host | alignak.objects.service.Service
:return: None
"""
# Manage the properties and running properties
properties = item.__class__.properties
properties.update(item.__class__.running_properties)
for prop, entry in list(properties.items()):
if not entry.retention:
continue
if prop not in data:
continue
# If a restoration function exists...
restore_retention = entry.retention_restoration
if restore_retention:
setattr(item, prop, restore_retention(item, data[prop]))
else:
setattr(item, prop, data[prop])
# Now manage all linked objects load from/ previous run
for notification_uuid in item.notifications_in_progress:
notification = item.notifications_in_progress[notification_uuid]
# Update the notification referenced object
notification['ref'] = item.uuid
my_notification = Notification(params=notification)
item.notifications_in_progress[notification_uuid] = my_notification
# Add a notification in the scheduler actions
self.add(my_notification)
# todo: is it useful? We do not save/restore checks in the retention data...
item.update_in_checking()
# And also add downtimes and comments
# Downtimes are in a list..
for downtime_uuid in data['downtimes']:
downtime = data['downtimes'][downtime_uuid]
# Update the downtime referenced object
downtime['ref'] = item.uuid
my_downtime = Downtime(params=downtime)
if downtime['comment_id']:
if downtime['comment_id'] not in data['comments']:
downtime['comment_id'] = ''
# case comment_id has comment dict instead uuid
# todo: This should never happen! Why this code ?
if 'uuid' in downtime['comment_id']:
data['comments'].append(downtime['comment_id'])
downtime['comment_id'] = downtime['comment_id']['uuid']
item.add_downtime(my_downtime)
# Comments are in a list..
for comment_uuid in data['comments']:
comment = data['comments'][comment_uuid]
# Update the comment referenced object
comment['ref'] = item.uuid
item.add_comment(Comment(comment))
if item.acknowledgement is not None:
# Update the comment referenced object
item.acknowledgement['ref'] = item.uuid
item.acknowledgement = Acknowledge(item.acknowledgement)
# Relink the notified_contacts as a set() of true contacts objects
# if it was loaded from the retention, it's now a list of contacts
# names
new_notified_contacts = set()
new_notified_contacts_ids = set()
for contact_name in item.notified_contacts:
contact = self.contacts.find_by_name(contact_name)
if contact is not None:
new_notified_contacts.add(contact_name)
new_notified_contacts_ids.add(contact.uuid)
item.notified_contacts = new_notified_contacts
item.notified_contacts_ids = new_notified_contacts_ids
|
[
"def",
"restore_retention_data_item",
"(",
"self",
",",
"data",
",",
"item",
")",
":",
"# pylint: disable=too-many-branches, too-many-locals",
"# Manage the properties and running properties",
"properties",
"=",
"item",
".",
"__class__",
".",
"properties",
"properties",
".",
"update",
"(",
"item",
".",
"__class__",
".",
"running_properties",
")",
"for",
"prop",
",",
"entry",
"in",
"list",
"(",
"properties",
".",
"items",
"(",
")",
")",
":",
"if",
"not",
"entry",
".",
"retention",
":",
"continue",
"if",
"prop",
"not",
"in",
"data",
":",
"continue",
"# If a restoration function exists...",
"restore_retention",
"=",
"entry",
".",
"retention_restoration",
"if",
"restore_retention",
":",
"setattr",
"(",
"item",
",",
"prop",
",",
"restore_retention",
"(",
"item",
",",
"data",
"[",
"prop",
"]",
")",
")",
"else",
":",
"setattr",
"(",
"item",
",",
"prop",
",",
"data",
"[",
"prop",
"]",
")",
"# Now manage all linked objects load from/ previous run",
"for",
"notification_uuid",
"in",
"item",
".",
"notifications_in_progress",
":",
"notification",
"=",
"item",
".",
"notifications_in_progress",
"[",
"notification_uuid",
"]",
"# Update the notification referenced object",
"notification",
"[",
"'ref'",
"]",
"=",
"item",
".",
"uuid",
"my_notification",
"=",
"Notification",
"(",
"params",
"=",
"notification",
")",
"item",
".",
"notifications_in_progress",
"[",
"notification_uuid",
"]",
"=",
"my_notification",
"# Add a notification in the scheduler actions",
"self",
".",
"add",
"(",
"my_notification",
")",
"# todo: is it useful? We do not save/restore checks in the retention data...",
"item",
".",
"update_in_checking",
"(",
")",
"# And also add downtimes and comments",
"# Downtimes are in a list..",
"for",
"downtime_uuid",
"in",
"data",
"[",
"'downtimes'",
"]",
":",
"downtime",
"=",
"data",
"[",
"'downtimes'",
"]",
"[",
"downtime_uuid",
"]",
"# Update the downtime referenced object",
"downtime",
"[",
"'ref'",
"]",
"=",
"item",
".",
"uuid",
"my_downtime",
"=",
"Downtime",
"(",
"params",
"=",
"downtime",
")",
"if",
"downtime",
"[",
"'comment_id'",
"]",
":",
"if",
"downtime",
"[",
"'comment_id'",
"]",
"not",
"in",
"data",
"[",
"'comments'",
"]",
":",
"downtime",
"[",
"'comment_id'",
"]",
"=",
"''",
"# case comment_id has comment dict instead uuid",
"# todo: This should never happen! Why this code ?",
"if",
"'uuid'",
"in",
"downtime",
"[",
"'comment_id'",
"]",
":",
"data",
"[",
"'comments'",
"]",
".",
"append",
"(",
"downtime",
"[",
"'comment_id'",
"]",
")",
"downtime",
"[",
"'comment_id'",
"]",
"=",
"downtime",
"[",
"'comment_id'",
"]",
"[",
"'uuid'",
"]",
"item",
".",
"add_downtime",
"(",
"my_downtime",
")",
"# Comments are in a list..",
"for",
"comment_uuid",
"in",
"data",
"[",
"'comments'",
"]",
":",
"comment",
"=",
"data",
"[",
"'comments'",
"]",
"[",
"comment_uuid",
"]",
"# Update the comment referenced object",
"comment",
"[",
"'ref'",
"]",
"=",
"item",
".",
"uuid",
"item",
".",
"add_comment",
"(",
"Comment",
"(",
"comment",
")",
")",
"if",
"item",
".",
"acknowledgement",
"is",
"not",
"None",
":",
"# Update the comment referenced object",
"item",
".",
"acknowledgement",
"[",
"'ref'",
"]",
"=",
"item",
".",
"uuid",
"item",
".",
"acknowledgement",
"=",
"Acknowledge",
"(",
"item",
".",
"acknowledgement",
")",
"# Relink the notified_contacts as a set() of true contacts objects",
"# if it was loaded from the retention, it's now a list of contacts",
"# names",
"new_notified_contacts",
"=",
"set",
"(",
")",
"new_notified_contacts_ids",
"=",
"set",
"(",
")",
"for",
"contact_name",
"in",
"item",
".",
"notified_contacts",
":",
"contact",
"=",
"self",
".",
"contacts",
".",
"find_by_name",
"(",
"contact_name",
")",
"if",
"contact",
"is",
"not",
"None",
":",
"new_notified_contacts",
".",
"add",
"(",
"contact_name",
")",
"new_notified_contacts_ids",
".",
"add",
"(",
"contact",
".",
"uuid",
")",
"item",
".",
"notified_contacts",
"=",
"new_notified_contacts",
"item",
".",
"notified_contacts_ids",
"=",
"new_notified_contacts_ids"
] |
Restore data in item
:param data: retention data of the item
:type data: dict
:param item: host or service item
:type item: alignak.objects.host.Host | alignak.objects.service.Service
:return: None
|
[
"Restore",
"data",
"in",
"item"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1384-L1468
|
train
|
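Note: at the end of restore_retention_data_item above, contact names loaded from retention are relinked to the known contact objects and their uuids; unknown names are silently dropped. The sketch below shows that relinking with simplified stand-in Contact objects (not the Alignak classes).

class Contact:
    def __init__(self, name, uuid):
        self.contact_name, self.uuid = name, uuid

contacts = [Contact('alice', 'c-1'), Contact('bob', 'c-2')]

def find_by_name(name):
    return next((c for c in contacts if c.contact_name == name), None)

# Loaded from retention as plain names...
notified_contacts = ['alice', 'carol']

# ...relinked to a set of known names and a set of uuids
new_names, new_ids = set(), set()
for contact_name in notified_contacts:
    contact = find_by_name(contact_name)
    if contact is not None:
        new_names.add(contact_name)
        new_ids.add(contact.uuid)

print(new_names, new_ids)   # the unknown 'carol' is dropped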
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.fill_initial_broks
|
def fill_initial_broks(self, broker_name):
# pylint: disable=too-many-branches
"""Create initial broks for a specific broker
:param broker_name: broker name
:type broker_name: str
:return: number of created broks
"""
broker_uuid = None
logger.debug("My brokers: %s", self.my_daemon.brokers)
for broker_link in list(self.my_daemon.brokers.values()):
logger.debug("Searching broker: %s", broker_link)
if broker_name == broker_link.name:
broker_uuid = broker_link.uuid
logger.info("Filling initial broks for: %s (%s)", broker_name, broker_uuid)
break
else:
if self.pushed_conf:
# I am yet configured but I do not know this broker ! Something went wrong!!!
logger.error("Requested initial broks for an unknown broker: %s", broker_name)
else:
logger.info("Requested initial broks for an unknown broker: %s", broker_name)
return 0
if self.my_daemon.brokers[broker_uuid].initialized:
logger.warning("The broker %s still got its initial broks...", broker_name)
return 0
initial_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
# First the program status
brok = self.get_program_status_brok()
self.add_brok(brok, broker_uuid)
# We can't call initial_status from all this types
# The order is important, service need host...
initial_status_types = (self.timeperiods, self.commands,
self.contacts, self.contactgroups,
self.hosts, self.hostgroups,
self.services, self.servicegroups)
self.pushed_conf.skip_initial_broks = getattr(self.pushed_conf, 'skip_initial_broks', False)
logger.debug("Skipping initial broks? %s", str(self.pushed_conf.skip_initial_broks))
if not self.pushed_conf.skip_initial_broks:
# We call initial_status from all this types
# The order is important, service need host...
initial_status_types = (self.realms, self.timeperiods, self.commands,
self.notificationways, self.contacts, self.contactgroups,
self.hosts, self.hostgroups, self.hostdependencies,
self.services, self.servicegroups, self.servicedependencies,
self.escalations)
for tab in initial_status_types:
for item in tab:
# Awful! simply to get the group members property name... :(
# todo: replace this!
member_items = None
if hasattr(item, 'members'):
member_items = getattr(self, item.my_type.replace("group", "s"))
brok = item.get_initial_status_brok(member_items)
self.add_brok(brok, broker_uuid)
# Add a brok to say that we finished all initial_pass
brok = Brok({'type': 'initial_broks_done', 'data': {'instance_id': self.instance_id}})
self.add_brok(brok, broker_uuid)
final_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
self.my_daemon.brokers[broker_uuid].initialized = True
# Send the initial broks to our modules
self.send_broks_to_modules()
# We now have raised all the initial broks
self.raised_initial_broks = True
logger.info("Created %d initial broks for %s",
final_broks_count - initial_broks_count, broker_name)
return final_broks_count - initial_broks_count
|
python
|
def fill_initial_broks(self, broker_name):
# pylint: disable=too-many-branches
"""Create initial broks for a specific broker
:param broker_name: broker name
:type broker_name: str
:return: number of created broks
"""
broker_uuid = None
logger.debug("My brokers: %s", self.my_daemon.brokers)
for broker_link in list(self.my_daemon.brokers.values()):
logger.debug("Searching broker: %s", broker_link)
if broker_name == broker_link.name:
broker_uuid = broker_link.uuid
logger.info("Filling initial broks for: %s (%s)", broker_name, broker_uuid)
break
else:
if self.pushed_conf:
# I am yet configured but I do not know this broker ! Something went wrong!!!
logger.error("Requested initial broks for an unknown broker: %s", broker_name)
else:
logger.info("Requested initial broks for an unknown broker: %s", broker_name)
return 0
if self.my_daemon.brokers[broker_uuid].initialized:
logger.warning("The broker %s still got its initial broks...", broker_name)
return 0
initial_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
# First the program status
brok = self.get_program_status_brok()
self.add_brok(brok, broker_uuid)
# We can't call initial_status from all this types
# The order is important, service need host...
initial_status_types = (self.timeperiods, self.commands,
self.contacts, self.contactgroups,
self.hosts, self.hostgroups,
self.services, self.servicegroups)
self.pushed_conf.skip_initial_broks = getattr(self.pushed_conf, 'skip_initial_broks', False)
logger.debug("Skipping initial broks? %s", str(self.pushed_conf.skip_initial_broks))
if not self.pushed_conf.skip_initial_broks:
# We call initial_status from all this types
# The order is important, service need host...
initial_status_types = (self.realms, self.timeperiods, self.commands,
self.notificationways, self.contacts, self.contactgroups,
self.hosts, self.hostgroups, self.hostdependencies,
self.services, self.servicegroups, self.servicedependencies,
self.escalations)
for tab in initial_status_types:
for item in tab:
# Awful! simply to get the group members property name... :(
# todo: replace this!
member_items = None
if hasattr(item, 'members'):
member_items = getattr(self, item.my_type.replace("group", "s"))
brok = item.get_initial_status_brok(member_items)
self.add_brok(brok, broker_uuid)
# Add a brok to say that we finished all initial_pass
brok = Brok({'type': 'initial_broks_done', 'data': {'instance_id': self.instance_id}})
self.add_brok(brok, broker_uuid)
final_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
self.my_daemon.brokers[broker_uuid].initialized = True
# Send the initial broks to our modules
self.send_broks_to_modules()
# We now have raised all the initial broks
self.raised_initial_broks = True
logger.info("Created %d initial broks for %s",
final_broks_count - initial_broks_count, broker_name)
return final_broks_count - initial_broks_count
|
[
"def",
"fill_initial_broks",
"(",
"self",
",",
"broker_name",
")",
":",
"# pylint: disable=too-many-branches",
"broker_uuid",
"=",
"None",
"logger",
".",
"debug",
"(",
"\"My brokers: %s\"",
",",
"self",
".",
"my_daemon",
".",
"brokers",
")",
"for",
"broker_link",
"in",
"list",
"(",
"self",
".",
"my_daemon",
".",
"brokers",
".",
"values",
"(",
")",
")",
":",
"logger",
".",
"debug",
"(",
"\"Searching broker: %s\"",
",",
"broker_link",
")",
"if",
"broker_name",
"==",
"broker_link",
".",
"name",
":",
"broker_uuid",
"=",
"broker_link",
".",
"uuid",
"logger",
".",
"info",
"(",
"\"Filling initial broks for: %s (%s)\"",
",",
"broker_name",
",",
"broker_uuid",
")",
"break",
"else",
":",
"if",
"self",
".",
"pushed_conf",
":",
"# I am yet configured but I do not know this broker ! Something went wrong!!!",
"logger",
".",
"error",
"(",
"\"Requested initial broks for an unknown broker: %s\"",
",",
"broker_name",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Requested initial broks for an unknown broker: %s\"",
",",
"broker_name",
")",
"return",
"0",
"if",
"self",
".",
"my_daemon",
".",
"brokers",
"[",
"broker_uuid",
"]",
".",
"initialized",
":",
"logger",
".",
"warning",
"(",
"\"The broker %s still got its initial broks...\"",
",",
"broker_name",
")",
"return",
"0",
"initial_broks_count",
"=",
"len",
"(",
"self",
".",
"my_daemon",
".",
"brokers",
"[",
"broker_uuid",
"]",
".",
"broks",
")",
"# First the program status",
"brok",
"=",
"self",
".",
"get_program_status_brok",
"(",
")",
"self",
".",
"add_brok",
"(",
"brok",
",",
"broker_uuid",
")",
"# We can't call initial_status from all this types",
"# The order is important, service need host...",
"initial_status_types",
"=",
"(",
"self",
".",
"timeperiods",
",",
"self",
".",
"commands",
",",
"self",
".",
"contacts",
",",
"self",
".",
"contactgroups",
",",
"self",
".",
"hosts",
",",
"self",
".",
"hostgroups",
",",
"self",
".",
"services",
",",
"self",
".",
"servicegroups",
")",
"self",
".",
"pushed_conf",
".",
"skip_initial_broks",
"=",
"getattr",
"(",
"self",
".",
"pushed_conf",
",",
"'skip_initial_broks'",
",",
"False",
")",
"logger",
".",
"debug",
"(",
"\"Skipping initial broks? %s\"",
",",
"str",
"(",
"self",
".",
"pushed_conf",
".",
"skip_initial_broks",
")",
")",
"if",
"not",
"self",
".",
"pushed_conf",
".",
"skip_initial_broks",
":",
"# We call initial_status from all this types",
"# The order is important, service need host...",
"initial_status_types",
"=",
"(",
"self",
".",
"realms",
",",
"self",
".",
"timeperiods",
",",
"self",
".",
"commands",
",",
"self",
".",
"notificationways",
",",
"self",
".",
"contacts",
",",
"self",
".",
"contactgroups",
",",
"self",
".",
"hosts",
",",
"self",
".",
"hostgroups",
",",
"self",
".",
"hostdependencies",
",",
"self",
".",
"services",
",",
"self",
".",
"servicegroups",
",",
"self",
".",
"servicedependencies",
",",
"self",
".",
"escalations",
")",
"for",
"tab",
"in",
"initial_status_types",
":",
"for",
"item",
"in",
"tab",
":",
"# Awful! simply to get the group members property name... :(",
"# todo: replace this!",
"member_items",
"=",
"None",
"if",
"hasattr",
"(",
"item",
",",
"'members'",
")",
":",
"member_items",
"=",
"getattr",
"(",
"self",
",",
"item",
".",
"my_type",
".",
"replace",
"(",
"\"group\"",
",",
"\"s\"",
")",
")",
"brok",
"=",
"item",
".",
"get_initial_status_brok",
"(",
"member_items",
")",
"self",
".",
"add_brok",
"(",
"brok",
",",
"broker_uuid",
")",
"# Add a brok to say that we finished all initial_pass",
"brok",
"=",
"Brok",
"(",
"{",
"'type'",
":",
"'initial_broks_done'",
",",
"'data'",
":",
"{",
"'instance_id'",
":",
"self",
".",
"instance_id",
"}",
"}",
")",
"self",
".",
"add_brok",
"(",
"brok",
",",
"broker_uuid",
")",
"final_broks_count",
"=",
"len",
"(",
"self",
".",
"my_daemon",
".",
"brokers",
"[",
"broker_uuid",
"]",
".",
"broks",
")",
"self",
".",
"my_daemon",
".",
"brokers",
"[",
"broker_uuid",
"]",
".",
"initialized",
"=",
"True",
"# Send the initial broks to our modules",
"self",
".",
"send_broks_to_modules",
"(",
")",
"# We now have raised all the initial broks",
"self",
".",
"raised_initial_broks",
"=",
"True",
"logger",
".",
"info",
"(",
"\"Created %d initial broks for %s\"",
",",
"final_broks_count",
"-",
"initial_broks_count",
",",
"broker_name",
")",
"return",
"final_broks_count",
"-",
"initial_broks_count"
] |
Create initial broks for a specific broker
:param broker_name: broker name
:type broker_name: str
:return: number of created broks
|
[
"Create",
"initial",
"broks",
"for",
"a",
"specific",
"broker"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1470-L1547
|
train
|
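Note: the fill_initial_broks record above relies on Python's for/else to detect an unknown broker: the else branch only runs when the loop completes without a break. A small illustration of that idiom with stand-in BrokerLink objects (not the Alignak ones):

class BrokerLink:
    def __init__(self, name, uuid):
        self.name, self.uuid = name, uuid

brokers = [BrokerLink('broker-master', 'b-1'), BrokerLink('broker-backup', 'b-2')]

def find_broker_uuid(name):
    broker_uuid = None
    for link in brokers:
        if link.name == name:
            broker_uuid = link.uuid
            break
    else:  # the loop ended without break: the broker is unknown
        print("Requested initial broks for an unknown broker:", name)
    return broker_uuid

print(find_broker_uuid('broker-master'))   # b-1
print(find_broker_uuid('broker-unknown'))  # prints the warning, then None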
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.get_program_status_brok
|
def get_program_status_brok(self, brok_type='program_status'):
"""Create a program status brok
It initially builds the running properties and then, for an initial status brok,
gets the properties from the Config class for which an entry exists for the
'full_status' brok.
:return: Brok with program status data
:rtype: alignak.brok.Brok
"""
# Get the running statistics
data = {
"is_running": True,
"instance_id": self.instance_id,
# "alignak_name": self.alignak_name,
"instance_name": self.name,
"last_alive": time.time(),
"pid": os.getpid(),
'_running': self.get_scheduler_stats(details=True),
'_config': {},
'_macros': {}
}
# Get configuration data from the pushed configuration
cls = self.pushed_conf.__class__
for prop, entry in list(cls.properties.items()):
# Is this property intended for broking?
if 'full_status' not in entry.fill_brok:
continue
data['_config'][prop] = self.pushed_conf.get_property_value_for_brok(
prop, cls.properties)
# data['_config'][prop] = getattr(self.pushed_conf, prop, entry.default)
# Get the macros from the pushed configuration and try to resolve
# the macros to provide the result in the status brok
macro_resolver = MacroResolver()
macro_resolver.init(self.pushed_conf)
for macro_name in sorted(self.pushed_conf.macros):
data['_macros'][macro_name] = \
macro_resolver.resolve_simple_macros_in_string("$%s$" % macro_name,
[], None, None)
logger.debug("Program status brok %s data: %s", brok_type, data)
return Brok({'type': brok_type, 'data': data})
|
python
|
def get_program_status_brok(self, brok_type='program_status'):
"""Create a program status brok
It initially builds the running properties and then, for an initial status brok,
gets the properties from the Config class for which an entry exists for the
'full_status' brok.
:return: Brok with program status data
:rtype: alignak.brok.Brok
"""
# Get the running statistics
data = {
"is_running": True,
"instance_id": self.instance_id,
# "alignak_name": self.alignak_name,
"instance_name": self.name,
"last_alive": time.time(),
"pid": os.getpid(),
'_running': self.get_scheduler_stats(details=True),
'_config': {},
'_macros': {}
}
# Get configuration data from the pushed configuration
cls = self.pushed_conf.__class__
for prop, entry in list(cls.properties.items()):
# Is this property intended for broking?
if 'full_status' not in entry.fill_brok:
continue
data['_config'][prop] = self.pushed_conf.get_property_value_for_brok(
prop, cls.properties)
# data['_config'][prop] = getattr(self.pushed_conf, prop, entry.default)
# Get the macros from the pushed configuration and try to resolve
# the macros to provide the result in the status brok
macro_resolver = MacroResolver()
macro_resolver.init(self.pushed_conf)
for macro_name in sorted(self.pushed_conf.macros):
data['_macros'][macro_name] = \
macro_resolver.resolve_simple_macros_in_string("$%s$" % macro_name,
[], None, None)
logger.debug("Program status brok %s data: %s", brok_type, data)
return Brok({'type': brok_type, 'data': data})
|
[
"def",
"get_program_status_brok",
"(",
"self",
",",
"brok_type",
"=",
"'program_status'",
")",
":",
"# Get the running statistics",
"data",
"=",
"{",
"\"is_running\"",
":",
"True",
",",
"\"instance_id\"",
":",
"self",
".",
"instance_id",
",",
"# \"alignak_name\": self.alignak_name,",
"\"instance_name\"",
":",
"self",
".",
"name",
",",
"\"last_alive\"",
":",
"time",
".",
"time",
"(",
")",
",",
"\"pid\"",
":",
"os",
".",
"getpid",
"(",
")",
",",
"'_running'",
":",
"self",
".",
"get_scheduler_stats",
"(",
"details",
"=",
"True",
")",
",",
"'_config'",
":",
"{",
"}",
",",
"'_macros'",
":",
"{",
"}",
"}",
"# Get configuration data from the pushed configuration",
"cls",
"=",
"self",
".",
"pushed_conf",
".",
"__class__",
"for",
"prop",
",",
"entry",
"in",
"list",
"(",
"cls",
".",
"properties",
".",
"items",
"(",
")",
")",
":",
"# Is this property intended for broking?",
"if",
"'full_status'",
"not",
"in",
"entry",
".",
"fill_brok",
":",
"continue",
"data",
"[",
"'_config'",
"]",
"[",
"prop",
"]",
"=",
"self",
".",
"pushed_conf",
".",
"get_property_value_for_brok",
"(",
"prop",
",",
"cls",
".",
"properties",
")",
"# data['_config'][prop] = getattr(self.pushed_conf, prop, entry.default)",
"# Get the macros from the pushed configuration and try to resolve",
"# the macros to provide the result in the status brok",
"macro_resolver",
"=",
"MacroResolver",
"(",
")",
"macro_resolver",
".",
"init",
"(",
"self",
".",
"pushed_conf",
")",
"for",
"macro_name",
"in",
"sorted",
"(",
"self",
".",
"pushed_conf",
".",
"macros",
")",
":",
"data",
"[",
"'_macros'",
"]",
"[",
"macro_name",
"]",
"=",
"macro_resolver",
".",
"resolve_simple_macros_in_string",
"(",
"\"$%s$\"",
"%",
"macro_name",
",",
"[",
"]",
",",
"None",
",",
"None",
")",
"logger",
".",
"debug",
"(",
"\"Program status brok %s data: %s\"",
",",
"brok_type",
",",
"data",
")",
"return",
"Brok",
"(",
"{",
"'type'",
":",
"brok_type",
",",
"'data'",
":",
"data",
"}",
")"
] |
Create a program status brok
It initially builds the running properties and then, for an initial status brok,
gets the properties from the Config class for which an entry exists for the
'full_status' brok.
:return: Brok with program status data
:rtype: alignak.brok.Brok
|
[
"Create",
"a",
"program",
"status",
"brok"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1563-L1606
|
train
|
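Note: the get_program_status_brok record above only exposes configuration properties whose descriptor lists 'full_status' in fill_brok. Below is a hedged sketch of that filtering; the Prop descriptor, the Config class and its properties are hypothetical simplifications, not the Alignak implementation.

import os
import time

class Prop:
    def __init__(self, fill_brok=()):
        self.fill_brok = fill_brok

class Config:
    properties = {
        'program_start': Prop(fill_brok=['full_status']),
        'interval_length': Prop(fill_brok=['full_status']),
        'internal_cache': Prop(fill_brok=[]),   # not exposed in the status brok
    }
    program_start = 1700000000
    interval_length = 60
    internal_cache = {'not': 'broadcast'}

def program_status_data(conf):
    data = {'is_running': True, 'last_alive': time.time(), 'pid': os.getpid(),
            '_config': {}}
    for prop, entry in conf.properties.items():
        if 'full_status' not in entry.fill_brok:
            continue                     # only 'full_status' properties are broadcast
        data['_config'][prop] = getattr(conf, prop)
    return data

print(program_status_data(Config()))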
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.consume_results
|
def consume_results(self): # pylint: disable=too-many-branches
"""Handle results waiting in waiting_results list.
Each check's referenced item will consume the result and update its status
:return: None
"""
# All results are in self.waiting_results
# We need to get them first
queue_size = self.waiting_results.qsize()
for _ in range(queue_size):
self.manage_results(self.waiting_results.get())
# Then we consume them
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAIT_CONSUME:
logger.debug("Consuming: %s", chk)
item = self.find_item_by_id(chk.ref)
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
dep_checks = item.consume_result(chk, notification_period, self.hosts,
self.services, self.timeperiods,
self.macromodulations, self.checkmodulations,
self.businessimpactmodulations,
self.resultmodulations, self.checks,
self.pushed_conf.log_active_checks and
not chk.passive_check)
# # Raise the log only when the check got consumed!
# # Else the item information are not up-to-date :/
# if self.pushed_conf.log_active_checks and not chk.passive_check:
# item.raise_check_result()
#
for check in dep_checks:
logger.debug("-> raised a dependency check: %s", chk)
self.add(check)
# loop to resolve dependencies
have_resolved_checks = True
while have_resolved_checks:
have_resolved_checks = False
# All 'finished' checks (no more dep) raise checks they depend on
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAITING_ME:
for dependent_checks in chk.depend_on_me:
# Ok, now dependent will no more wait
dependent_checks.depend_on.remove(chk.uuid)
have_resolved_checks = True
# REMOVE OLD DEP CHECK -> zombie
chk.status = ACT_STATUS_ZOMBIE
# Now, reinteger dep checks
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAIT_DEPEND and not chk.depend_on:
item = self.find_item_by_id(chk.ref)
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
dep_checks = item.consume_result(chk, notification_period, self.hosts,
self.services, self.timeperiods,
self.macromodulations, self.checkmodulations,
self.businessimpactmodulations,
self.resultmodulations, self.checks,
self.pushed_conf.log_active_checks and
not chk.passive_check)
for check in dep_checks:
self.add(check)
|
python
|
def consume_results(self): # pylint: disable=too-many-branches
"""Handle results waiting in waiting_results list.
Each check's referenced item will consume the result and update its status
:return: None
"""
# All results are in self.waiting_results
# We need to get them first
queue_size = self.waiting_results.qsize()
for _ in range(queue_size):
self.manage_results(self.waiting_results.get())
# Then we consume them
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAIT_CONSUME:
logger.debug("Consuming: %s", chk)
item = self.find_item_by_id(chk.ref)
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
dep_checks = item.consume_result(chk, notification_period, self.hosts,
self.services, self.timeperiods,
self.macromodulations, self.checkmodulations,
self.businessimpactmodulations,
self.resultmodulations, self.checks,
self.pushed_conf.log_active_checks and
not chk.passive_check)
# # Raise the log only when the check got consumed!
# # Else the item information is not up-to-date :/
# if self.pushed_conf.log_active_checks and not chk.passive_check:
# item.raise_check_result()
#
for check in dep_checks:
logger.debug("-> raised a dependency check: %s", chk)
self.add(check)
# loop to resolve dependencies
have_resolved_checks = True
while have_resolved_checks:
have_resolved_checks = False
# All 'finished' checks (no more dep) raise checks they depend on
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAITING_ME:
for dependent_checks in chk.depend_on_me:
# Ok, now dependent will no more wait
dependent_checks.depend_on.remove(chk.uuid)
have_resolved_checks = True
# REMOVE OLD DEP CHECK -> zombie
chk.status = ACT_STATUS_ZOMBIE
# Now, reintegrate dep checks
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAIT_DEPEND and not chk.depend_on:
item = self.find_item_by_id(chk.ref)
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
dep_checks = item.consume_result(chk, notification_period, self.hosts,
self.services, self.timeperiods,
self.macromodulations, self.checkmodulations,
self.businessimpactmodulations,
self.resultmodulations, self.checks,
self.pushed_conf.log_active_checks and
not chk.passive_check)
for check in dep_checks:
self.add(check)
|
[
"def",
"consume_results",
"(",
"self",
")",
":",
"# pylint: disable=too-many-branches",
"# All results are in self.waiting_results",
"# We need to get them first",
"queue_size",
"=",
"self",
".",
"waiting_results",
".",
"qsize",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"queue_size",
")",
":",
"self",
".",
"manage_results",
"(",
"self",
".",
"waiting_results",
".",
"get",
"(",
")",
")",
"# Then we consume them",
"for",
"chk",
"in",
"list",
"(",
"self",
".",
"checks",
".",
"values",
"(",
")",
")",
":",
"if",
"chk",
".",
"status",
"==",
"ACT_STATUS_WAIT_CONSUME",
":",
"logger",
".",
"debug",
"(",
"\"Consuming: %s\"",
",",
"chk",
")",
"item",
"=",
"self",
".",
"find_item_by_id",
"(",
"chk",
".",
"ref",
")",
"notification_period",
"=",
"None",
"if",
"getattr",
"(",
"item",
",",
"'notification_period'",
",",
"None",
")",
"is",
"not",
"None",
":",
"notification_period",
"=",
"self",
".",
"timeperiods",
"[",
"item",
".",
"notification_period",
"]",
"dep_checks",
"=",
"item",
".",
"consume_result",
"(",
"chk",
",",
"notification_period",
",",
"self",
".",
"hosts",
",",
"self",
".",
"services",
",",
"self",
".",
"timeperiods",
",",
"self",
".",
"macromodulations",
",",
"self",
".",
"checkmodulations",
",",
"self",
".",
"businessimpactmodulations",
",",
"self",
".",
"resultmodulations",
",",
"self",
".",
"checks",
",",
"self",
".",
"pushed_conf",
".",
"log_active_checks",
"and",
"not",
"chk",
".",
"passive_check",
")",
"# # Raise the log only when the check got consumed!",
"# # Else the item information are not up-to-date :/",
"# if self.pushed_conf.log_active_checks and not chk.passive_check:",
"# item.raise_check_result()",
"#",
"for",
"check",
"in",
"dep_checks",
":",
"logger",
".",
"debug",
"(",
"\"-> raised a dependency check: %s\"",
",",
"chk",
")",
"self",
".",
"add",
"(",
"check",
")",
"# loop to resolve dependencies",
"have_resolved_checks",
"=",
"True",
"while",
"have_resolved_checks",
":",
"have_resolved_checks",
"=",
"False",
"# All 'finished' checks (no more dep) raise checks they depend on",
"for",
"chk",
"in",
"list",
"(",
"self",
".",
"checks",
".",
"values",
"(",
")",
")",
":",
"if",
"chk",
".",
"status",
"==",
"ACT_STATUS_WAITING_ME",
":",
"for",
"dependent_checks",
"in",
"chk",
".",
"depend_on_me",
":",
"# Ok, now dependent will no more wait",
"dependent_checks",
".",
"depend_on",
".",
"remove",
"(",
"chk",
".",
"uuid",
")",
"have_resolved_checks",
"=",
"True",
"# REMOVE OLD DEP CHECK -> zombie",
"chk",
".",
"status",
"=",
"ACT_STATUS_ZOMBIE",
"# Now, reinteger dep checks",
"for",
"chk",
"in",
"list",
"(",
"self",
".",
"checks",
".",
"values",
"(",
")",
")",
":",
"if",
"chk",
".",
"status",
"==",
"ACT_STATUS_WAIT_DEPEND",
"and",
"not",
"chk",
".",
"depend_on",
":",
"item",
"=",
"self",
".",
"find_item_by_id",
"(",
"chk",
".",
"ref",
")",
"notification_period",
"=",
"None",
"if",
"getattr",
"(",
"item",
",",
"'notification_period'",
",",
"None",
")",
"is",
"not",
"None",
":",
"notification_period",
"=",
"self",
".",
"timeperiods",
"[",
"item",
".",
"notification_period",
"]",
"dep_checks",
"=",
"item",
".",
"consume_result",
"(",
"chk",
",",
"notification_period",
",",
"self",
".",
"hosts",
",",
"self",
".",
"services",
",",
"self",
".",
"timeperiods",
",",
"self",
".",
"macromodulations",
",",
"self",
".",
"checkmodulations",
",",
"self",
".",
"businessimpactmodulations",
",",
"self",
".",
"resultmodulations",
",",
"self",
".",
"checks",
",",
"self",
".",
"pushed_conf",
".",
"log_active_checks",
"and",
"not",
"chk",
".",
"passive_check",
")",
"for",
"check",
"in",
"dep_checks",
":",
"self",
".",
"add",
"(",
"check",
")"
] |
Handle results waiting in waiting_results list.
Check ref will call consume result and update their status
:return: None
|
[
"Handle",
"results",
"waiting",
"in",
"waiting_results",
"list",
".",
"Check",
"ref",
"will",
"call",
"consume",
"result",
"and",
"update",
"their",
"status"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1608-L1675
|
train
|
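A minimal, self-contained sketch of the dependency-resolution loop used by Scheduler.consume_results above. The data below is invented (plain dicts and string statuses instead of Check objects and the ACT_STATUS_* constants); only the control flow mirrors the method.

# Toy model of the two-pass dependency resolution in consume_results().
checks = {
    'c1': {'status': 'waitingme', 'depend_on': [], 'depend_on_me': ['c2']},
    'c2': {'status': 'waitdep', 'depend_on': ['c1'], 'depend_on_me': []},
}

have_resolved_checks = True
while have_resolved_checks:
    have_resolved_checks = False
    for uuid, chk in checks.items():
        if chk['status'] == 'waitingme':
            # Release every check that was waiting on this finished one...
            for dep_uuid in chk['depend_on_me']:
                checks[dep_uuid]['depend_on'].remove(uuid)
                have_resolved_checks = True
            # ...and retire the finished check as a zombie.
            chk['status'] = 'zombie'

# Checks left with an empty depend_on list can now be consumed.
ready = [uuid for uuid, chk in checks.items()
         if chk['status'] == 'waitdep' and not chk['depend_on']]
print(ready)  # ['c2']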
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.get_new_actions
|
def get_new_actions(self):
"""Call 'get_new_actions' hook point
Iter over all hosts and services to add new actions in internal lists
:return: None
"""
_t0 = time.time()
self.hook_point('get_new_actions')
statsmgr.timer('hook.get-new-actions', time.time() - _t0)
# ask for service and hosts their next check
for elt in self.all_my_hosts_and_services():
for action in elt.actions:
logger.debug("Got a new action for %s: %s", elt, action)
self.add(action)
# We take all, we can clear it
elt.actions = []
|
python
|
def get_new_actions(self):
"""Call 'get_new_actions' hook point
Iter over all hosts and services to add new actions in internal lists
:return: None
"""
_t0 = time.time()
self.hook_point('get_new_actions')
statsmgr.timer('hook.get-new-actions', time.time() - _t0)
# ask for service and hosts their next check
for elt in self.all_my_hosts_and_services():
for action in elt.actions:
logger.debug("Got a new action for %s: %s", elt, action)
self.add(action)
# We take all, we can clear it
elt.actions = []
|
[
"def",
"get_new_actions",
"(",
"self",
")",
":",
"_t0",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"hook_point",
"(",
"'get_new_actions'",
")",
"statsmgr",
".",
"timer",
"(",
"'hook.get-new-actions'",
",",
"time",
".",
"time",
"(",
")",
"-",
"_t0",
")",
"# ask for service and hosts their next check",
"for",
"elt",
"in",
"self",
".",
"all_my_hosts_and_services",
"(",
")",
":",
"for",
"action",
"in",
"elt",
".",
"actions",
":",
"logger",
".",
"debug",
"(",
"\"Got a new action for %s: %s\"",
",",
"elt",
",",
"action",
")",
"self",
".",
"add",
"(",
"action",
")",
"# We take all, we can clear it",
"elt",
".",
"actions",
"=",
"[",
"]"
] |
Call 'get_new_actions' hook point
Iter over all hosts and services to add new actions in internal lists
:return: None
|
[
"Call",
"get_new_actions",
"hook",
"point",
"Iter",
"over",
"all",
"hosts",
"and",
"services",
"to",
"add",
"new",
"actions",
"in",
"internal",
"lists"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1805-L1820
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.get_new_broks
|
def get_new_broks(self):
"""Iter over all hosts and services to add new broks in internal lists
:return: None
"""
# ask for service and hosts their broks waiting
# to be eaten
for elt in self.all_my_hosts_and_services():
for brok in elt.broks:
self.add(brok)
# We got all, clear item broks list
elt.broks = []
# Also fetch broks from contact (like contactdowntime)
for contact in self.contacts:
for brok in contact.broks:
self.add(brok)
# We got all, clear contact broks list
contact.broks = []
|
python
|
def get_new_broks(self):
"""Iter over all hosts and services to add new broks in internal lists
:return: None
"""
# ask for service and hosts their broks waiting
# to be eaten
for elt in self.all_my_hosts_and_services():
for brok in elt.broks:
self.add(brok)
# We got all, clear item broks list
elt.broks = []
# Also fetch broks from contact (like contactdowntime)
for contact in self.contacts:
for brok in contact.broks:
self.add(brok)
# We got all, clear contact broks list
contact.broks = []
|
[
"def",
"get_new_broks",
"(",
"self",
")",
":",
"# ask for service and hosts their broks waiting",
"# be eaten",
"for",
"elt",
"in",
"self",
".",
"all_my_hosts_and_services",
"(",
")",
":",
"for",
"brok",
"in",
"elt",
".",
"broks",
":",
"self",
".",
"add",
"(",
"brok",
")",
"# We got all, clear item broks list",
"elt",
".",
"broks",
"=",
"[",
"]",
"# Also fetch broks from contact (like contactdowntime)",
"for",
"contact",
"in",
"self",
".",
"contacts",
":",
"for",
"brok",
"in",
"contact",
".",
"broks",
":",
"self",
".",
"add",
"(",
"brok",
")",
"# We got all, clear contact broks list",
"contact",
".",
"broks",
"=",
"[",
"]"
] |
Iter over all hosts and services to add new broks in internal lists
:return: None
|
[
"Iter",
"over",
"all",
"hosts",
"and",
"services",
"to",
"add",
"new",
"broks",
"in",
"internal",
"lists"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1822-L1840
|
train
|
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.send_broks_to_modules
|
def send_broks_to_modules(self):
"""Put broks into module queues
Only broks without sent_to_externals to True are sent
Only modules that ask for broks will get some
:return: None
"""
t00 = time.time()
nb_sent = 0
broks = []
for broker_link in list(self.my_daemon.brokers.values()):
for brok in broker_link.broks:
if not getattr(brok, 'sent_to_externals', False):
brok.to_send = True
broks.append(brok)
if not broks:
return
logger.debug("sending %d broks to modules...", len(broks))
for mod in self.my_daemon.modules_manager.get_external_instances():
logger.debug("Look for sending to module %s", mod.get_name())
module_queue = mod.to_q
if module_queue:
to_send = [b for b in broks if mod.want_brok(b)]
module_queue.put(to_send)
nb_sent += len(to_send)
# No more need to send them
for broker_link in list(self.my_daemon.brokers.values()):
for brok in broker_link.broks:
if not getattr(brok, 'sent_to_externals', False):
brok.to_send = False
brok.sent_to_externals = True
logger.debug("Time to send %d broks (after %d secs)", nb_sent, time.time() - t00)
|
python
|
def send_broks_to_modules(self):
"""Put broks into module queues
Only broks without sent_to_externals to True are sent
Only modules that ask for broks will get some
:return: None
"""
t00 = time.time()
nb_sent = 0
broks = []
for broker_link in list(self.my_daemon.brokers.values()):
for brok in broker_link.broks:
if not getattr(brok, 'sent_to_externals', False):
brok.to_send = True
broks.append(brok)
if not broks:
return
logger.debug("sending %d broks to modules...", len(broks))
for mod in self.my_daemon.modules_manager.get_external_instances():
logger.debug("Look for sending to module %s", mod.get_name())
module_queue = mod.to_q
if module_queue:
to_send = [b for b in broks if mod.want_brok(b)]
module_queue.put(to_send)
nb_sent += len(to_send)
# No more need to send them
for broker_link in list(self.my_daemon.brokers.values()):
for brok in broker_link.broks:
if not getattr(brok, 'sent_to_externals', False):
brok.to_send = False
brok.sent_to_externals = True
logger.debug("Time to send %d broks (after %d secs)", nb_sent, time.time() - t00)
|
[
"def",
"send_broks_to_modules",
"(",
"self",
")",
":",
"t00",
"=",
"time",
".",
"time",
"(",
")",
"nb_sent",
"=",
"0",
"broks",
"=",
"[",
"]",
"for",
"broker_link",
"in",
"list",
"(",
"self",
".",
"my_daemon",
".",
"brokers",
".",
"values",
"(",
")",
")",
":",
"for",
"brok",
"in",
"broker_link",
".",
"broks",
":",
"if",
"not",
"getattr",
"(",
"brok",
",",
"'sent_to_externals'",
",",
"False",
")",
":",
"brok",
".",
"to_send",
"=",
"True",
"broks",
".",
"append",
"(",
"brok",
")",
"if",
"not",
"broks",
":",
"return",
"logger",
".",
"debug",
"(",
"\"sending %d broks to modules...\"",
",",
"len",
"(",
"broks",
")",
")",
"for",
"mod",
"in",
"self",
".",
"my_daemon",
".",
"modules_manager",
".",
"get_external_instances",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"Look for sending to module %s\"",
",",
"mod",
".",
"get_name",
"(",
")",
")",
"module_queue",
"=",
"mod",
".",
"to_q",
"if",
"module_queue",
":",
"to_send",
"=",
"[",
"b",
"for",
"b",
"in",
"broks",
"if",
"mod",
".",
"want_brok",
"(",
"b",
")",
"]",
"module_queue",
".",
"put",
"(",
"to_send",
")",
"nb_sent",
"+=",
"len",
"(",
"to_send",
")",
"# No more need to send them",
"for",
"broker_link",
"in",
"list",
"(",
"self",
".",
"my_daemon",
".",
"brokers",
".",
"values",
"(",
")",
")",
":",
"for",
"brok",
"in",
"broker_link",
".",
"broks",
":",
"if",
"not",
"getattr",
"(",
"brok",
",",
"'sent_to_externals'",
",",
"False",
")",
":",
"brok",
".",
"to_send",
"=",
"False",
"brok",
".",
"sent_to_externals",
"=",
"True",
"logger",
".",
"debug",
"(",
"\"Time to send %d broks (after %d secs)\"",
",",
"nb_sent",
",",
"time",
".",
"time",
"(",
")",
"-",
"t00",
")"
] |
Put broks into module queues
Only broks without sent_to_externals to True are sent
Only modules that ask for broks will get some
:return: None
|
[
"Put",
"broks",
"into",
"module",
"queues",
"Only",
"broks",
"without",
"sent_to_externals",
"to",
"True",
"are",
"sent",
"Only",
"modules",
"that",
"ask",
"for",
"broks",
"will",
"get",
"some"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1978-L2011
|
train
|
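The per-module brok filtering in Scheduler.send_broks_to_modules can be shown with a toy sketch. FakeModule, its accepted_types set and the brok dicts are all hypothetical; only the want_brok()-style predicate and the batched queue.put() follow the pattern above.

import queue

class FakeModule(object):
    # Hypothetical stand-in for an external module: a queue plus a predicate.
    def __init__(self, name, accepted_types):
        self.name = name
        self.to_q = queue.Queue()
        self.accepted_types = accepted_types

    def want_brok(self, brok):
        return brok['type'] in self.accepted_types

broks = [{'type': 'host_check_result'}, {'type': 'log'}, {'type': 'host_check_result'}]
modules = [FakeModule('logstore', {'log'}), FakeModule('metrics', {'host_check_result'})]

for mod in modules:
    # Each module only receives the broks it asked for, pushed as one batch.
    to_send = [b for b in broks if mod.want_brok(b)]
    mod.to_q.put(to_send)
    print(mod.name, len(to_send))  # logstore 1, then metrics 2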
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.find_item_by_id
|
def find_item_by_id(self, object_id):
"""Get item based on its id or uuid
:param object_id:
:type object_id: int | str
:return:
:rtype: alignak.objects.item.Item | None
"""
# Item id may be an item
if isinstance(object_id, Item):
return object_id
# Item id should be a uuid string
if not isinstance(object_id, string_types):
logger.debug("Find an item by id, object_id is not int nor string: %s", object_id)
return object_id
for items in [self.hosts, self.services, self.actions, self.checks, self.hostgroups,
self.servicegroups, self.contacts, self.contactgroups]:
if object_id in items:
return items[object_id]
# raise AttributeError("Item with id %s not found" % object_id) # pragma: no cover,
logger.error("Item with id %s not found", str(object_id)) # pragma: no cover,
return None
|
python
|
def find_item_by_id(self, object_id):
"""Get item based on its id or uuid
:param object_id:
:type object_id: int | str
:return:
:rtype: alignak.objects.item.Item | None
"""
# Item id may be an item
if isinstance(object_id, Item):
return object_id
# Item id should be a uuid string
if not isinstance(object_id, string_types):
logger.debug("Find an item by id, object_id is not int nor string: %s", object_id)
return object_id
for items in [self.hosts, self.services, self.actions, self.checks, self.hostgroups,
self.servicegroups, self.contacts, self.contactgroups]:
if object_id in items:
return items[object_id]
# raise AttributeError("Item with id %s not found" % object_id) # pragma: no cover,
logger.error("Item with id %s not found", str(object_id)) # pragma: no cover,
return None
|
[
"def",
"find_item_by_id",
"(",
"self",
",",
"object_id",
")",
":",
"# Item id may be an item",
"if",
"isinstance",
"(",
"object_id",
",",
"Item",
")",
":",
"return",
"object_id",
"# Item id should be a uuid string",
"if",
"not",
"isinstance",
"(",
"object_id",
",",
"string_types",
")",
":",
"logger",
".",
"debug",
"(",
"\"Find an item by id, object_id is not int nor string: %s\"",
",",
"object_id",
")",
"return",
"object_id",
"for",
"items",
"in",
"[",
"self",
".",
"hosts",
",",
"self",
".",
"services",
",",
"self",
".",
"actions",
",",
"self",
".",
"checks",
",",
"self",
".",
"hostgroups",
",",
"self",
".",
"servicegroups",
",",
"self",
".",
"contacts",
",",
"self",
".",
"contactgroups",
"]",
":",
"if",
"object_id",
"in",
"items",
":",
"return",
"items",
"[",
"object_id",
"]",
"# raise AttributeError(\"Item with id %s not found\" % object_id) # pragma: no cover,",
"logger",
".",
"error",
"(",
"\"Item with id %s not found\"",
",",
"str",
"(",
"object_id",
")",
")",
"# pragma: no cover,",
"return",
"None"
] |
Get item based on its id or uuid
:param object_id:
:type object_id: int | str
:return:
:rtype: alignak.objects.item.Item | None
|
[
"Get",
"item",
"based",
"on",
"its",
"id",
"or",
"uuid"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L2203-L2227
|
train
|
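A small illustration of the 'search several collections by id' pattern behind Scheduler.find_item_by_id, assuming plain dictionaries keyed by uuid instead of the scheduler's real containers.

# Invented containers; the real method walks hosts, services, actions, etc.
hosts = {'uuid-1': 'host: srv01'}
services = {'uuid-2': 'service: srv01/cpu'}

def find_item_by_id(object_id, *collections):
    for items in collections:
        if object_id in items:
            return items[object_id]
    return None  # the real method logs an error here

print(find_item_by_id('uuid-2', hosts, services))  # service: srv01/cpu
print(find_item_by_id('uuid-9', hosts, services))  # None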
Alignak-monitoring/alignak
|
alignak/scheduler.py
|
Scheduler.before_run
|
def before_run(self):
"""Initialize the scheduling process"""
# Actions and checks counters
self.nb_checks = 0
self.nb_internal_checks = 0
self.nb_checks_launched = 0
self.nb_actions_launched = 0
self.nb_checks_results = 0
self.nb_checks_results_timeout = 0
self.nb_checks_results_passive = 0
self.nb_checks_results_active = 0
self.nb_actions_results = 0
self.nb_actions_results_timeout = 0
self.nb_actions_results_passive = 0
self.nb_broks_dropped = 0
self.nb_checks_dropped = 0
self.nb_actions_dropped = 0
# Broks, notifications, ... counters
self.nb_broks = 0
self.nb_notifications = 0
self.nb_event_handlers = 0
self.nb_external_commands = 0
self.ticks = 0
|
python
|
def before_run(self):
"""Initialize the scheduling process"""
# Actions and checks counters
self.nb_checks = 0
self.nb_internal_checks = 0
self.nb_checks_launched = 0
self.nb_actions_launched = 0
self.nb_checks_results = 0
self.nb_checks_results_timeout = 0
self.nb_checks_results_passive = 0
self.nb_checks_results_active = 0
self.nb_actions_results = 0
self.nb_actions_results_timeout = 0
self.nb_actions_results_passive = 0
self.nb_broks_dropped = 0
self.nb_checks_dropped = 0
self.nb_actions_dropped = 0
# Broks, notifications, ... counters
self.nb_broks = 0
self.nb_notifications = 0
self.nb_event_handlers = 0
self.nb_external_commands = 0
self.ticks = 0
|
[
"def",
"before_run",
"(",
"self",
")",
":",
"# Actions and checks counters",
"self",
".",
"nb_checks",
"=",
"0",
"self",
".",
"nb_internal_checks",
"=",
"0",
"self",
".",
"nb_checks_launched",
"=",
"0",
"self",
".",
"nb_actions_launched",
"=",
"0",
"self",
".",
"nb_checks_results",
"=",
"0",
"self",
".",
"nb_checks_results_timeout",
"=",
"0",
"self",
".",
"nb_checks_results_passive",
"=",
"0",
"self",
".",
"nb_checks_results_active",
"=",
"0",
"self",
".",
"nb_actions_results",
"=",
"0",
"self",
".",
"nb_actions_results_timeout",
"=",
"0",
"self",
".",
"nb_actions_results_passive",
"=",
"0",
"self",
".",
"nb_broks_dropped",
"=",
"0",
"self",
".",
"nb_checks_dropped",
"=",
"0",
"self",
".",
"nb_actions_dropped",
"=",
"0",
"# Broks, notifications, ... counters",
"self",
".",
"nb_broks",
"=",
"0",
"self",
".",
"nb_notifications",
"=",
"0",
"self",
".",
"nb_event_handlers",
"=",
"0",
"self",
".",
"nb_external_commands",
"=",
"0",
"self",
".",
"ticks",
"=",
"0"
] |
Initialize the scheduling process
|
[
"Initialize",
"the",
"scheduling",
"process"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L2230-L2257
|
train
|
Alignak-monitoring/alignak
|
alignak/daemons/receiverdaemon.py
|
Receiver.setup_new_conf
|
def setup_new_conf(self):
"""Receiver custom setup_new_conf method
This function calls the base satellite treatment and manages the configuration needed
for a receiver daemon:
- get and configure its satellites
- configure the modules
:return: None
"""
# Execute the base class treatment...
super(Receiver, self).setup_new_conf()
# ...then our own specific treatment!
with self.conf_lock:
# self_conf is our own configuration from the alignak environment
# self_conf = self.cur_conf['self_conf']
logger.debug("Got config: %s", self.cur_conf)
# Configure and start our modules
if not self.have_modules:
try:
self.modules = unserialize(self.cur_conf['modules'], no_load=True)
except AlignakClassLookupException as exp: # pragma: no cover, simple protection
logger.error('Cannot un-serialize modules configuration '
'received from arbiter: %s', exp)
if self.modules:
logger.info("I received some modules configuration: %s", self.modules)
self.have_modules = True
self.do_load_modules(self.modules)
# and start external modules too
self.modules_manager.start_external_instances()
else:
logger.info("I do not have modules")
# Now create the external commands manager
# We are a receiver: our role is to get and dispatch commands to the schedulers
global_conf = self.cur_conf.get('global_conf', None)
if not global_conf:
logger.error("Received a configuration without any global_conf! "
"This may hide a configuration problem with the "
"realms and the manage_sub_realms of the satellites!")
global_conf = {
'accept_passive_unknown_check_results': False,
'log_external_commands': True
}
self.external_commands_manager = \
ExternalCommandManager(None, 'receiver', self,
global_conf.get(
'accept_passive_unknown_check_results', False),
global_conf.get(
'log_external_commands', False))
# Initialize connection with all our satellites
logger.info("Initializing connection with my satellites:")
my_satellites = self.get_links_of_type(s_type='')
for satellite in list(my_satellites.values()):
logger.info("- : %s/%s", satellite.type, satellite.name)
if not self.daemon_connection_init(satellite):
logger.error("Satellite connection failed: %s", satellite)
# Now I have a configuration!
self.have_conf = True
|
python
|
def setup_new_conf(self):
"""Receiver custom setup_new_conf method
This function calls the base satellite treatment and manages the configuration needed
for a receiver daemon:
- get and configure its satellites
- configure the modules
:return: None
"""
# Execute the base class treatment...
super(Receiver, self).setup_new_conf()
# ...then our own specific treatment!
with self.conf_lock:
# self_conf is our own configuration from the alignak environment
# self_conf = self.cur_conf['self_conf']
logger.debug("Got config: %s", self.cur_conf)
# Configure and start our modules
if not self.have_modules:
try:
self.modules = unserialize(self.cur_conf['modules'], no_load=True)
except AlignakClassLookupException as exp: # pragma: no cover, simple protection
logger.error('Cannot un-serialize modules configuration '
'received from arbiter: %s', exp)
if self.modules:
logger.info("I received some modules configuration: %s", self.modules)
self.have_modules = True
self.do_load_modules(self.modules)
# and start external modules too
self.modules_manager.start_external_instances()
else:
logger.info("I do not have modules")
# Now create the external commands manager
# We are a receiver: our role is to get and dispatch commands to the schedulers
global_conf = self.cur_conf.get('global_conf', None)
if not global_conf:
logger.error("Received a configuration without any global_conf! "
"This may hide a configuration problem with the "
"realms and the manage_sub_realms of the satellites!")
global_conf = {
'accept_passive_unknown_check_results': False,
'log_external_commands': True
}
self.external_commands_manager = \
ExternalCommandManager(None, 'receiver', self,
global_conf.get(
'accept_passive_unknown_check_results', False),
global_conf.get(
'log_external_commands', False))
# Initialize connection with all our satellites
logger.info("Initializing connection with my satellites:")
my_satellites = self.get_links_of_type(s_type='')
for satellite in list(my_satellites.values()):
logger.info("- : %s/%s", satellite.type, satellite.name)
if not self.daemon_connection_init(satellite):
logger.error("Satellite connection failed: %s", satellite)
# Now I have a configuration!
self.have_conf = True
|
[
"def",
"setup_new_conf",
"(",
"self",
")",
":",
"# Execute the base class treatment...",
"super",
"(",
"Receiver",
",",
"self",
")",
".",
"setup_new_conf",
"(",
")",
"# ...then our own specific treatment!",
"with",
"self",
".",
"conf_lock",
":",
"# self_conf is our own configuration from the alignak environment",
"# self_conf = self.cur_conf['self_conf']",
"logger",
".",
"debug",
"(",
"\"Got config: %s\"",
",",
"self",
".",
"cur_conf",
")",
"# Configure and start our modules",
"if",
"not",
"self",
".",
"have_modules",
":",
"try",
":",
"self",
".",
"modules",
"=",
"unserialize",
"(",
"self",
".",
"cur_conf",
"[",
"'modules'",
"]",
",",
"no_load",
"=",
"True",
")",
"except",
"AlignakClassLookupException",
"as",
"exp",
":",
"# pragma: no cover, simple protection",
"logger",
".",
"error",
"(",
"'Cannot un-serialize modules configuration '",
"'received from arbiter: %s'",
",",
"exp",
")",
"if",
"self",
".",
"modules",
":",
"logger",
".",
"info",
"(",
"\"I received some modules configuration: %s\"",
",",
"self",
".",
"modules",
")",
"self",
".",
"have_modules",
"=",
"True",
"self",
".",
"do_load_modules",
"(",
"self",
".",
"modules",
")",
"# and start external modules too",
"self",
".",
"modules_manager",
".",
"start_external_instances",
"(",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"I do not have modules\"",
")",
"# Now create the external commands manager",
"# We are a receiver: our role is to get and dispatch commands to the schedulers",
"global_conf",
"=",
"self",
".",
"cur_conf",
".",
"get",
"(",
"'global_conf'",
",",
"None",
")",
"if",
"not",
"global_conf",
":",
"logger",
".",
"error",
"(",
"\"Received a configuration without any global_conf! \"",
"\"This may hide a configuration problem with the \"",
"\"realms and the manage_sub_realms of the satellites!\"",
")",
"global_conf",
"=",
"{",
"'accept_passive_unknown_check_results'",
":",
"False",
",",
"'log_external_commands'",
":",
"True",
"}",
"self",
".",
"external_commands_manager",
"=",
"ExternalCommandManager",
"(",
"None",
",",
"'receiver'",
",",
"self",
",",
"global_conf",
".",
"get",
"(",
"'accept_passive_unknown_check_results'",
",",
"False",
")",
",",
"global_conf",
".",
"get",
"(",
"'log_external_commands'",
",",
"False",
")",
")",
"# Initialize connection with all our satellites",
"logger",
".",
"info",
"(",
"\"Initializing connection with my satellites:\"",
")",
"my_satellites",
"=",
"self",
".",
"get_links_of_type",
"(",
"s_type",
"=",
"''",
")",
"for",
"satellite",
"in",
"list",
"(",
"my_satellites",
".",
"values",
"(",
")",
")",
":",
"logger",
".",
"info",
"(",
"\"- : %s/%s\"",
",",
"satellite",
".",
"type",
",",
"satellite",
".",
"name",
")",
"if",
"not",
"self",
".",
"daemon_connection_init",
"(",
"satellite",
")",
":",
"logger",
".",
"error",
"(",
"\"Satellite connection failed: %s\"",
",",
"satellite",
")",
"# Now I have a configuration!",
"self",
".",
"have_conf",
"=",
"True"
] |
Receiver custom setup_new_conf method
This function calls the base satellite treatment and manages the configuration needed
for a receiver daemon:
- get and configure its satellites
- configure the modules
:return: None
|
[
"Receiver",
"custom",
"setup_new_conf",
"method"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/receiverdaemon.py#L147-L210
|
train
|
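The defensive global_conf handling in Receiver.setup_new_conf boils down to 'fall back to safe defaults, then read with .get()'. The received_conf dict below is invented to show that pattern in isolation.

received_conf = {}  # simulate a configuration pushed without any global_conf

global_conf = received_conf.get('global_conf', None)
if not global_conf:
    # Fall back to conservative defaults rather than failing.
    global_conf = {
        'accept_passive_unknown_check_results': False,
        'log_external_commands': True,
    }

accept_unknown = global_conf.get('accept_passive_unknown_check_results', False)
log_commands = global_conf.get('log_external_commands', False)
print(accept_unknown, log_commands)  # False True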
Alignak-monitoring/alignak
|
alignak/daemons/receiverdaemon.py
|
Receiver.get_external_commands_from_arbiters
|
def get_external_commands_from_arbiters(self):
"""Get external commands from our arbiters
As of now, only the arbiters are requested to provide their external commands that
the receiver will push to all the known schedulers to have them executed.
:return: None
"""
for arbiter_link_uuid in self.arbiters:
link = self.arbiters[arbiter_link_uuid]
if not link.active:
logger.debug("The arbiter '%s' is not active, it is not possible to get "
"its external commands!", link.name)
continue
try:
logger.debug("Getting external commands from: %s", link.name)
external_commands = link.get_external_commands()
if external_commands:
logger.debug("Got %d commands from: %s", len(external_commands), link.name)
else:
# Simple protection against None value
external_commands = []
for external_command in external_commands:
self.add(external_command)
except LinkError:
logger.warning("Arbiter connection failed, I could not get external commands!")
except Exception as exp: # pylint: disable=broad-except
logger.error("Arbiter connection failed, I could not get external commands!")
logger.exception("Exception: %s", exp)
|
python
|
def get_external_commands_from_arbiters(self):
"""Get external commands from our arbiters
As of now, only the arbiters are requested to provide their external commands that
the receiver will push to all the known schedulers to have them executed.
:return: None
"""
for arbiter_link_uuid in self.arbiters:
link = self.arbiters[arbiter_link_uuid]
if not link.active:
logger.debug("The arbiter '%s' is not active, it is not possible to get "
"its external commands!", link.name)
continue
try:
logger.debug("Getting external commands from: %s", link.name)
external_commands = link.get_external_commands()
if external_commands:
logger.debug("Got %d commands from: %s", len(external_commands), link.name)
else:
# Simple protection against None value
external_commands = []
for external_command in external_commands:
self.add(external_command)
except LinkError:
logger.warning("Arbiter connection failed, I could not get external commands!")
except Exception as exp: # pylint: disable=broad-except
logger.error("Arbiter connection failed, I could not get external commands!")
logger.exception("Exception: %s", exp)
|
[
"def",
"get_external_commands_from_arbiters",
"(",
"self",
")",
":",
"for",
"arbiter_link_uuid",
"in",
"self",
".",
"arbiters",
":",
"link",
"=",
"self",
".",
"arbiters",
"[",
"arbiter_link_uuid",
"]",
"if",
"not",
"link",
".",
"active",
":",
"logger",
".",
"debug",
"(",
"\"The arbiter '%s' is not active, it is not possible to get \"",
"\"its external commands!\"",
",",
"link",
".",
"name",
")",
"continue",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Getting external commands from: %s\"",
",",
"link",
".",
"name",
")",
"external_commands",
"=",
"link",
".",
"get_external_commands",
"(",
")",
"if",
"external_commands",
":",
"logger",
".",
"debug",
"(",
"\"Got %d commands from: %s\"",
",",
"len",
"(",
"external_commands",
")",
",",
"link",
".",
"name",
")",
"else",
":",
"# Simple protection against None value",
"external_commands",
"=",
"[",
"]",
"for",
"external_command",
"in",
"external_commands",
":",
"self",
".",
"add",
"(",
"external_command",
")",
"except",
"LinkError",
":",
"logger",
".",
"warning",
"(",
"\"Arbiter connection failed, I could not get external commands!\"",
")",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"logger",
".",
"error",
"(",
"\"Arbiter connection failed, I could not get external commands!\"",
")",
"logger",
".",
"exception",
"(",
"\"Exception: %s\"",
",",
"exp",
")"
] |
Get external commands from our arbiters
As of now, only the arbiters are requested to provide their external commands that
the receiver will push to all the known schedulers to have them executed.
:return: None
|
[
"Get",
"external",
"commands",
"from",
"our",
"arbiters"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/receiverdaemon.py#L212-L242
|
train
|
Alignak-monitoring/alignak
|
alignak/daemons/receiverdaemon.py
|
Receiver.push_external_commands_to_schedulers
|
def push_external_commands_to_schedulers(self):
"""Push received external commands to the schedulers
:return: None
"""
if not self.unprocessed_external_commands:
return
# Those are the global external commands
commands_to_process = self.unprocessed_external_commands
self.unprocessed_external_commands = []
logger.debug("Commands: %s", commands_to_process)
# Now get all external commands and put them into the good schedulers
logger.debug("Commands to process: %d commands", len(commands_to_process))
for ext_cmd in commands_to_process:
cmd = self.external_commands_manager.resolve_command(ext_cmd)
logger.debug("Resolved command: %s, result: %s", ext_cmd.cmd_line, cmd)
if cmd and cmd['global']:
# Send global command to all our schedulers
for scheduler_link_uuid in self.schedulers:
self.schedulers[scheduler_link_uuid].pushed_commands.append(ext_cmd)
# Now for all active schedulers, send the commands
count_pushed_commands = 0
count_failed_commands = 0
for scheduler_link_uuid in self.schedulers:
link = self.schedulers[scheduler_link_uuid]
if not link.active:
logger.debug("The scheduler '%s' is not active, it is not possible to push "
"external commands to its connection!", link.name)
continue
# If there are some commands for this scheduler...
commands = [ext_cmd.cmd_line for ext_cmd in link.pushed_commands]
if not commands:
logger.debug("The scheduler '%s' has no commands.", link.name)
continue
logger.debug("Sending %d commands to scheduler %s", len(commands), link.name)
sent = []
try:
sent = link.push_external_commands(commands)
except LinkError:
logger.warning("Scheduler connection failed, I could not push external commands!")
# Whether we sent the commands or not, clean the scheduler list
link.pushed_commands = []
# If we didn't send them, add the commands to the arbiter list
if sent:
statsmgr.gauge('external-commands.pushed.%s' % link.name, len(commands))
count_pushed_commands = count_pushed_commands + len(commands)
else:
count_failed_commands = count_failed_commands + len(commands)
statsmgr.gauge('external-commands.failed.%s' % link.name, len(commands))
# Keep the not sent commands... for a next try
self.external_commands.extend(commands)
statsmgr.gauge('external-commands.pushed.all', count_pushed_commands)
statsmgr.gauge('external-commands.failed.all', count_failed_commands)
|
python
|
def push_external_commands_to_schedulers(self):
"""Push received external commands to the schedulers
:return: None
"""
if not self.unprocessed_external_commands:
return
# Those are the global external commands
commands_to_process = self.unprocessed_external_commands
self.unprocessed_external_commands = []
logger.debug("Commands: %s", commands_to_process)
# Now get all external commands and put them into the good schedulers
logger.debug("Commands to process: %d commands", len(commands_to_process))
for ext_cmd in commands_to_process:
cmd = self.external_commands_manager.resolve_command(ext_cmd)
logger.debug("Resolved command: %s, result: %s", ext_cmd.cmd_line, cmd)
if cmd and cmd['global']:
# Send global command to all our schedulers
for scheduler_link_uuid in self.schedulers:
self.schedulers[scheduler_link_uuid].pushed_commands.append(ext_cmd)
# Now for all active schedulers, send the commands
count_pushed_commands = 0
count_failed_commands = 0
for scheduler_link_uuid in self.schedulers:
link = self.schedulers[scheduler_link_uuid]
if not link.active:
logger.debug("The scheduler '%s' is not active, it is not possible to push "
"external commands to its connection!", link.name)
continue
# If there are some commands for this scheduler...
commands = [ext_cmd.cmd_line for ext_cmd in link.pushed_commands]
if not commands:
logger.debug("The scheduler '%s' has no commands.", link.name)
continue
logger.debug("Sending %d commands to scheduler %s", len(commands), link.name)
sent = []
try:
sent = link.push_external_commands(commands)
except LinkError:
logger.warning("Scheduler connection failed, I could not push external commands!")
# Whether we sent the commands or not, clean the scheduler list
link.pushed_commands = []
# If we didn't send them, add the commands to the arbiter list
if sent:
statsmgr.gauge('external-commands.pushed.%s' % link.name, len(commands))
count_pushed_commands = count_pushed_commands + len(commands)
else:
count_failed_commands = count_failed_commands + len(commands)
statsmgr.gauge('external-commands.failed.%s' % link.name, len(commands))
# Keep the not sent commands... for a next try
self.external_commands.extend(commands)
statsmgr.gauge('external-commands.pushed.all', count_pushed_commands)
statsmgr.gauge('external-commands.failed.all', count_failed_commands)
|
[
"def",
"push_external_commands_to_schedulers",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"unprocessed_external_commands",
":",
"return",
"# Those are the global external commands",
"commands_to_process",
"=",
"self",
".",
"unprocessed_external_commands",
"self",
".",
"unprocessed_external_commands",
"=",
"[",
"]",
"logger",
".",
"debug",
"(",
"\"Commands: %s\"",
",",
"commands_to_process",
")",
"# Now get all external commands and put them into the good schedulers",
"logger",
".",
"debug",
"(",
"\"Commands to process: %d commands\"",
",",
"len",
"(",
"commands_to_process",
")",
")",
"for",
"ext_cmd",
"in",
"commands_to_process",
":",
"cmd",
"=",
"self",
".",
"external_commands_manager",
".",
"resolve_command",
"(",
"ext_cmd",
")",
"logger",
".",
"debug",
"(",
"\"Resolved command: %s, result: %s\"",
",",
"ext_cmd",
".",
"cmd_line",
",",
"cmd",
")",
"if",
"cmd",
"and",
"cmd",
"[",
"'global'",
"]",
":",
"# Send global command to all our schedulers",
"for",
"scheduler_link_uuid",
"in",
"self",
".",
"schedulers",
":",
"self",
".",
"schedulers",
"[",
"scheduler_link_uuid",
"]",
".",
"pushed_commands",
".",
"append",
"(",
"ext_cmd",
")",
"# Now for all active schedulers, send the commands",
"count_pushed_commands",
"=",
"0",
"count_failed_commands",
"=",
"0",
"for",
"scheduler_link_uuid",
"in",
"self",
".",
"schedulers",
":",
"link",
"=",
"self",
".",
"schedulers",
"[",
"scheduler_link_uuid",
"]",
"if",
"not",
"link",
".",
"active",
":",
"logger",
".",
"debug",
"(",
"\"The scheduler '%s' is not active, it is not possible to push \"",
"\"external commands to its connection!\"",
",",
"link",
".",
"name",
")",
"continue",
"# If there are some commands for this scheduler...",
"commands",
"=",
"[",
"ext_cmd",
".",
"cmd_line",
"for",
"ext_cmd",
"in",
"link",
".",
"pushed_commands",
"]",
"if",
"not",
"commands",
":",
"logger",
".",
"debug",
"(",
"\"The scheduler '%s' has no commands.\"",
",",
"link",
".",
"name",
")",
"continue",
"logger",
".",
"debug",
"(",
"\"Sending %d commands to scheduler %s\"",
",",
"len",
"(",
"commands",
")",
",",
"link",
".",
"name",
")",
"sent",
"=",
"[",
"]",
"try",
":",
"sent",
"=",
"link",
".",
"push_external_commands",
"(",
"commands",
")",
"except",
"LinkError",
":",
"logger",
".",
"warning",
"(",
"\"Scheduler connection failed, I could not push external commands!\"",
")",
"# Whether we sent the commands or not, clean the scheduler list",
"link",
".",
"pushed_commands",
"=",
"[",
"]",
"# If we didn't sent them, add the commands to the arbiter list",
"if",
"sent",
":",
"statsmgr",
".",
"gauge",
"(",
"'external-commands.pushed.%s'",
"%",
"link",
".",
"name",
",",
"len",
"(",
"commands",
")",
")",
"count_pushed_commands",
"=",
"count_pushed_commands",
"+",
"len",
"(",
"commands",
")",
"else",
":",
"count_failed_commands",
"=",
"count_failed_commands",
"+",
"len",
"(",
"commands",
")",
"statsmgr",
".",
"gauge",
"(",
"'external-commands.failed.%s'",
"%",
"link",
".",
"name",
",",
"len",
"(",
"commands",
")",
")",
"# Kepp the not sent commands... for a next try",
"self",
".",
"external_commands",
".",
"extend",
"(",
"commands",
")",
"statsmgr",
".",
"gauge",
"(",
"'external-commands.pushed.all'",
",",
"count_pushed_commands",
")",
"statsmgr",
".",
"gauge",
"(",
"'external-commands.failed.all'",
",",
"count_failed_commands",
")"
] |
Push received external commands to the schedulers
:return: None
|
[
"Push",
"received",
"external",
"commands",
"to",
"the",
"schedulers"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/receiverdaemon.py#L244-L305
|
train
|
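A toy sketch of how Receiver.push_external_commands_to_schedulers fans a command resolved as global out to every scheduler link. The scheduler and command data are made up; in Alignak the resolution itself is done by the external commands manager.

schedulers = {'sched-1': {'pushed_commands': []},
              'sched-2': {'pushed_commands': []}}
commands = [{'line': 'RESTART_PROGRAM', 'global': True},
            {'line': 'ACKNOWLEDGE_HOST_PROBLEM;srv01', 'global': False}]

for cmd in commands:
    if cmd['global']:
        # A global command is queued for every known scheduler.
        for link in schedulers.values():
            link['pushed_commands'].append(cmd['line'])

for name, link in schedulers.items():
    print(name, link['pushed_commands'])  # both schedulers got RESTART_PROGRAM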
Alignak-monitoring/alignak
|
alignak/daemons/receiverdaemon.py
|
Receiver.do_loop_turn
|
def do_loop_turn(self):
"""Receiver daemon main loop
:return: None
"""
# Begin to clean modules
self.check_and_del_zombie_modules()
# Maybe the arbiter pushed a new configuration...
if self.watch_for_new_conf(timeout=0.05):
logger.info("I got a new configuration...")
# Manage the new configuration
self.setup_new_conf()
# Maybe external modules raised 'objects'
# we should get them
_t0 = time.time()
self.get_objects_from_from_queues()
statsmgr.timer('core.get-objects-from-queues', time.time() - _t0)
# Get external commands from the arbiters...
_t0 = time.time()
self.get_external_commands_from_arbiters()
statsmgr.timer('external-commands.got.time', time.time() - _t0)
statsmgr.gauge('external-commands.got.count', len(self.unprocessed_external_commands))
_t0 = time.time()
self.push_external_commands_to_schedulers()
statsmgr.timer('external-commands.pushed.time', time.time() - _t0)
# Say to modules it's a new tick :)
_t0 = time.time()
self.hook_point('tick')
statsmgr.timer('hook.tick', time.time() - _t0)
|
python
|
def do_loop_turn(self):
"""Receiver daemon main loop
:return: None
"""
# Begin to clean modules
self.check_and_del_zombie_modules()
# Maybe the arbiter pushed a new configuration...
if self.watch_for_new_conf(timeout=0.05):
logger.info("I got a new configuration...")
# Manage the new configuration
self.setup_new_conf()
# Maybe external modules raised 'objects'
# we should get them
_t0 = time.time()
self.get_objects_from_from_queues()
statsmgr.timer('core.get-objects-from-queues', time.time() - _t0)
# Get external commands from the arbiters...
_t0 = time.time()
self.get_external_commands_from_arbiters()
statsmgr.timer('external-commands.got.time', time.time() - _t0)
statsmgr.gauge('external-commands.got.count', len(self.unprocessed_external_commands))
_t0 = time.time()
self.push_external_commands_to_schedulers()
statsmgr.timer('external-commands.pushed.time', time.time() - _t0)
# Say to modules it's a new tick :)
_t0 = time.time()
self.hook_point('tick')
statsmgr.timer('hook.tick', time.time() - _t0)
|
[
"def",
"do_loop_turn",
"(",
"self",
")",
":",
"# Begin to clean modules",
"self",
".",
"check_and_del_zombie_modules",
"(",
")",
"# Maybe the arbiter pushed a new configuration...",
"if",
"self",
".",
"watch_for_new_conf",
"(",
"timeout",
"=",
"0.05",
")",
":",
"logger",
".",
"info",
"(",
"\"I got a new configuration...\"",
")",
"# Manage the new configuration",
"self",
".",
"setup_new_conf",
"(",
")",
"# Maybe external modules raised 'objects'",
"# we should get them",
"_t0",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"get_objects_from_from_queues",
"(",
")",
"statsmgr",
".",
"timer",
"(",
"'core.get-objects-from-queues'",
",",
"time",
".",
"time",
"(",
")",
"-",
"_t0",
")",
"# Get external commands from the arbiters...",
"_t0",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"get_external_commands_from_arbiters",
"(",
")",
"statsmgr",
".",
"timer",
"(",
"'external-commands.got.time'",
",",
"time",
".",
"time",
"(",
")",
"-",
"_t0",
")",
"statsmgr",
".",
"gauge",
"(",
"'external-commands.got.count'",
",",
"len",
"(",
"self",
".",
"unprocessed_external_commands",
")",
")",
"_t0",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"push_external_commands_to_schedulers",
"(",
")",
"statsmgr",
".",
"timer",
"(",
"'external-commands.pushed.time'",
",",
"time",
".",
"time",
"(",
")",
"-",
"_t0",
")",
"# Say to modules it's a new tick :)",
"_t0",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"hook_point",
"(",
"'tick'",
")",
"statsmgr",
".",
"timer",
"(",
"'hook.tick'",
",",
"time",
".",
"time",
"(",
")",
"-",
"_t0",
")"
] |
Receiver daemon main loop
:return: None
|
[
"Receiver",
"daemon",
"main",
"loop"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/receiverdaemon.py#L307-L341
|
train
|
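Receiver.do_loop_turn wraps each step in the same 'take t0, run the step, report the elapsed time' instrumentation. A stand-alone sketch of that pattern, with a print-based stand-in for the statsmgr timer, assuming nothing about the real statsmgr API beyond the timer(name, value) call shown above:

import time

def report_timer(metric, value):
    # Stand-in for statsmgr.timer(metric, value): just print the measurement.
    print("%s: %.6f s" % (metric, value))

def step():
    time.sleep(0.01)  # placeholder for one loop-turn step

_t0 = time.time()
step()
report_timer('core.get-objects-from-queues', time.time() - _t0)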
Alignak-monitoring/alignak
|
alignak/check.py
|
Check.serialize
|
def serialize(self):
"""This function serializes into a simple dict object.
The only usage is to send to poller, and it does not need to have the
depend_on and depend_on_me properties.
:return: json representation of a Check
:rtype: dict
"""
res = super(Check, self).serialize()
if 'depend_on' in res:
del res['depend_on']
if 'depend_on_me' in res:
del res['depend_on_me']
return res
|
python
|
def serialize(self):
"""This function serializes into a simple dict object.
The only usage is to send to poller, and it does not need to have the
depend_on and depend_on_me properties.
:return: json representation of a Check
:rtype: dict
"""
res = super(Check, self).serialize()
if 'depend_on' in res:
del res['depend_on']
if 'depend_on_me' in res:
del res['depend_on_me']
return res
|
[
"def",
"serialize",
"(",
"self",
")",
":",
"res",
"=",
"super",
"(",
"Check",
",",
"self",
")",
".",
"serialize",
"(",
")",
"if",
"'depend_on'",
"in",
"res",
":",
"del",
"res",
"[",
"'depend_on'",
"]",
"if",
"'depend_on_me'",
"in",
"res",
":",
"del",
"res",
"[",
"'depend_on_me'",
"]",
"return",
"res"
] |
This function serializes into a simple dict object.
The only usage is to send to poller, and it does not need to have the
depend_on and depend_on_me properties.
:return: json representation of a Check
:rtype: dict
|
[
"This",
"function",
"serializes",
"into",
"a",
"simple",
"dict",
"object",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/check.py#L136-L150
|
train
|
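Check.serialize simply drops the scheduler-only link keys before the dictionary is shipped to a poller. A simplified stand-alone version of that idea, applied to an invented check dictionary:

check = {'uuid': 'c1', 'command': 'check_ping', 'depend_on': ['c0'], 'depend_on_me': []}

def serialize_for_poller(data, drop=('depend_on', 'depend_on_me')):
    # Keep everything except the dependency links, which only matter locally.
    return {key: value for key, value in data.items() if key not in drop}

print(serialize_for_poller(check))  # {'uuid': 'c1', 'command': 'check_ping'}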
Alignak-monitoring/alignak
|
alignak/alignakobject.py
|
AlignakObject.serialize
|
def serialize(self):
"""This function serializes into a simple dictionary object.
It is used when transferring data to other daemons over the network (http)
Here is the generic function that simply exports attributes declared in the
properties dictionary of the object.
Note that a SetProp property will be serialized as a list.
:return: Dictionary containing key and value from properties
:rtype: dict
"""
# uuid is not in *_properties
res = {
'uuid': self.uuid
}
for prop in self.__class__.properties:
if not hasattr(self, prop):
continue
res[prop] = getattr(self, prop)
if isinstance(self.__class__.properties[prop], SetProp):
res[prop] = list(getattr(self, prop))
return res
|
python
|
def serialize(self):
"""This function serializes into a simple dictionary object.
It is used when transferring data to other daemons over the network (http)
Here is the generic function that simply exports attributes declared in the
properties dictionary of the object.
Note that a SetProp property will be serialized as a list.
:return: Dictionary containing key and value from properties
:rtype: dict
"""
# uuid is not in *_properties
res = {
'uuid': self.uuid
}
for prop in self.__class__.properties:
if not hasattr(self, prop):
continue
res[prop] = getattr(self, prop)
if isinstance(self.__class__.properties[prop], SetProp):
res[prop] = list(getattr(self, prop))
return res
|
[
"def",
"serialize",
"(",
"self",
")",
":",
"# uuid is not in *_properties",
"res",
"=",
"{",
"'uuid'",
":",
"self",
".",
"uuid",
"}",
"for",
"prop",
"in",
"self",
".",
"__class__",
".",
"properties",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"prop",
")",
":",
"continue",
"res",
"[",
"prop",
"]",
"=",
"getattr",
"(",
"self",
",",
"prop",
")",
"if",
"isinstance",
"(",
"self",
".",
"__class__",
".",
"properties",
"[",
"prop",
"]",
",",
"SetProp",
")",
":",
"res",
"[",
"prop",
"]",
"=",
"list",
"(",
"getattr",
"(",
"self",
",",
"prop",
")",
")",
"return",
"res"
] |
This function serializes into a simple dictionary object.
It is used when transferring data to other daemons over the network (http)
Here is the generic function that simply exports attributes declared in the
properties dictionary of the object.
Note that a SetProp property will be serialized as a list.
:return: Dictionary containing key and value from properties
:rtype: dict
|
[
"This",
"function",
"serializes",
"into",
"a",
"simple",
"dictionary",
"object",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/alignakobject.py#L92-L117
|
train
|
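AlignakObject.serialize exports the attributes declared in the class properties dictionary and turns set-valued (SetProp) properties into lists so the result stays JSON-serializable. A toy sketch with made-up Prop/SetProp stand-ins instead of Alignak's real property classes:

import json

class Prop(object):
    pass

class SetProp(Prop):
    pass

class Toy(object):
    properties = {'name': Prop(), 'tags': SetProp()}

    def __init__(self):
        self.uuid = 'abc'
        self.name = 'srv01'
        self.tags = {'linux', 'prod'}

    def serialize(self):
        res = {'uuid': self.uuid}
        for prop in self.__class__.properties:
            if not hasattr(self, prop):
                continue
            res[prop] = getattr(self, prop)
            # Sets cannot be JSON-encoded, so SetProp values become lists.
            if isinstance(self.__class__.properties[prop], SetProp):
                res[prop] = list(getattr(self, prop))
        return res

print(json.dumps(Toy().serialize(), sort_keys=True))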
Alignak-monitoring/alignak
|
alignak/alignakobject.py
|
AlignakObject.fill_default
|
def fill_default(self):
"""
Define the object properties with a default value when the property is not yet defined
:return: None
"""
for prop, entry in self.__class__.properties.items():
if hasattr(self, prop):
continue
if not hasattr(entry, 'default') or entry.default is NONE_OBJECT:
continue
if hasattr(entry.default, '__iter__'):
setattr(self, prop, copy(entry.default))
else:
setattr(self, prop, entry.default)
|
python
|
def fill_default(self):
"""
Define the object properties with a default value when the property is not yet defined
:return: None
"""
for prop, entry in self.__class__.properties.items():
if hasattr(self, prop):
continue
if not hasattr(entry, 'default') or entry.default is NONE_OBJECT:
continue
if hasattr(entry.default, '__iter__'):
setattr(self, prop, copy(entry.default))
else:
setattr(self, prop, entry.default)
|
[
"def",
"fill_default",
"(",
"self",
")",
":",
"for",
"prop",
",",
"entry",
"in",
"self",
".",
"__class__",
".",
"properties",
".",
"items",
"(",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"prop",
")",
":",
"continue",
"if",
"not",
"hasattr",
"(",
"entry",
",",
"'default'",
")",
"or",
"entry",
".",
"default",
"is",
"NONE_OBJECT",
":",
"continue",
"if",
"hasattr",
"(",
"entry",
".",
"default",
",",
"'__iter__'",
")",
":",
"setattr",
"(",
"self",
",",
"prop",
",",
"copy",
"(",
"entry",
".",
"default",
")",
")",
"else",
":",
"setattr",
"(",
"self",
",",
"prop",
",",
"entry",
".",
"default",
")"
] |
Define the object properties with a default value when the property is not yet defined
:return: None
|
[
"Define",
"the",
"object",
"properties",
"with",
"a",
"default",
"value",
"when",
"the",
"property",
"is",
"not",
"yet",
"defined"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/alignakobject.py#L119-L134
|
train
|
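AlignakObject.fill_default copies iterable defaults instead of assigning them directly; otherwise every instance would end up sharing the same mutable object. A simplified sketch (it omits the NONE_OBJECT sentinel check) showing why the copy matters:

from copy import copy

class Default(object):
    # Hypothetical property entry; only the 'default' attribute matters here.
    def __init__(self, default):
        self.default = default

class Toy(object):
    properties = {'retries': Default(3), 'groups': Default([])}

def fill_default(obj):
    for prop, entry in obj.__class__.properties.items():
        if hasattr(obj, prop):
            continue  # never overwrite an explicitly set value
        if hasattr(entry.default, '__iter__'):
            # Copy mutable defaults so instances do not share the same list.
            setattr(obj, prop, copy(entry.default))
        else:
            setattr(obj, prop, entry.default)

a, b = Toy(), Toy()
fill_default(a)
fill_default(b)
a.groups.append('web')
print(a.groups, b.groups)  # ['web'] []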
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Host.fill_predictive_missing_parameters
|
def fill_predictive_missing_parameters(self):
"""Fill address with host_name if not already set
and define state with initial_state
:return: None
"""
if hasattr(self, 'host_name') and not hasattr(self, 'address'):
self.address = self.host_name
if hasattr(self, 'host_name') and not hasattr(self, 'alias'):
self.alias = self.host_name
if self.initial_state == 'd':
self.state = 'DOWN'
elif self.initial_state == 'x':
self.state = 'UNREACHABLE'
|
python
|
def fill_predictive_missing_parameters(self):
"""Fill address with host_name if not already set
and define state with initial_state
:return: None
"""
if hasattr(self, 'host_name') and not hasattr(self, 'address'):
self.address = self.host_name
if hasattr(self, 'host_name') and not hasattr(self, 'alias'):
self.alias = self.host_name
if self.initial_state == 'd':
self.state = 'DOWN'
elif self.initial_state == 'x':
self.state = 'UNREACHABLE'
|
[
"def",
"fill_predictive_missing_parameters",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'host_name'",
")",
"and",
"not",
"hasattr",
"(",
"self",
",",
"'address'",
")",
":",
"self",
".",
"address",
"=",
"self",
".",
"host_name",
"if",
"hasattr",
"(",
"self",
",",
"'host_name'",
")",
"and",
"not",
"hasattr",
"(",
"self",
",",
"'alias'",
")",
":",
"self",
".",
"alias",
"=",
"self",
".",
"host_name",
"if",
"self",
".",
"initial_state",
"==",
"'d'",
":",
"self",
".",
"state",
"=",
"'DOWN'",
"elif",
"self",
".",
"initial_state",
"==",
"'x'",
":",
"self",
".",
"state",
"=",
"'UNREACHABLE'"
] |
Fill address with host_name if not already set
and define state with initial_state
:return: None
|
[
"Fill",
"address",
"with",
"host_name",
"if",
"not",
"already",
"set",
"and",
"define",
"state",
"with",
"initial_state"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L304-L317
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Host.get_groupnames
|
def get_groupnames(self, hostgroups):
"""Get names of the host's hostgroups
:return: comma separated names of hostgroups alphabetically sorted
:rtype: str
"""
group_names = []
for hostgroup_id in self.hostgroups:
hostgroup = hostgroups[hostgroup_id]
group_names.append(hostgroup.get_name())
return ','.join(sorted(group_names))
|
python
|
def get_groupnames(self, hostgroups):
"""Get names of the host's hostgroups
:return: comma separated names of hostgroups alphabetically sorted
:rtype: str
"""
group_names = []
for hostgroup_id in self.hostgroups:
hostgroup = hostgroups[hostgroup_id]
group_names.append(hostgroup.get_name())
return ','.join(sorted(group_names))
|
[
"def",
"get_groupnames",
"(",
"self",
",",
"hostgroups",
")",
":",
"group_names",
"=",
"[",
"]",
"for",
"hostgroup_id",
"in",
"self",
".",
"hostgroups",
":",
"hostgroup",
"=",
"hostgroups",
"[",
"hostgroup_id",
"]",
"group_names",
".",
"append",
"(",
"hostgroup",
".",
"get_name",
"(",
")",
")",
"return",
"','",
".",
"join",
"(",
"sorted",
"(",
"group_names",
")",
")"
] |
Get names of the host's hostgroups
:return: comma separated names of hostgroups alphabetically sorted
:rtype: str
|
[
"Get",
"names",
"of",
"the",
"host",
"s",
"hostgroups"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L408-L418
|
train
|
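Host.get_groupnames resolves the host's group ids to names and returns them alphabetically sorted and comma-separated. A tiny example on invented data:

hostgroups = {'g1': 'linux-servers', 'g2': 'dmz'}
host_group_ids = ['g2', 'g1']

def get_groupnames(group_ids, groups):
    # Sort so the result is stable regardless of the ids' order.
    return ','.join(sorted(groups[gid] for gid in group_ids))

print(get_groupnames(host_group_ids, hostgroups))  # dmz,linux-servers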
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Host.get_groupaliases
|
def get_groupaliases(self, hostgroups):
"""Get aliases of the host's hostgroups
:return: comma separated aliases of hostgroups alphabetically sorted
:rtype: str
"""
group_aliases = []
for hostgroup_id in self.hostgroups:
hostgroup = hostgroups[hostgroup_id]
group_aliases.append(hostgroup.alias)
return ','.join(sorted(group_aliases))
|
python
|
def get_groupaliases(self, hostgroups):
"""Get aliases of the host's hostgroups
:return: comma separated aliases of hostgroups alphabetically sorted
:rtype: str
"""
group_aliases = []
for hostgroup_id in self.hostgroups:
hostgroup = hostgroups[hostgroup_id]
group_aliases.append(hostgroup.alias)
return ','.join(sorted(group_aliases))
|
[
"def",
"get_groupaliases",
"(",
"self",
",",
"hostgroups",
")",
":",
"group_aliases",
"=",
"[",
"]",
"for",
"hostgroup_id",
"in",
"self",
".",
"hostgroups",
":",
"hostgroup",
"=",
"hostgroups",
"[",
"hostgroup_id",
"]",
"group_aliases",
".",
"append",
"(",
"hostgroup",
".",
"alias",
")",
"return",
"','",
".",
"join",
"(",
"sorted",
"(",
"group_aliases",
")",
")"
] |
Get aliases of the host's hostgroups
:return: comma separated aliases of hostgroups alphabetically sorted
:rtype: str
|
[
"Get",
"aliases",
"of",
"the",
"host",
"s",
"hostgroups"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L420-L430
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Host.set_state_from_exit_status
|
def set_state_from_exit_status(self, status, notif_period, hosts, services):
"""Set the state in UP, DOWN, or UNREACHABLE according to the status of a check result.
:param status: integer between 0 and 3 (but not 1)
:type status: int
:return: None
"""
now = time.time()
# we should put in last_state the good last state:
# if not just change the state by a problem/impact
# we can take current state. But if it's the case, the
# real old state is self.state_before_impact (it's the TRUE
# state in fact)
# And only if we enable the impact state change
cls = self.__class__
if (cls.enable_problem_impacts_states_change and
self.is_impact and not self.state_changed_since_impact):
self.last_state = self.state_before_impact
else:
self.last_state = self.state
# There is no 1 case because it should have been managed by the caller for a host
# like the schedulingitem::consume method.
if status == 0:
self.state = u'UP'
self.state_id = 0
self.last_time_up = int(self.last_state_update)
# self.last_time_up = self.last_state_update
state_code = 'u'
elif status in (2, 3):
self.state = u'DOWN'
self.state_id = 1
self.last_time_down = int(self.last_state_update)
# self.last_time_down = self.last_state_update
state_code = 'd'
elif status == 4:
self.state = u'UNREACHABLE'
self.state_id = 4
self.last_time_unreachable = int(self.last_state_update)
# self.last_time_unreachable = self.last_state_update
state_code = 'x'
else:
self.state = u'DOWN' # exit code UNDETERMINED
self.state_id = 1
# self.last_time_down = int(self.last_state_update)
self.last_time_down = self.last_state_update
state_code = 'd'
if state_code in self.flap_detection_options:
self.add_flapping_change(self.state != self.last_state)
# Now we add a value, we update the is_flapping prop
self.update_flapping(notif_period, hosts, services)
if self.state != self.last_state and \
not (self.state == "DOWN" and self.last_state == "UNREACHABLE"):
self.last_state_change = self.last_state_update
self.duration_sec = now - self.last_state_change
|
python
|
def set_state_from_exit_status(self, status, notif_period, hosts, services):
"""Set the state in UP, DOWN, or UNREACHABLE according to the status of a check result.
:param status: integer between 0 and 3 (but not 1)
:type status: int
:return: None
"""
now = time.time()
# we should put in last_state the good last state:
# if not just change the state by a problem/impact
# we can take current state. But if it's the case, the
# real old state is self.state_before_impact (it's the TRUE
# state in fact)
# And only if we enable the impact state change
cls = self.__class__
if (cls.enable_problem_impacts_states_change and
self.is_impact and not self.state_changed_since_impact):
self.last_state = self.state_before_impact
else:
self.last_state = self.state
# There is no 1 case because it should have been managed by the caller for a host
# like the schedulingitem::consume method.
if status == 0:
self.state = u'UP'
self.state_id = 0
self.last_time_up = int(self.last_state_update)
# self.last_time_up = self.last_state_update
state_code = 'u'
elif status in (2, 3):
self.state = u'DOWN'
self.state_id = 1
self.last_time_down = int(self.last_state_update)
# self.last_time_down = self.last_state_update
state_code = 'd'
elif status == 4:
self.state = u'UNREACHABLE'
self.state_id = 4
self.last_time_unreachable = int(self.last_state_update)
# self.last_time_unreachable = self.last_state_update
state_code = 'x'
else:
self.state = u'DOWN' # exit code UNDETERMINED
self.state_id = 1
# self.last_time_down = int(self.last_state_update)
self.last_time_down = self.last_state_update
state_code = 'd'
if state_code in self.flap_detection_options:
self.add_flapping_change(self.state != self.last_state)
# Now we add a value, we update the is_flapping prop
self.update_flapping(notif_period, hosts, services)
if self.state != self.last_state and \
not (self.state == "DOWN" and self.last_state == "UNREACHABLE"):
self.last_state_change = self.last_state_update
self.duration_sec = now - self.last_state_change
|
[
"def",
"set_state_from_exit_status",
"(",
"self",
",",
"status",
",",
"notif_period",
",",
"hosts",
",",
"services",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"# we should put in last_state the good last state:",
"# if not just change the state by an problem/impact",
"# we can take current state. But if it's the case, the",
"# real old state is self.state_before_impact (it's the TRUE",
"# state in fact)",
"# And only if we enable the impact state change",
"cls",
"=",
"self",
".",
"__class__",
"if",
"(",
"cls",
".",
"enable_problem_impacts_states_change",
"and",
"self",
".",
"is_impact",
"and",
"not",
"self",
".",
"state_changed_since_impact",
")",
":",
"self",
".",
"last_state",
"=",
"self",
".",
"state_before_impact",
"else",
":",
"self",
".",
"last_state",
"=",
"self",
".",
"state",
"# There is no 1 case because it should have been managed by the caller for a host",
"# like the schedulingitem::consume method.",
"if",
"status",
"==",
"0",
":",
"self",
".",
"state",
"=",
"u'UP'",
"self",
".",
"state_id",
"=",
"0",
"self",
".",
"last_time_up",
"=",
"int",
"(",
"self",
".",
"last_state_update",
")",
"# self.last_time_up = self.last_state_update",
"state_code",
"=",
"'u'",
"elif",
"status",
"in",
"(",
"2",
",",
"3",
")",
":",
"self",
".",
"state",
"=",
"u'DOWN'",
"self",
".",
"state_id",
"=",
"1",
"self",
".",
"last_time_down",
"=",
"int",
"(",
"self",
".",
"last_state_update",
")",
"# self.last_time_down = self.last_state_update",
"state_code",
"=",
"'d'",
"elif",
"status",
"==",
"4",
":",
"self",
".",
"state",
"=",
"u'UNREACHABLE'",
"self",
".",
"state_id",
"=",
"4",
"self",
".",
"last_time_unreachable",
"=",
"int",
"(",
"self",
".",
"last_state_update",
")",
"# self.last_time_unreachable = self.last_state_update",
"state_code",
"=",
"'x'",
"else",
":",
"self",
".",
"state",
"=",
"u'DOWN'",
"# exit code UNDETERMINED",
"self",
".",
"state_id",
"=",
"1",
"# self.last_time_down = int(self.last_state_update)",
"self",
".",
"last_time_down",
"=",
"self",
".",
"last_state_update",
"state_code",
"=",
"'d'",
"if",
"state_code",
"in",
"self",
".",
"flap_detection_options",
":",
"self",
".",
"add_flapping_change",
"(",
"self",
".",
"state",
"!=",
"self",
".",
"last_state",
")",
"# Now we add a value, we update the is_flapping prop",
"self",
".",
"update_flapping",
"(",
"notif_period",
",",
"hosts",
",",
"services",
")",
"if",
"self",
".",
"state",
"!=",
"self",
".",
"last_state",
"and",
"not",
"(",
"self",
".",
"state",
"==",
"\"DOWN\"",
"and",
"self",
".",
"last_state",
"==",
"\"UNREACHABLE\"",
")",
":",
"self",
".",
"last_state_change",
"=",
"self",
".",
"last_state_update",
"self",
".",
"duration_sec",
"=",
"now",
"-",
"self",
".",
"last_state_change"
] |
Set the state in UP, DOWN, or UNREACHABLE according to the status of a check result.
:param status: integer between 0 and 3 (but not 1)
:type status: int
:return: None
|
[
"Set",
"the",
"state",
"in",
"UP",
"DOWN",
"or",
"UNREACHABLE",
"according",
"to",
"the",
"status",
"of",
"a",
"check",
"result",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L492-L548
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Host.is_state
|
def is_state(self, status):
"""Return if status match the current host status
:param status: status to compare ( "o", "d", "x"). Usually comes from config files
:type status: str
:return: True if status <=> self.status, otherwise False
:rtype: bool
"""
if status == self.state:
return True
# Now low status
if status == 'o' and self.state == u'UP':
return True
if status == 'd' and self.state == u'DOWN':
return True
if status in ['u', 'x'] and self.state == u'UNREACHABLE':
return True
return False
|
python
|
def is_state(self, status):
"""Return if status match the current host status
:param status: status to compare ( "o", "d", "x"). Usually comes from config files
:type status: str
:return: True if status <=> self.status, otherwise False
:rtype: bool
"""
if status == self.state:
return True
# Now low status
if status == 'o' and self.state == u'UP':
return True
if status == 'd' and self.state == u'DOWN':
return True
if status in ['u', 'x'] and self.state == u'UNREACHABLE':
return True
return False
|
[
"def",
"is_state",
"(",
"self",
",",
"status",
")",
":",
"if",
"status",
"==",
"self",
".",
"state",
":",
"return",
"True",
"# Now low status",
"if",
"status",
"==",
"'o'",
"and",
"self",
".",
"state",
"==",
"u'UP'",
":",
"return",
"True",
"if",
"status",
"==",
"'d'",
"and",
"self",
".",
"state",
"==",
"u'DOWN'",
":",
"return",
"True",
"if",
"status",
"in",
"[",
"'u'",
",",
"'x'",
"]",
"and",
"self",
".",
"state",
"==",
"u'UNREACHABLE'",
":",
"return",
"True",
"return",
"False"
] |
Return if status matches the current host status
:param status: status to compare ( "o", "d", "x"). Usually comes from config files
:type status: str
:return: True if status <=> self.status, otherwise False
:rtype: bool
|
[
"Return",
"if",
"status",
"match",
"the",
"current",
"host",
"status"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L550-L567
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Host.last_time_non_ok_or_up
|
def last_time_non_ok_or_up(self):
"""Get the last time the host was in a non-OK state
:return: self.last_time_down if self.last_time_down > self.last_time_up, 0 otherwise
:rtype: int
"""
non_ok_times = [x for x in [self.last_time_down]
if x > self.last_time_up]
if not non_ok_times:
last_time_non_ok = 0 # todo: program_start would be better?
else:
last_time_non_ok = min(non_ok_times)
return last_time_non_ok
|
python
|
def last_time_non_ok_or_up(self):
"""Get the last time the host was in a non-OK state
:return: self.last_time_down if self.last_time_down > self.last_time_up, 0 otherwise
:rtype: int
"""
non_ok_times = [x for x in [self.last_time_down]
if x > self.last_time_up]
if not non_ok_times:
last_time_non_ok = 0 # todo: program_start would be better?
else:
last_time_non_ok = min(non_ok_times)
return last_time_non_ok
|
[
"def",
"last_time_non_ok_or_up",
"(",
"self",
")",
":",
"non_ok_times",
"=",
"[",
"x",
"for",
"x",
"in",
"[",
"self",
".",
"last_time_down",
"]",
"if",
"x",
">",
"self",
".",
"last_time_up",
"]",
"if",
"not",
"non_ok_times",
":",
"last_time_non_ok",
"=",
"0",
"# todo: program_start would be better?",
"else",
":",
"last_time_non_ok",
"=",
"min",
"(",
"non_ok_times",
")",
"return",
"last_time_non_ok"
] |
Get the last time the host was in a non-OK state
:return: self.last_time_down if self.last_time_down > self.last_time_up, 0 otherwise
:rtype: int
|
[
"Get",
"the",
"last",
"time",
"the",
"host",
"was",
"in",
"a",
"non",
"-",
"OK",
"state"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L569-L581
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Host.notification_is_blocked_by_contact
|
def notification_is_blocked_by_contact(self, notifways, timeperiods, notif, contact):
"""Check if the notification is blocked by this contact.
:param notif: notification created earlier
:type notif: alignak.notification.Notification
:param contact: contact we want to notify
:type notif: alignak.objects.contact.Contact
:return: True if the notification is blocked, False otherwise
:rtype: bool
"""
return not contact.want_host_notification(notifways, timeperiods,
self.last_chk, self.state, notif.type,
self.business_impact, notif.command_call)
|
python
|
def notification_is_blocked_by_contact(self, notifways, timeperiods, notif, contact):
"""Check if the notification is blocked by this contact.
:param notif: notification created earlier
:type notif: alignak.notification.Notification
:param contact: contact we want to notify
:type notif: alignak.objects.contact.Contact
:return: True if the notification is blocked, False otherwise
:rtype: bool
"""
return not contact.want_host_notification(notifways, timeperiods,
self.last_chk, self.state, notif.type,
self.business_impact, notif.command_call)
|
[
"def",
"notification_is_blocked_by_contact",
"(",
"self",
",",
"notifways",
",",
"timeperiods",
",",
"notif",
",",
"contact",
")",
":",
"return",
"not",
"contact",
".",
"want_host_notification",
"(",
"notifways",
",",
"timeperiods",
",",
"self",
".",
"last_chk",
",",
"self",
".",
"state",
",",
"notif",
".",
"type",
",",
"self",
".",
"business_impact",
",",
"notif",
".",
"command_call",
")"
] |
Check if the notification is blocked by this contact.
:param notif: notification created earlier
:type notif: alignak.notification.Notification
:param contact: contact we want to notify
:type notif: alignak.objects.contact.Contact
:return: True if the notification is blocked, False otherwise
:rtype: bool
|
[
"Check",
"if",
"the",
"notification",
"is",
"blocked",
"by",
"this",
"contact",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L914-L926
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Host._tot_services_by_state
|
def _tot_services_by_state(self, services, state):
"""Get the number of service in the specified state
:param state: state to filter service
:type state:
:return: number of service with s.state_id == state
:rtype: int
"""
return str(sum(1 for s in self.services
if services[s].state_id == state))
|
python
|
def _tot_services_by_state(self, services, state):
"""Get the number of service in the specified state
:param state: state to filter service
:type state:
:return: number of service with s.state_id == state
:rtype: int
"""
return str(sum(1 for s in self.services
if services[s].state_id == state))
|
[
"def",
"_tot_services_by_state",
"(",
"self",
",",
"services",
",",
"state",
")",
":",
"return",
"str",
"(",
"sum",
"(",
"1",
"for",
"s",
"in",
"self",
".",
"services",
"if",
"services",
"[",
"s",
"]",
".",
"state_id",
"==",
"state",
")",
")"
] |
Get the number of service in the specified state
:param state: state to filter service
:type state:
:return: number of service with s.state_id == state
:rtype: int
|
[
"Get",
"the",
"number",
"of",
"service",
"in",
"the",
"specified",
"state"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L1095-L1104
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Host.get_overall_state
|
def get_overall_state(self, services):
"""Get the host overall state including the host self status
and the status of its services
Compute the host overall state identifier, including:
- the acknowledged state
- the downtime state
The host overall state is (prioritized):
- an host not monitored (5)
- an host down (4)
- an host unreachable (3)
- an host downtimed (2)
- an host acknowledged (1)
- an host up (0)
If the host overall state is <= 2, then the host overall state is the maximum value
of the host overall state and all the host services overall states.
The overall state of an host is:
- 0 if the host is UP and all its services are OK
- 1 if the host is DOWN or UNREACHABLE and acknowledged or
at least one of its services is acknowledged and
no other services are WARNING or CRITICAL
- 2 if the host is DOWN or UNREACHABLE and in a scheduled downtime or
at least one of its services is in a scheduled downtime and no
other services are WARNING or CRITICAL
- 3 if the host is UNREACHABLE or
at least one of its services is WARNING
- 4 if the host is DOWN or
at least one of its services is CRITICAL
- 5 if the host is not monitored
:param services: a list of known services
:type services: alignak.objects.service.Services
:return: the host overall state
:rtype: int
"""
overall_state = 0
if not self.monitored:
overall_state = 5
elif self.acknowledged:
overall_state = 1
elif self.downtimed:
overall_state = 2
elif self.state_type == 'HARD':
if self.state == 'UNREACHABLE':
overall_state = 3
elif self.state == 'DOWN':
overall_state = 4
# Only consider the hosts services state if all is ok (or almost...)
if overall_state <= 2:
for service in self.services:
if service in services:
service = services[service]
# Only for monitored services
if service.overall_state_id < 5:
overall_state = max(overall_state, service.overall_state_id)
return overall_state
|
python
|
def get_overall_state(self, services):
"""Get the host overall state including the host self status
and the status of its services
Compute the host overall state identifier, including:
- the acknowledged state
- the downtime state
The host overall state is (prioritized):
- an host not monitored (5)
- an host down (4)
- an host unreachable (3)
- an host downtimed (2)
- an host acknowledged (1)
- an host up (0)
If the host overall state is <= 2, then the host overall state is the maximum value
of the host overall state and all the host services overall states.
The overall state of an host is:
- 0 if the host is UP and all its services are OK
- 1 if the host is DOWN or UNREACHABLE and acknowledged or
at least one of its services is acknowledged and
no other services are WARNING or CRITICAL
- 2 if the host is DOWN or UNREACHABLE and in a scheduled downtime or
at least one of its services is in a scheduled downtime and no
other services are WARNING or CRITICAL
- 3 if the host is UNREACHABLE or
at least one of its services is WARNING
- 4 if the host is DOWN or
at least one of its services is CRITICAL
- 5 if the host is not monitored
:param services: a list of known services
:type services: alignak.objects.service.Services
:return: the host overall state
:rtype: int
"""
overall_state = 0
if not self.monitored:
overall_state = 5
elif self.acknowledged:
overall_state = 1
elif self.downtimed:
overall_state = 2
elif self.state_type == 'HARD':
if self.state == 'UNREACHABLE':
overall_state = 3
elif self.state == 'DOWN':
overall_state = 4
# Only consider the hosts services state if all is ok (or almost...)
if overall_state <= 2:
for service in self.services:
if service in services:
service = services[service]
# Only for monitored services
if service.overall_state_id < 5:
overall_state = max(overall_state, service.overall_state_id)
return overall_state
|
[
"def",
"get_overall_state",
"(",
"self",
",",
"services",
")",
":",
"overall_state",
"=",
"0",
"if",
"not",
"self",
".",
"monitored",
":",
"overall_state",
"=",
"5",
"elif",
"self",
".",
"acknowledged",
":",
"overall_state",
"=",
"1",
"elif",
"self",
".",
"downtimed",
":",
"overall_state",
"=",
"2",
"elif",
"self",
".",
"state_type",
"==",
"'HARD'",
":",
"if",
"self",
".",
"state",
"==",
"'UNREACHABLE'",
":",
"overall_state",
"=",
"3",
"elif",
"self",
".",
"state",
"==",
"'DOWN'",
":",
"overall_state",
"=",
"4",
"# Only consider the hosts services state if all is ok (or almost...)",
"if",
"overall_state",
"<=",
"2",
":",
"for",
"service",
"in",
"self",
".",
"services",
":",
"if",
"service",
"in",
"services",
":",
"service",
"=",
"services",
"[",
"service",
"]",
"# Only for monitored services",
"if",
"service",
".",
"overall_state_id",
"<",
"5",
":",
"overall_state",
"=",
"max",
"(",
"overall_state",
",",
"service",
".",
"overall_state_id",
")",
"return",
"overall_state"
] |
Get the host overall state including the host self status
and the status of its services
Compute the host overall state identifier, including:
- the acknowledged state
- the downtime state
The host overall state is (prioritized):
- an host not monitored (5)
- an host down (4)
- an host unreachable (3)
- an host downtimed (2)
- an host acknowledged (1)
- an host up (0)
If the host overall state is <= 2, then the host overall state is the maximum value
of the host overall state and all the host services overall states.
The overall state of an host is:
- 0 if the host is UP and all its services are OK
- 1 if the host is DOWN or UNREACHABLE and acknowledged or
at least one of its services is acknowledged and
no other services are WARNING or CRITICAL
- 2 if the host is DOWN or UNREACHABLE and in a scheduled downtime or
at least one of its services is in a scheduled downtime and no
other services are WARNING or CRITICAL
- 3 if the host is UNREACHABLE or
at least one of its services is WARNING
- 4 if the host is DOWN or
at least one of its services is CRITICAL
- 5 if the host is not monitored
:param services: a list of known services
:type services: alignak.objects.service.Services
:return: the host overall state
:rtype: int
|
[
"Get",
"the",
"host",
"overall",
"state",
"including",
"the",
"host",
"self",
"status",
"and",
"the",
"status",
"of",
"its",
"services"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L1236-L1298
|
train
|
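The prioritisation in get_overall_state can be sketched standalone; plain dictionaries replace the Host and Service objects here, so this is an illustration of the logic rather than alignak code.

def overall_state(host, service_states):
    """host: dict with monitored/acknowledged/downtimed/state_type/state keys,
    service_states: list of the services' overall_state_id values."""
    state = 0
    if not host['monitored']:
        state = 5
    elif host['acknowledged']:
        state = 1
    elif host['downtimed']:
        state = 2
    elif host['state_type'] == 'HARD':
        if host['state'] == 'UNREACHABLE':
            state = 3
        elif host['state'] == 'DOWN':
            state = 4
    # Services only raise the level when the host itself is "almost ok"
    if state <= 2:
        for svc_state in service_states:
            if svc_state < 5:  # ignore unmonitored services
                state = max(state, svc_state)
    return state

host = {'monitored': True, 'acknowledged': False, 'downtimed': False,
        'state_type': 'HARD', 'state': 'UP'}
print(overall_state(host, [0, 4, 5]))  # -> 4 (one service critical)
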
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Hosts.linkify_h_by_h
|
def linkify_h_by_h(self):
"""Link hosts with their parents
:return: None
"""
for host in self:
# The new member list
new_parents = []
for parent in getattr(host, 'parents', []):
parent = parent.strip()
o_parent = self.find_by_name(parent)
if o_parent is not None:
new_parents.append(o_parent.uuid)
else:
err = "the parent '%s' for the host '%s' is unknown!" % (parent,
host.get_name())
self.add_error(err)
# We find the id, we replace the names
host.parents = new_parents
|
python
|
def linkify_h_by_h(self):
"""Link hosts with their parents
:return: None
"""
for host in self:
# The new member list
new_parents = []
for parent in getattr(host, 'parents', []):
parent = parent.strip()
o_parent = self.find_by_name(parent)
if o_parent is not None:
new_parents.append(o_parent.uuid)
else:
err = "the parent '%s' for the host '%s' is unknown!" % (parent,
host.get_name())
self.add_error(err)
# We find the id, we replace the names
host.parents = new_parents
|
[
"def",
"linkify_h_by_h",
"(",
"self",
")",
":",
"for",
"host",
"in",
"self",
":",
"# The new member list",
"new_parents",
"=",
"[",
"]",
"for",
"parent",
"in",
"getattr",
"(",
"host",
",",
"'parents'",
",",
"[",
"]",
")",
":",
"parent",
"=",
"parent",
".",
"strip",
"(",
")",
"o_parent",
"=",
"self",
".",
"find_by_name",
"(",
"parent",
")",
"if",
"o_parent",
"is",
"not",
"None",
":",
"new_parents",
".",
"append",
"(",
"o_parent",
".",
"uuid",
")",
"else",
":",
"err",
"=",
"\"the parent '%s' for the host '%s' is unknown!\"",
"%",
"(",
"parent",
",",
"host",
".",
"get_name",
"(",
")",
")",
"self",
".",
"add_error",
"(",
"err",
")",
"# We find the id, we replace the names",
"host",
".",
"parents",
"=",
"new_parents"
] |
Link hosts with their parents
:return: None
|
[
"Link",
"hosts",
"with",
"their",
"parents"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L1372-L1390
|
train
|
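A minimal sketch of the name-to-uuid resolution performed by linkify_h_by_h, with a plain dict standing in for the Hosts container; names and uuids below are invented.

known_hosts = {'gateway': 'uuid-gw', 'switch-1': 'uuid-sw1'}  # name -> uuid
errors = []

def resolve_parents(parent_names):
    resolved = []
    for name in parent_names:
        name = name.strip()
        uuid = known_hosts.get(name)
        if uuid is not None:
            resolved.append(uuid)
        else:
            errors.append("the parent '%s' is unknown!" % name)
    return resolved

print(resolve_parents([' gateway', 'unknown-host']))  # -> ['uuid-gw']
print(errors)  # -> ["the parent 'unknown-host' is unknown!"]
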
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Hosts.linkify_h_by_hg
|
def linkify_h_by_hg(self, hostgroups):
"""Link hosts with hostgroups
:param hostgroups: hostgroups object to link with
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: None
"""
# Register host in the hostgroups
for host in self:
new_hostgroups = []
if hasattr(host, 'hostgroups') and host.hostgroups != []:
hgs = [n.strip() for n in host.hostgroups if n.strip()]
for hg_name in hgs:
# TODO: should an unknown hostgroup raise an error ?
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is not None:
new_hostgroups.append(hostgroup.uuid)
else:
err = ("the hostgroup '%s' of the host '%s' is "
"unknown" % (hg_name, host.host_name))
host.add_error(err)
host.hostgroups = new_hostgroups
|
python
|
def linkify_h_by_hg(self, hostgroups):
"""Link hosts with hostgroups
:param hostgroups: hostgroups object to link with
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: None
"""
# Register host in the hostgroups
for host in self:
new_hostgroups = []
if hasattr(host, 'hostgroups') and host.hostgroups != []:
hgs = [n.strip() for n in host.hostgroups if n.strip()]
for hg_name in hgs:
# TODO: should an unknown hostgroup raise an error ?
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is not None:
new_hostgroups.append(hostgroup.uuid)
else:
err = ("the hostgroup '%s' of the host '%s' is "
"unknown" % (hg_name, host.host_name))
host.add_error(err)
host.hostgroups = new_hostgroups
|
[
"def",
"linkify_h_by_hg",
"(",
"self",
",",
"hostgroups",
")",
":",
"# Register host in the hostgroups",
"for",
"host",
"in",
"self",
":",
"new_hostgroups",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"host",
",",
"'hostgroups'",
")",
"and",
"host",
".",
"hostgroups",
"!=",
"[",
"]",
":",
"hgs",
"=",
"[",
"n",
".",
"strip",
"(",
")",
"for",
"n",
"in",
"host",
".",
"hostgroups",
"if",
"n",
".",
"strip",
"(",
")",
"]",
"for",
"hg_name",
"in",
"hgs",
":",
"# TODO: should an unknown hostgroup raise an error ?",
"hostgroup",
"=",
"hostgroups",
".",
"find_by_name",
"(",
"hg_name",
")",
"if",
"hostgroup",
"is",
"not",
"None",
":",
"new_hostgroups",
".",
"append",
"(",
"hostgroup",
".",
"uuid",
")",
"else",
":",
"err",
"=",
"(",
"\"the hostgroup '%s' of the host '%s' is \"",
"\"unknown\"",
"%",
"(",
"hg_name",
",",
"host",
".",
"host_name",
")",
")",
"host",
".",
"add_error",
"(",
"err",
")",
"host",
".",
"hostgroups",
"=",
"new_hostgroups"
] |
Link hosts with hostgroups
:param hostgroups: hostgroups object to link with
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: None
|
[
"Link",
"hosts",
"with",
"hostgroups"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L1415-L1436
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Hosts.apply_dependencies
|
def apply_dependencies(self):
"""Loop on hosts and register dependency between parent and son
call Host.fill_parents_dependency()
:return: None
"""
for host in self:
for parent_id in getattr(host, 'parents', []):
if parent_id is None:
continue
parent = self[parent_id]
if parent.active_checks_enabled:
# Add parent in the list
host.act_depend_of.append((parent_id, ['d', 'x', 's', 'f'], '', True))
# Add child in the parent
parent.act_depend_of_me.append((host.uuid, ['d', 'x', 's', 'f'], '', True))
# And add the parent/child dep filling too, for broking
parent.child_dependencies.add(host.uuid)
host.parent_dependencies.add(parent_id)
|
python
|
def apply_dependencies(self):
"""Loop on hosts and register dependency between parent and son
call Host.fill_parents_dependency()
:return: None
"""
for host in self:
for parent_id in getattr(host, 'parents', []):
if parent_id is None:
continue
parent = self[parent_id]
if parent.active_checks_enabled:
# Add parent in the list
host.act_depend_of.append((parent_id, ['d', 'x', 's', 'f'], '', True))
# Add child in the parent
parent.act_depend_of_me.append((host.uuid, ['d', 'x', 's', 'f'], '', True))
# And add the parent/child dep filling too, for broking
parent.child_dependencies.add(host.uuid)
host.parent_dependencies.add(parent_id)
|
[
"def",
"apply_dependencies",
"(",
"self",
")",
":",
"for",
"host",
"in",
"self",
":",
"for",
"parent_id",
"in",
"getattr",
"(",
"host",
",",
"'parents'",
",",
"[",
"]",
")",
":",
"if",
"parent_id",
"is",
"None",
":",
"continue",
"parent",
"=",
"self",
"[",
"parent_id",
"]",
"if",
"parent",
".",
"active_checks_enabled",
":",
"# Add parent in the list",
"host",
".",
"act_depend_of",
".",
"append",
"(",
"(",
"parent_id",
",",
"[",
"'d'",
",",
"'x'",
",",
"'s'",
",",
"'f'",
"]",
",",
"''",
",",
"True",
")",
")",
"# Add child in the parent",
"parent",
".",
"act_depend_of_me",
".",
"append",
"(",
"(",
"host",
".",
"uuid",
",",
"[",
"'d'",
",",
"'x'",
",",
"'s'",
",",
"'f'",
"]",
",",
"''",
",",
"True",
")",
")",
"# And add the parent/child dep filling too, for broking",
"parent",
".",
"child_dependencies",
".",
"add",
"(",
"host",
".",
"uuid",
")",
"host",
".",
"parent_dependencies",
".",
"add",
"(",
"parent_id",
")"
] |
Loop on hosts and register dependency between parent and son
call Host.fill_parents_dependency()
:return: None
|
[
"Loop",
"on",
"hosts",
"and",
"register",
"dependency",
"between",
"parent",
"and",
"son"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L1466-L1487
|
train
|
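The dependency tuples registered by apply_dependencies have the shape (uuid, states, timeperiod, inherits_parent); the following sketch shows the symmetric parent/child registration with plain dicts and invented uuids.

parent = {'uuid': 'uuid-parent', 'act_depend_of_me': [],
          'child_dependencies': set(), 'active_checks_enabled': True}
child = {'uuid': 'uuid-child', 'act_depend_of': [], 'parent_dependencies': set()}

if parent['active_checks_enabled']:
    # The child depends on the parent for DOWN/UNREACHABLE/... states
    child['act_depend_of'].append((parent['uuid'], ['d', 'x', 's', 'f'], '', True))
    # And the parent records the child as depending on it
    parent['act_depend_of_me'].append((child['uuid'], ['d', 'x', 's', 'f'], '', True))
    parent['child_dependencies'].add(child['uuid'])
    child['parent_dependencies'].add(parent['uuid'])

print(child['act_depend_of'])
# -> [('uuid-parent', ['d', 'x', 's', 'f'], '', True)]
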
Alignak-monitoring/alignak
|
alignak/objects/host.py
|
Hosts.find_hosts_that_use_template
|
def find_hosts_that_use_template(self, tpl_name):
"""Find hosts that use the template defined in argument tpl_name
:param tpl_name: the template name we filter or
:type tpl_name: str
:return: list of the host_name of the hosts that got the template tpl_name in tags
:rtype: list[str]
"""
return [h.host_name for h in self if tpl_name in h.tags if hasattr(h, "host_name")]
|
python
|
def find_hosts_that_use_template(self, tpl_name):
"""Find hosts that use the template defined in argument tpl_name
:param tpl_name: the template name we filter or
:type tpl_name: str
:return: list of the host_name of the hosts that got the template tpl_name in tags
:rtype: list[str]
"""
return [h.host_name for h in self if tpl_name in h.tags if hasattr(h, "host_name")]
|
[
"def",
"find_hosts_that_use_template",
"(",
"self",
",",
"tpl_name",
")",
":",
"return",
"[",
"h",
".",
"host_name",
"for",
"h",
"in",
"self",
"if",
"tpl_name",
"in",
"h",
".",
"tags",
"if",
"hasattr",
"(",
"h",
",",
"\"host_name\"",
")",
"]"
] |
Find hosts that use the template defined in argument tpl_name
:param tpl_name: the template name we filter or
:type tpl_name: str
:return: list of the host_name of the hosts that got the template tpl_name in tags
:rtype: list[str]
|
[
"Find",
"hosts",
"that",
"use",
"the",
"template",
"defined",
"in",
"argument",
"tpl_name"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L1489-L1497
|
train
|
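The comprehension above chains two if clauses, which behaves like a logical and; a quick standalone illustration using made-up host records (dicts rather than Host objects):

hosts = [
    {'host_name': 'web-1', 'tags': ['generic-host', 'linux']},
    {'host_name': 'db-1', 'tags': ['generic-host']},
    {'tags': ['linux']},  # template-like entry without a host_name
]

# Equivalent of: tpl_name in h.tags AND the record has a host_name
matching = [h['host_name'] for h in hosts
            if 'linux' in h['tags'] if 'host_name' in h]
print(matching)  # -> ['web-1']
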
Alignak-monitoring/alignak
|
alignak/objects/arbiterlink.py
|
ArbiterLink.is_me
|
def is_me(self): # pragma: no cover, seems not to be used anywhere
"""Check if parameter name if same than name of this object
TODO: is it useful?
:return: true if the parameter name is the same as this name
:rtype: bool
"""
logger.info("And arbiter is launched with the hostname:%s "
"from an arbiter point of view of addr:%s", self.host_name, socket.getfqdn())
return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
|
python
|
def is_me(self): # pragma: no cover, seems not to be used anywhere
"""Check if parameter name if same than name of this object
TODO: is it useful?
:return: true if the parameter name is the same as this name
:rtype: bool
"""
logger.info("And arbiter is launched with the hostname:%s "
"from an arbiter point of view of addr:%s", self.host_name, socket.getfqdn())
return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
|
[
"def",
"is_me",
"(",
"self",
")",
":",
"# pragma: no cover, seems not to be used anywhere",
"logger",
".",
"info",
"(",
"\"And arbiter is launched with the hostname:%s \"",
"\"from an arbiter point of view of addr:%s\"",
",",
"self",
".",
"host_name",
",",
"socket",
".",
"getfqdn",
"(",
")",
")",
"return",
"self",
".",
"host_name",
"==",
"socket",
".",
"getfqdn",
"(",
")",
"or",
"self",
".",
"host_name",
"==",
"socket",
".",
"gethostname",
"(",
")"
] |
Check if the parameter name is the same as the name of this object
TODO: is it useful?
:return: true if the parameter name is the same as this name
:rtype: bool
|
[
"Check",
"if",
"parameter",
"name",
"if",
"same",
"than",
"name",
"of",
"this",
"object"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/arbiterlink.py#L79-L89
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/arbiterlink.py
|
ArbiterLink.do_not_run
|
def do_not_run(self):
"""Check if satellite running or not
If not, try to run
:return: true if satellite not running
:rtype: bool
"""
logger.debug("[%s] do_not_run", self.name)
try:
self.con.get('_do_not_run')
return True
except HTTPClientConnectionException as exp: # pragma: no cover, simple protection
self.add_failed_check_attempt("Connection error when "
"sending do not run: %s" % str(exp))
self.set_dead()
except HTTPClientTimeoutException as exp: # pragma: no cover, simple protection
self.add_failed_check_attempt("Connection timeout when "
"sending do not run: %s" % str(exp))
except HTTPClientException as exp:
self.add_failed_check_attempt("Error when "
"sending do not run: %s" % str(exp))
return False
|
python
|
def do_not_run(self):
"""Check if satellite running or not
If not, try to run
:return: true if satellite not running
:rtype: bool
"""
logger.debug("[%s] do_not_run", self.name)
try:
self.con.get('_do_not_run')
return True
except HTTPClientConnectionException as exp: # pragma: no cover, simple protection
self.add_failed_check_attempt("Connection error when "
"sending do not run: %s" % str(exp))
self.set_dead()
except HTTPClientTimeoutException as exp: # pragma: no cover, simple protection
self.add_failed_check_attempt("Connection timeout when "
"sending do not run: %s" % str(exp))
except HTTPClientException as exp:
self.add_failed_check_attempt("Error when "
"sending do not run: %s" % str(exp))
return False
|
[
"def",
"do_not_run",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"[%s] do_not_run\"",
",",
"self",
".",
"name",
")",
"try",
":",
"self",
".",
"con",
".",
"get",
"(",
"'_do_not_run'",
")",
"return",
"True",
"except",
"HTTPClientConnectionException",
"as",
"exp",
":",
"# pragma: no cover, simple protection",
"self",
".",
"add_failed_check_attempt",
"(",
"\"Connection error when \"",
"\"sending do not run: %s\"",
"%",
"str",
"(",
"exp",
")",
")",
"self",
".",
"set_dead",
"(",
")",
"except",
"HTTPClientTimeoutException",
"as",
"exp",
":",
"# pragma: no cover, simple protection",
"self",
".",
"add_failed_check_attempt",
"(",
"\"Connection timeout when \"",
"\"sending do not run: %s\"",
"%",
"str",
"(",
"exp",
")",
")",
"except",
"HTTPClientException",
"as",
"exp",
":",
"self",
".",
"add_failed_check_attempt",
"(",
"\"Error when \"",
"\"sending do not run: %s\"",
"%",
"str",
"(",
"exp",
")",
")",
"return",
"False"
] |
Check if satellite running or not
If not, try to run
:return: true if satellite not running
:rtype: bool
|
[
"Check",
"if",
"satellite",
"running",
"or",
"not",
"If",
"not",
"try",
"to",
"run"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/arbiterlink.py#L91-L114
|
train
|
Alignak-monitoring/alignak
|
alignak/daemons/schedulerdaemon.py
|
Alignak.get_broks
|
def get_broks(self, broker_name):
"""Send broks to a specific broker
:param broker_name: broker name to send broks
:type broker_name: str
:return: dict of brok for this broker
:rtype: dict[alignak.brok.Brok]
"""
logger.debug("Broker %s requests my broks list", broker_name)
res = []
if not broker_name:
return res
for broker_link in list(self.brokers.values()):
if broker_name == broker_link.name:
for brok in sorted(broker_link.broks, key=lambda x: x.creation_time):
# Only provide broks that were not yet sent to our external modules
if getattr(brok, 'sent_to_externals', False):
res.append(brok)
brok.got = True
broker_link.broks = [b for b in broker_link.broks if not getattr(b, 'got', False)]
logger.debug("Providing %d broks to %s", len(res), broker_name)
break
else:
logger.warning("Got a brok request from an unknown broker: %s", broker_name)
return res
|
python
|
def get_broks(self, broker_name):
"""Send broks to a specific broker
:param broker_name: broker name to send broks
:type broker_name: str
:return: dict of brok for this broker
:rtype: dict[alignak.brok.Brok]
"""
logger.debug("Broker %s requests my broks list", broker_name)
res = []
if not broker_name:
return res
for broker_link in list(self.brokers.values()):
if broker_name == broker_link.name:
for brok in sorted(broker_link.broks, key=lambda x: x.creation_time):
# Only provide broks that were not yet sent to our external modules
if getattr(brok, 'sent_to_externals', False):
res.append(brok)
brok.got = True
broker_link.broks = [b for b in broker_link.broks if not getattr(b, 'got', False)]
logger.debug("Providing %d broks to %s", len(res), broker_name)
break
else:
logger.warning("Got a brok request from an unknown broker: %s", broker_name)
return res
|
[
"def",
"get_broks",
"(",
"self",
",",
"broker_name",
")",
":",
"logger",
".",
"debug",
"(",
"\"Broker %s requests my broks list\"",
",",
"broker_name",
")",
"res",
"=",
"[",
"]",
"if",
"not",
"broker_name",
":",
"return",
"res",
"for",
"broker_link",
"in",
"list",
"(",
"self",
".",
"brokers",
".",
"values",
"(",
")",
")",
":",
"if",
"broker_name",
"==",
"broker_link",
".",
"name",
":",
"for",
"brok",
"in",
"sorted",
"(",
"broker_link",
".",
"broks",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"creation_time",
")",
":",
"# Only provide broks that did not yet sent to our external modules",
"if",
"getattr",
"(",
"brok",
",",
"'sent_to_externals'",
",",
"False",
")",
":",
"res",
".",
"append",
"(",
"brok",
")",
"brok",
".",
"got",
"=",
"True",
"broker_link",
".",
"broks",
"=",
"[",
"b",
"for",
"b",
"in",
"broker_link",
".",
"broks",
"if",
"not",
"getattr",
"(",
"b",
",",
"'got'",
",",
"False",
")",
"]",
"logger",
".",
"debug",
"(",
"\"Providing %d broks to %s\"",
",",
"len",
"(",
"res",
")",
",",
"broker_name",
")",
"break",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Got a brok request from an unknown broker: %s\"",
",",
"broker_name",
")",
"return",
"res"
] |
Send broks to a specific broker
:param broker_name: broker name to send broks
:type broker_name: str
:return: dict of brok for this broker
:rtype: dict[alignak.brok.Brok]
|
[
"Send",
"broks",
"to",
"a",
"specific",
"broker"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/schedulerdaemon.py#L127-L153
|
train
|
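A condensed sketch of the selection logic in get_broks: only broks already flagged sent_to_externals are handed out, and handed-out broks are then dropped from the broker link's queue. Plain dicts stand in for Brok objects here.

broker_broks = [
    {'id': 1, 'creation_time': 10.0, 'sent_to_externals': True},
    {'id': 2, 'creation_time': 5.0, 'sent_to_externals': False},
    {'id': 3, 'creation_time': 7.0, 'sent_to_externals': True},
]

res = []
for brok in sorted(broker_broks, key=lambda b: b['creation_time']):
    if brok.get('sent_to_externals', False):
        res.append(brok)
        brok['got'] = True

# Keep only the broks that were not handed out
broker_broks = [b for b in broker_broks if not b.get('got', False)]
print([b['id'] for b in res])           # -> [3, 1] (oldest first)
print([b['id'] for b in broker_broks])  # -> [2]
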
Alignak-monitoring/alignak
|
alignak/daemons/schedulerdaemon.py
|
Alignak.do_loop_turn
|
def do_loop_turn(self):
"""Scheduler loop turn
Simply run the Alignak scheduler loop
This is called when a configuration got received by the scheduler daemon. As of it,
check if the first scheduling has been done... and manage this.
:return: None
"""
if not self.first_scheduling:
# Ok, now all is initialized, we can make the initial broks
logger.info("First scheduling launched")
_t0 = time.time()
# Program start brok
self.sched.initial_program_status()
# First scheduling
self.sched.schedule()
statsmgr.timer('first_scheduling', time.time() - _t0)
logger.info("First scheduling done")
# Connect to our passive satellites if needed
for satellite in [s for s in list(self.pollers.values()) if s.passive]:
if not self.daemon_connection_init(satellite):
logger.error("Passive satellite connection failed: %s", satellite)
for satellite in [s for s in list(self.reactionners.values()) if s.passive]:
if not self.daemon_connection_init(satellite):
logger.error("Passive satellite connection failed: %s", satellite)
# Ticks are for recurrent function call like consume, del zombies etc
self.sched.ticks = 0
self.first_scheduling = True
# Each loop turn, execute the daemon specific treatment...
# only if the daemon has a configuration to manage
if self.sched.pushed_conf:
# If scheduling is not yet enabled, enable scheduling
if not self.sched.must_schedule:
self.sched.start_scheduling()
self.sched.before_run()
self.sched.run()
else:
logger.warning("#%d - No monitoring configuration to scheduler...",
self.loop_count)
|
python
|
def do_loop_turn(self):
"""Scheduler loop turn
Simply run the Alignak scheduler loop
This is called when a configuration got received by the scheduler daemon. As of it,
check if the first scheduling has been done... and manage this.
:return: None
"""
if not self.first_scheduling:
# Ok, now all is initialized, we can make the initial broks
logger.info("First scheduling launched")
_t0 = time.time()
# Program start brok
self.sched.initial_program_status()
# First scheduling
self.sched.schedule()
statsmgr.timer('first_scheduling', time.time() - _t0)
logger.info("First scheduling done")
# Connect to our passive satellites if needed
for satellite in [s for s in list(self.pollers.values()) if s.passive]:
if not self.daemon_connection_init(satellite):
logger.error("Passive satellite connection failed: %s", satellite)
for satellite in [s for s in list(self.reactionners.values()) if s.passive]:
if not self.daemon_connection_init(satellite):
logger.error("Passive satellite connection failed: %s", satellite)
# Ticks are for recurrent function call like consume, del zombies etc
self.sched.ticks = 0
self.first_scheduling = True
# Each loop turn, execute the daemon specific treatment...
# only if the daemon has a configuration to manage
if self.sched.pushed_conf:
# If scheduling is not yet enabled, enable scheduling
if not self.sched.must_schedule:
self.sched.start_scheduling()
self.sched.before_run()
self.sched.run()
else:
logger.warning("#%d - No monitoring configuration to scheduler...",
self.loop_count)
|
[
"def",
"do_loop_turn",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"first_scheduling",
":",
"# Ok, now all is initialized, we can make the initial broks",
"logger",
".",
"info",
"(",
"\"First scheduling launched\"",
")",
"_t0",
"=",
"time",
".",
"time",
"(",
")",
"# Program start brok",
"self",
".",
"sched",
".",
"initial_program_status",
"(",
")",
"# First scheduling",
"self",
".",
"sched",
".",
"schedule",
"(",
")",
"statsmgr",
".",
"timer",
"(",
"'first_scheduling'",
",",
"time",
".",
"time",
"(",
")",
"-",
"_t0",
")",
"logger",
".",
"info",
"(",
"\"First scheduling done\"",
")",
"# Connect to our passive satellites if needed",
"for",
"satellite",
"in",
"[",
"s",
"for",
"s",
"in",
"list",
"(",
"self",
".",
"pollers",
".",
"values",
"(",
")",
")",
"if",
"s",
".",
"passive",
"]",
":",
"if",
"not",
"self",
".",
"daemon_connection_init",
"(",
"satellite",
")",
":",
"logger",
".",
"error",
"(",
"\"Passive satellite connection failed: %s\"",
",",
"satellite",
")",
"for",
"satellite",
"in",
"[",
"s",
"for",
"s",
"in",
"list",
"(",
"self",
".",
"reactionners",
".",
"values",
"(",
")",
")",
"if",
"s",
".",
"passive",
"]",
":",
"if",
"not",
"self",
".",
"daemon_connection_init",
"(",
"satellite",
")",
":",
"logger",
".",
"error",
"(",
"\"Passive satellite connection failed: %s\"",
",",
"satellite",
")",
"# Ticks are for recurrent function call like consume, del zombies etc",
"self",
".",
"sched",
".",
"ticks",
"=",
"0",
"self",
".",
"first_scheduling",
"=",
"True",
"# Each loop turn, execute the daemon specific treatment...",
"# only if the daemon has a configuration to manage",
"if",
"self",
".",
"sched",
".",
"pushed_conf",
":",
"# If scheduling is not yet enabled, enable scheduling",
"if",
"not",
"self",
".",
"sched",
".",
"must_schedule",
":",
"self",
".",
"sched",
".",
"start_scheduling",
"(",
")",
"self",
".",
"sched",
".",
"before_run",
"(",
")",
"self",
".",
"sched",
".",
"run",
"(",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"#%d - No monitoring configuration to scheduler...\"",
",",
"self",
".",
"loop_count",
")"
] |
Scheduler loop turn
Simply run the Alignak scheduler loop
This is called when a configuration got received by the scheduler daemon. As of it,
check if the first scheduling has been done... and manage this.
:return: None
|
[
"Scheduler",
"loop",
"turn"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/schedulerdaemon.py#L238-L282
|
train
|
Alignak-monitoring/alignak
|
alignak/daemons/schedulerdaemon.py
|
Alignak.get_managed_configurations
|
def get_managed_configurations(self):
"""Get the configurations managed by this scheduler
The configuration managed by a scheduler is the self configuration got
by the scheduler during the dispatching.
:return: a dict of scheduler links with instance_id as key and
hash, push_flavor and configuration identifier as values
:rtype: dict
"""
# for scheduler_link in list(self.schedulers.values()):
# res[scheduler_link.instance_id] = {
# 'hash': scheduler_link.hash,
# 'push_flavor': scheduler_link.push_flavor,
# 'managed_conf_id': scheduler_link.managed_conf_id
# }
res = {}
if self.sched.pushed_conf and self.cur_conf and 'instance_id' in self.cur_conf:
res[self.cur_conf['instance_id']] = {
'hash': self.cur_conf['hash'],
'push_flavor': self.cur_conf['push_flavor'],
'managed_conf_id': self.cur_conf['managed_conf_id']
}
logger.debug("Get managed configuration: %s", res)
return res
|
python
|
def get_managed_configurations(self):
"""Get the configurations managed by this scheduler
The configuration managed by a scheduler is the self configuration got
by the scheduler during the dispatching.
:return: a dict of scheduler links with instance_id as key and
hash, push_flavor and configuration identifier as values
:rtype: dict
"""
# for scheduler_link in list(self.schedulers.values()):
# res[scheduler_link.instance_id] = {
# 'hash': scheduler_link.hash,
# 'push_flavor': scheduler_link.push_flavor,
# 'managed_conf_id': scheduler_link.managed_conf_id
# }
res = {}
if self.sched.pushed_conf and self.cur_conf and 'instance_id' in self.cur_conf:
res[self.cur_conf['instance_id']] = {
'hash': self.cur_conf['hash'],
'push_flavor': self.cur_conf['push_flavor'],
'managed_conf_id': self.cur_conf['managed_conf_id']
}
logger.debug("Get managed configuration: %s", res)
return res
|
[
"def",
"get_managed_configurations",
"(",
"self",
")",
":",
"# for scheduler_link in list(self.schedulers.values()):",
"# res[scheduler_link.instance_id] = {",
"# 'hash': scheduler_link.hash,",
"# 'push_flavor': scheduler_link.push_flavor,",
"# 'managed_conf_id': scheduler_link.managed_conf_id",
"# }",
"res",
"=",
"{",
"}",
"if",
"self",
".",
"sched",
".",
"pushed_conf",
"and",
"self",
".",
"cur_conf",
"and",
"'instance_id'",
"in",
"self",
".",
"cur_conf",
":",
"res",
"[",
"self",
".",
"cur_conf",
"[",
"'instance_id'",
"]",
"]",
"=",
"{",
"'hash'",
":",
"self",
".",
"cur_conf",
"[",
"'hash'",
"]",
",",
"'push_flavor'",
":",
"self",
".",
"cur_conf",
"[",
"'push_flavor'",
"]",
",",
"'managed_conf_id'",
":",
"self",
".",
"cur_conf",
"[",
"'managed_conf_id'",
"]",
"}",
"logger",
".",
"debug",
"(",
"\"Get managed configuration: %s\"",
",",
"res",
")",
"return",
"res"
] |
Get the configurations managed by this scheduler
The configuration managed by a scheduler is the self configuration got
by the scheduler during the dispatching.
:return: a dict of scheduler links with instance_id as key and
hash, push_flavor and configuration identifier as values
:rtype: dict
|
[
"Get",
"the",
"configurations",
"managed",
"by",
"this",
"scheduler"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/schedulerdaemon.py#L284-L309
|
train
|
Alignak-monitoring/alignak
|
alignak/daemons/schedulerdaemon.py
|
Alignak.clean_previous_run
|
def clean_previous_run(self):
"""Clean variables from previous configuration
:return: None
"""
# Execute the base class treatment...
super(Alignak, self).clean_previous_run()
# Clean all lists
self.pollers.clear()
self.reactionners.clear()
self.brokers.clear()
|
python
|
def clean_previous_run(self):
"""Clean variables from previous configuration
:return: None
"""
# Execute the base class treatment...
super(Alignak, self).clean_previous_run()
# Clean all lists
self.pollers.clear()
self.reactionners.clear()
self.brokers.clear()
|
[
"def",
"clean_previous_run",
"(",
"self",
")",
":",
"# Execute the base class treatment...",
"super",
"(",
"Alignak",
",",
"self",
")",
".",
"clean_previous_run",
"(",
")",
"# Clean all lists",
"self",
".",
"pollers",
".",
"clear",
"(",
")",
"self",
".",
"reactionners",
".",
"clear",
"(",
")",
"self",
".",
"brokers",
".",
"clear",
"(",
")"
] |
Clean variables from previous configuration
:return: None
|
[
"Clean",
"variables",
"from",
"previous",
"configuration"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/schedulerdaemon.py#L503-L514
|
train
|
Alignak-monitoring/alignak
|
alignak/daemons/schedulerdaemon.py
|
Alignak.get_monitoring_problems
|
def get_monitoring_problems(self):
"""Get the current scheduler livesynthesis
:return: live synthesis and problems dictionary
:rtype: dict
"""
res = {}
if not self.sched:
return res
# Get statistics from the scheduler
scheduler_stats = self.sched.get_scheduler_stats(details=True)
if 'livesynthesis' in scheduler_stats:
res['livesynthesis'] = scheduler_stats['livesynthesis']
if 'problems' in scheduler_stats:
res['problems'] = scheduler_stats['problems']
return res
|
python
|
def get_monitoring_problems(self):
"""Get the current scheduler livesynthesis
:return: live synthesis and problems dictionary
:rtype: dict
"""
res = {}
if not self.sched:
return res
# Get statistics from the scheduler
scheduler_stats = self.sched.get_scheduler_stats(details=True)
if 'livesynthesis' in scheduler_stats:
res['livesynthesis'] = scheduler_stats['livesynthesis']
if 'problems' in scheduler_stats:
res['problems'] = scheduler_stats['problems']
return res
|
[
"def",
"get_monitoring_problems",
"(",
"self",
")",
":",
"res",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"sched",
":",
"return",
"res",
"# Get statistics from the scheduler",
"scheduler_stats",
"=",
"self",
".",
"sched",
".",
"get_scheduler_stats",
"(",
"details",
"=",
"True",
")",
"if",
"'livesynthesis'",
"in",
"scheduler_stats",
":",
"res",
"[",
"'livesynthesis'",
"]",
"=",
"scheduler_stats",
"[",
"'livesynthesis'",
"]",
"if",
"'problems'",
"in",
"scheduler_stats",
":",
"res",
"[",
"'problems'",
"]",
"=",
"scheduler_stats",
"[",
"'problems'",
"]",
"return",
"res"
] |
Get the current scheduler livesynthesis
:return: live synthesis and problems dictionary
:rtype: dict
|
[
"Get",
"the",
"current",
"scheduler",
"livesynthesis"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/schedulerdaemon.py#L553-L570
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/serviceextinfo.py
|
ServicesExtInfo.merge_extinfo
|
def merge_extinfo(service, extinfo):
"""Merge extended host information into a service
:param service: the service to edit
:type service: alignak.objects.service.Service
:param extinfo: the external info we get data from
:type extinfo: alignak.objects.serviceextinfo.ServiceExtInfo
:return: None
"""
properties = ['notes', 'notes_url', 'icon_image', 'icon_image_alt']
# service properties have precedence over serviceextinfo properties
for prop in properties:
if getattr(service, prop) == '' and getattr(extinfo, prop) != '':
setattr(service, prop, getattr(extinfo, prop))
|
python
|
def merge_extinfo(service, extinfo):
"""Merge extended host information into a service
:param service: the service to edit
:type service: alignak.objects.service.Service
:param extinfo: the external info we get data from
:type extinfo: alignak.objects.serviceextinfo.ServiceExtInfo
:return: None
"""
properties = ['notes', 'notes_url', 'icon_image', 'icon_image_alt']
# service properties have precedence over serviceextinfo properties
for prop in properties:
if getattr(service, prop) == '' and getattr(extinfo, prop) != '':
setattr(service, prop, getattr(extinfo, prop))
|
[
"def",
"merge_extinfo",
"(",
"service",
",",
"extinfo",
")",
":",
"properties",
"=",
"[",
"'notes'",
",",
"'notes_url'",
",",
"'icon_image'",
",",
"'icon_image_alt'",
"]",
"# service properties have precedence over serviceextinfo properties",
"for",
"prop",
"in",
"properties",
":",
"if",
"getattr",
"(",
"service",
",",
"prop",
")",
"==",
"''",
"and",
"getattr",
"(",
"extinfo",
",",
"prop",
")",
"!=",
"''",
":",
"setattr",
"(",
"service",
",",
"prop",
",",
"getattr",
"(",
"extinfo",
",",
"prop",
")",
")"
] |
Merge extended host information into a service
:param service: the service to edit
:type service: alignak.objects.service.Service
:param extinfo: the external info we get data from
:type extinfo: alignak.objects.serviceextinfo.ServiceExtInfo
:return: None
|
[
"Merge",
"extended",
"host",
"information",
"into",
"a",
"service"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/serviceextinfo.py#L144-L157
|
train
|
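A tiny sketch of the precedence rule in merge_extinfo: a property is copied from the extended info only when the service still has an empty value. Simple namespaces replace the alignak objects, and the sample values are invented.

from types import SimpleNamespace

service = SimpleNamespace(notes='', notes_url='http://wiki/svc',
                          icon_image='', icon_image_alt='')
extinfo = SimpleNamespace(notes='Managed by team A', notes_url='http://other',
                          icon_image='svc.png', icon_image_alt='')

for prop in ['notes', 'notes_url', 'icon_image', 'icon_image_alt']:
    if getattr(service, prop) == '' and getattr(extinfo, prop) != '':
        setattr(service, prop, getattr(extinfo, prop))

print(service.notes)      # -> 'Managed by team A' (was empty, filled from extinfo)
print(service.notes_url)  # -> 'http://wiki/svc' (already set, service wins)
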
Alignak-monitoring/alignak
|
alignak/commandcall.py
|
CommandCall.get_command_and_args
|
def get_command_and_args(self):
r"""We want to get the command and the args with ! splitting.
but don't forget to protect against the \! to avoid splitting on them
Remember: A Nagios-like command is command_name!arg1!arg2!...
:return: None
"""
# First protect
p_call = self.call.replace(r'\!', '___PROTECT_EXCLAMATION___')
tab = p_call.split('!')
return tab[0].strip(), [s.replace('___PROTECT_EXCLAMATION___', '!') for s in tab[1:]]
|
python
|
def get_command_and_args(self):
r"""We want to get the command and the args with ! splitting.
but don't forget to protect against the \! to avoid splitting on them
Remember: A Nagios-like command is command_name!arg1!arg2!...
:return: None
"""
# First protect
p_call = self.call.replace(r'\!', '___PROTECT_EXCLAMATION___')
tab = p_call.split('!')
return tab[0].strip(), [s.replace('___PROTECT_EXCLAMATION___', '!') for s in tab[1:]]
|
[
"def",
"get_command_and_args",
"(",
"self",
")",
":",
"# First protect",
"p_call",
"=",
"self",
".",
"call",
".",
"replace",
"(",
"r'\\!'",
",",
"'___PROTECT_EXCLAMATION___'",
")",
"tab",
"=",
"p_call",
".",
"split",
"(",
"'!'",
")",
"return",
"tab",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"[",
"s",
".",
"replace",
"(",
"'___PROTECT_EXCLAMATION___'",
",",
"'!'",
")",
"for",
"s",
"in",
"tab",
"[",
"1",
":",
"]",
"]"
] |
r"""We want to get the command and the args with ! splitting.
but don't forget to protect against the \! to avoid splitting on them
Remember: A Nagios-like command is command_name!arg1!arg2!...
:return: None
|
[
"r",
"We",
"want",
"to",
"get",
"the",
"command",
"and",
"the",
"args",
"with",
"!",
"splitting",
".",
"but",
"don",
"t",
"forget",
"to",
"protect",
"against",
"the",
"\\",
"!",
"to",
"avoid",
"splitting",
"on",
"them"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/commandcall.py#L157-L169
|
train
|
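The protect/split/restore trick above can be exercised on its own; this standalone sketch reproduces it for a Nagios-like command string (the function name is invented for the example).

def split_command_call(call):
    # Protect escaped '\!' so it survives the split on '!'
    protected = call.replace(r'\!', '___PROTECT_EXCLAMATION___')
    parts = protected.split('!')
    command = parts[0].strip()
    args = [p.replace('___PROTECT_EXCLAMATION___', '!') for p in parts[1:]]
    return command, args

print(split_command_call(r'check_http!-u /login\!admin!30'))
# -> ('check_http', ['-u /login!admin', '30'])
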
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.get_a_satellite_link
|
def get_a_satellite_link(sat_type, sat_dict):
"""Get a SatelliteLink object for a given satellite type and a dictionary
:param sat_type: type of satellite
:param sat_dict: satellite configuration data
:return:
"""
cls = get_alignak_class('alignak.objects.%slink.%sLink' % (sat_type, sat_type.capitalize()))
return cls(params=sat_dict, parsing=False)
|
python
|
def get_a_satellite_link(sat_type, sat_dict):
"""Get a SatelliteLink object for a given satellite type and a dictionary
:param sat_type: type of satellite
:param sat_dict: satellite configuration data
:return:
"""
cls = get_alignak_class('alignak.objects.%slink.%sLink' % (sat_type, sat_type.capitalize()))
return cls(params=sat_dict, parsing=False)
|
[
"def",
"get_a_satellite_link",
"(",
"sat_type",
",",
"sat_dict",
")",
":",
"cls",
"=",
"get_alignak_class",
"(",
"'alignak.objects.%slink.%sLink'",
"%",
"(",
"sat_type",
",",
"sat_type",
".",
"capitalize",
"(",
")",
")",
")",
"return",
"cls",
"(",
"params",
"=",
"sat_dict",
",",
"parsing",
"=",
"False",
")"
] |
Get a SatelliteLink object for a given satellite type and a dictionary
:param sat_type: type of satellite
:param sat_dict: satellite configuration data
:return:
|
[
"Get",
"a",
"SatelliteLink",
"object",
"for",
"a",
"given",
"satellite",
"type",
"and",
"a",
"dictionary"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L306-L314
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.get_livestate
|
def get_livestate(self):
"""Get the SatelliteLink live state.
The live state is a tuple information containing a state identifier and a message, where:
state is:
- 0 for an up and running satellite
- 1 if the satellite is not reachable
- 2 if the satellite is dead
- 3 else (not active)
:return: tuple
"""
livestate = 0
if self.active:
if not self.reachable:
livestate = 1
elif not self.alive:
livestate = 2
else:
livestate = 3
livestate_output = "%s/%s is %s" % (self.type, self.name, [
"up and running.",
"warning because not reachable.",
"critical because not responding.",
"not active by configuration."
][livestate])
return (livestate, livestate_output)
|
python
|
def get_livestate(self):
"""Get the SatelliteLink live state.
The live state is a tuple information containing a state identifier and a message, where:
state is:
- 0 for an up and running satellite
- 1 if the satellite is not reachable
- 2 if the satellite is dead
- 3 else (not active)
:return: tuple
"""
livestate = 0
if self.active:
if not self.reachable:
livestate = 1
elif not self.alive:
livestate = 2
else:
livestate = 3
livestate_output = "%s/%s is %s" % (self.type, self.name, [
"up and running.",
"warning because not reachable.",
"critical because not responding.",
"not active by configuration."
][livestate])
return (livestate, livestate_output)
|
[
"def",
"get_livestate",
"(",
"self",
")",
":",
"livestate",
"=",
"0",
"if",
"self",
".",
"active",
":",
"if",
"not",
"self",
".",
"reachable",
":",
"livestate",
"=",
"1",
"elif",
"not",
"self",
".",
"alive",
":",
"livestate",
"=",
"2",
"else",
":",
"livestate",
"=",
"3",
"livestate_output",
"=",
"\"%s/%s is %s\"",
"%",
"(",
"self",
".",
"type",
",",
"self",
".",
"name",
",",
"[",
"\"up and running.\"",
",",
"\"warning because not reachable.\"",
",",
"\"critical because not responding.\"",
",",
"\"not active by configuration.\"",
"]",
"[",
"livestate",
"]",
")",
"return",
"(",
"livestate",
",",
"livestate_output",
")"
] |
Get the SatelliteLink live state.
The live state is a tuple containing a state identifier and a message, where:
state is:
- 0 for an up and running satellite
- 1 if the satellite is not reachable
- 2 if the satellite is dead
- 3 else (not active)
:return: tuple
|
[
"Get",
"the",
"SatelliteLink",
"live",
"state",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L316-L344
|
train
|
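A self-contained sketch of the list-indexing pattern used by get_livestate above: compute a 0..3 state, then index a message table with it. The boolean flags below are plain arguments, not the real SatelliteLink attributes.

def livestate(active, reachable, alive, sat_type='scheduler', name='scheduler-master'):
    # Same precedence as the original: inactive wins, then unreachable, then dead.
    state = 0
    if active:
        if not reachable:
            state = 1
        elif not alive:
            state = 2
    else:
        state = 3
    messages = [
        "up and running.",
        "warning because not reachable.",
        "critical because not responding.",
        "not active by configuration.",
    ]
    return state, "%s/%s is %s" % (sat_type, name, messages[state])

print(livestate(active=True, reachable=True, alive=True))    # (0, 'scheduler/scheduler-master is up and running.')
print(livestate(active=False, reachable=True, alive=True))   # (3, '... not active by configuration.')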
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.get_and_clear_context
|
def get_and_clear_context(self):
"""Get and clean all of our broks, actions, external commands and homerun
:return: list of all broks of the satellite link
:rtype: list
"""
res = (self.broks, self.actions, self.wait_homerun, self.pushed_commands)
self.broks = []
self.actions = {}
self.wait_homerun = {}
self.pushed_commands = []
return res
|
python
|
def get_and_clear_context(self):
"""Get and clean all of our broks, actions, external commands and homerun
:return: list of all broks of the satellite link
:rtype: list
"""
res = (self.broks, self.actions, self.wait_homerun, self.pushed_commands)
self.broks = []
self.actions = {}
self.wait_homerun = {}
self.pushed_commands = []
return res
|
[
"def",
"get_and_clear_context",
"(",
"self",
")",
":",
"res",
"=",
"(",
"self",
".",
"broks",
",",
"self",
".",
"actions",
",",
"self",
".",
"wait_homerun",
",",
"self",
".",
"pushed_commands",
")",
"self",
".",
"broks",
"=",
"[",
"]",
"self",
".",
"actions",
"=",
"{",
"}",
"self",
".",
"wait_homerun",
"=",
"{",
"}",
"self",
".",
"pushed_commands",
"=",
"[",
"]",
"return",
"res"
] |
Get and clean all of our broks, actions, external commands and homerun
:return: list of all broks of the satellite link
:rtype: list
|
[
"Get",
"and",
"clean",
"all",
"of",
"our",
"broks",
"actions",
"external",
"commands",
"and",
"homerun"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L363-L374
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.prepare_for_conf
|
def prepare_for_conf(self):
"""Initialize the pushed configuration dictionary
with the inner properties that are to be propagated to the satellite link.
:return: None
"""
logger.debug("- preparing: %s", self)
self.cfg = {
'self_conf': self.give_satellite_cfg(),
'schedulers': {},
'arbiters': {}
}
logger.debug("- prepared: %s", self.cfg)
|
python
|
def prepare_for_conf(self):
"""Initialize the pushed configuration dictionary
with the inner properties that are to be propagated to the satellite link.
:return: None
"""
logger.debug("- preparing: %s", self)
self.cfg = {
'self_conf': self.give_satellite_cfg(),
'schedulers': {},
'arbiters': {}
}
logger.debug("- prepared: %s", self.cfg)
|
[
"def",
"prepare_for_conf",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"- preparing: %s\"",
",",
"self",
")",
"self",
".",
"cfg",
"=",
"{",
"'self_conf'",
":",
"self",
".",
"give_satellite_cfg",
"(",
")",
",",
"'schedulers'",
":",
"{",
"}",
",",
"'arbiters'",
":",
"{",
"}",
"}",
"logger",
".",
"debug",
"(",
"\"- prepared: %s\"",
",",
"self",
".",
"cfg",
")"
] |
Initialize the pushed configuration dictionary
with the inner properties that are to be propagated to the satellite link.
:return: None
|
[
"Initialize",
"the",
"pushed",
"configuration",
"dictionary",
"with",
"the",
"inner",
"properties",
"that",
"are",
"to",
"be",
"propagated",
"to",
"the",
"satellite",
"link",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L386-L398
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.give_satellite_cfg
|
def give_satellite_cfg(self):
"""Get the default information for a satellite.
Overridden by the specific satellites links
:return: dictionary of information common to all the links
:rtype: dict
"""
# All the satellite link class properties that are 'to_send' are stored in a
# dictionary to be pushed to the satellite when the configuration is dispatched
res = {}
properties = self.__class__.properties
for prop, entry in list(properties.items()):
if hasattr(self, prop) and entry.to_send:
res[prop] = getattr(self, prop)
return res
|
python
|
def give_satellite_cfg(self):
"""Get the default information for a satellite.
Overridden by the specific satellites links
:return: dictionary of information common to all the links
:rtype: dict
"""
# All the satellite link class properties that are 'to_send' are stored in a
# dictionary to be pushed to the satellite when the configuration is dispatched
res = {}
properties = self.__class__.properties
for prop, entry in list(properties.items()):
if hasattr(self, prop) and entry.to_send:
res[prop] = getattr(self, prop)
return res
|
[
"def",
"give_satellite_cfg",
"(",
"self",
")",
":",
"# All the satellite link class properties that are 'to_send' are stored in a",
"# dictionary to be pushed to the satellite when the configuration is dispatched",
"res",
"=",
"{",
"}",
"properties",
"=",
"self",
".",
"__class__",
".",
"properties",
"for",
"prop",
",",
"entry",
"in",
"list",
"(",
"properties",
".",
"items",
"(",
")",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"prop",
")",
"and",
"entry",
".",
"to_send",
":",
"res",
"[",
"prop",
"]",
"=",
"getattr",
"(",
"self",
",",
"prop",
")",
"return",
"res"
] |
Get the default information for a satellite.
Overridden by the specific satellites links
:return: dictionary of information common to all the links
:rtype: dict
|
[
"Get",
"the",
"default",
"information",
"for",
"a",
"satellite",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L400-L415
|
train
|
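A hypothetical illustration of the 'export only the properties flagged to_send' idea from give_satellite_cfg above; the Prop tuple and the Demo class are invented for the sketch and are not the Alignak property classes.

from collections import namedtuple

# Stand-in for an Alignak property entry: only the to_send flag matters here.
Prop = namedtuple('Prop', 'default to_send')

class Demo(object):
    properties = {
        'name': Prop(default='', to_send=True),
        'port': Prop(default=7768, to_send=True),
        'secret': Prop(default='', to_send=False),  # never pushed to a satellite
    }

    def __init__(self):
        self.name = 'poller-master'
        self.port = 7771
        self.secret = 'xxx'

    def give_cfg(self):
        # Keep only the attributes whose property entry is flagged to_send.
        return {prop: getattr(self, prop)
                for prop, entry in self.properties.items()
                if entry.to_send and hasattr(self, prop)}

print(Demo().give_cfg())  # {'name': 'poller-master', 'port': 7771}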
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.give_satellite_json
|
def give_satellite_json(self):
"""Get the json information for a satellite.
This is to provide information that will be exposed by a daemon on its HTTP interface.
:return: dictionary of information common to all the links
:rtype: dict
"""
daemon_properties = ['type', 'name', 'uri', 'spare', 'configuration_sent',
'realm_name', 'manage_sub_realms',
'active', 'reachable', 'alive', 'passive',
'last_check', 'polling_interval', 'max_check_attempts']
(livestate, livestate_output) = self.get_livestate()
res = {
"livestate": livestate,
"livestate_output": livestate_output
}
for sat_prop in daemon_properties:
res[sat_prop] = getattr(self, sat_prop, 'not_yet_defined')
return res
|
python
|
def give_satellite_json(self):
"""Get the json information for a satellite.
This is to provide information that will be exposed by a daemon on its HTTP interface.
:return: dictionary of information common to all the links
:rtype: dict
"""
daemon_properties = ['type', 'name', 'uri', 'spare', 'configuration_sent',
'realm_name', 'manage_sub_realms',
'active', 'reachable', 'alive', 'passive',
'last_check', 'polling_interval', 'max_check_attempts']
(livestate, livestate_output) = self.get_livestate()
res = {
"livestate": livestate,
"livestate_output": livestate_output
}
for sat_prop in daemon_properties:
res[sat_prop] = getattr(self, sat_prop, 'not_yet_defined')
return res
|
[
"def",
"give_satellite_json",
"(",
"self",
")",
":",
"daemon_properties",
"=",
"[",
"'type'",
",",
"'name'",
",",
"'uri'",
",",
"'spare'",
",",
"'configuration_sent'",
",",
"'realm_name'",
",",
"'manage_sub_realms'",
",",
"'active'",
",",
"'reachable'",
",",
"'alive'",
",",
"'passive'",
",",
"'last_check'",
",",
"'polling_interval'",
",",
"'max_check_attempts'",
"]",
"(",
"livestate",
",",
"livestate_output",
")",
"=",
"self",
".",
"get_livestate",
"(",
")",
"res",
"=",
"{",
"\"livestate\"",
":",
"livestate",
",",
"\"livestate_output\"",
":",
"livestate_output",
"}",
"for",
"sat_prop",
"in",
"daemon_properties",
":",
"res",
"[",
"sat_prop",
"]",
"=",
"getattr",
"(",
"self",
",",
"sat_prop",
",",
"'not_yet_defined'",
")",
"return",
"res"
] |
Get the json information for a satellite.
This is to provide information that will be exposed by a daemon on its HTTP interface.
:return: dictionary of information common to all the links
:rtype: dict
|
[
"Get",
"the",
"json",
"information",
"for",
"a",
"satellite",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L417-L437
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.manages
|
def manages(self, cfg_part):
"""Tell if the satellite is managing this configuration part
The managed configuration is formed as a dictionary indexed on the link instance_id:
{
u'SchedulerLink_1': {
u'hash': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',
u'push_flavor': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',
u'managed_conf_id': [u'Config_1']
}
}
Note that the managed configuration is a string array rather than a simple string...
no special reason for this, probably due to the serialization when the configuration is
pushed :/
:param cfg_part: configuration part as prepared by the Dispatcher
:type cfg_part: Conf
:return: True if the satellite manages this configuration
:rtype: bool
"""
logger.debug("Do I (%s/%s) manage: %s, my managed configuration(s): %s",
self.type, self.name, cfg_part, self.cfg_managed)
# If we do not yet manage a configuration
if not self.cfg_managed:
logger.info("I (%s/%s) do not manage (yet) any configuration!", self.type, self.name)
return False
# Check in the schedulers list configurations
for managed_cfg in list(self.cfg_managed.values()):
# If not even the cfg_id in the managed_conf, bail out
if managed_cfg['managed_conf_id'] == cfg_part.instance_id \
and managed_cfg['push_flavor'] == cfg_part.push_flavor:
logger.debug("I do manage this configuration: %s", cfg_part)
break
else:
logger.warning("I (%s/%s) do not manage this configuration: %s",
self.type, self.name, cfg_part)
return False
return True
|
python
|
def manages(self, cfg_part):
"""Tell if the satellite is managing this configuration part
The managed configuration is formed as a dictionary indexed on the link instance_id:
{
u'SchedulerLink_1': {
u'hash': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',
u'push_flavor': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',
u'managed_conf_id': [u'Config_1']
}
}
Note that the managed configuration is a string array rather than a simple string...
no special reason for this, probably due to the serialization when the configuration is
pushed :/
:param cfg_part: configuration part as prepared by the Dispatcher
:type cfg_part: Conf
:return: True if the satellite manages this configuration
:rtype: bool
"""
logger.debug("Do I (%s/%s) manage: %s, my managed configuration(s): %s",
self.type, self.name, cfg_part, self.cfg_managed)
# If we do not yet manage a configuration
if not self.cfg_managed:
logger.info("I (%s/%s) do not manage (yet) any configuration!", self.type, self.name)
return False
# Check in the schedulers list configurations
for managed_cfg in list(self.cfg_managed.values()):
# If not even the cfg_id in the managed_conf, bail out
if managed_cfg['managed_conf_id'] == cfg_part.instance_id \
and managed_cfg['push_flavor'] == cfg_part.push_flavor:
logger.debug("I do manage this configuration: %s", cfg_part)
break
else:
logger.warning("I (%s/%s) do not manage this configuration: %s",
self.type, self.name, cfg_part)
return False
return True
|
[
"def",
"manages",
"(",
"self",
",",
"cfg_part",
")",
":",
"logger",
".",
"debug",
"(",
"\"Do I (%s/%s) manage: %s, my managed configuration(s): %s\"",
",",
"self",
".",
"type",
",",
"self",
".",
"name",
",",
"cfg_part",
",",
"self",
".",
"cfg_managed",
")",
"# If we do not yet manage a configuration",
"if",
"not",
"self",
".",
"cfg_managed",
":",
"logger",
".",
"info",
"(",
"\"I (%s/%s) do not manage (yet) any configuration!\"",
",",
"self",
".",
"type",
",",
"self",
".",
"name",
")",
"return",
"False",
"# Check in the schedulers list configurations",
"for",
"managed_cfg",
"in",
"list",
"(",
"self",
".",
"cfg_managed",
".",
"values",
"(",
")",
")",
":",
"# If not even the cfg_id in the managed_conf, bail out",
"if",
"managed_cfg",
"[",
"'managed_conf_id'",
"]",
"==",
"cfg_part",
".",
"instance_id",
"and",
"managed_cfg",
"[",
"'push_flavor'",
"]",
"==",
"cfg_part",
".",
"push_flavor",
":",
"logger",
".",
"debug",
"(",
"\"I do manage this configuration: %s\"",
",",
"cfg_part",
")",
"break",
"else",
":",
"logger",
".",
"warning",
"(",
"\"I (%s/%s) do not manage this configuration: %s\"",
",",
"self",
".",
"type",
",",
"self",
".",
"name",
",",
"cfg_part",
")",
"return",
"False",
"return",
"True"
] |
Tell if the satellite is managing this configuration part
The managed configuration is formed as a dictionary indexed on the link instance_id:
{
u'SchedulerLink_1': {
u'hash': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',
u'push_flavor': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',
u'managed_conf_id': [u'Config_1']
}
}
Note that the managed configuration is a string array rather than a simple string...
no special reason for this, probably due to the serialization when the configuration is
pushed :/
:param cfg_part: configuration part as prepared by the Dispatcher
:type cfg_part: Conf
:return: True if the satellite manages this configuration
:rtype: bool
|
[
"Tell",
"if",
"the",
"satellite",
"is",
"managing",
"this",
"configuration",
"part"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L439-L480
|
train
|
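The for/else construct in manages() above is easy to misread, so here is a tiny standalone sketch with hypothetical configuration dictionaries: the else branch runs only when the loop finished without hitting break.

def manages(cfg_managed, instance_id, push_flavor):
    # Walk the managed configurations; break on a match, otherwise the
    # for/else branch reports that nothing matched.
    for managed in cfg_managed.values():
        if managed['managed_conf_id'] == instance_id and managed['push_flavor'] == push_flavor:
            break
    else:
        return False
    return True

cfg_managed = {
    'SchedulerLink_1': {'managed_conf_id': 'Config_1', 'push_flavor': 'abc123'},
}
print(manages(cfg_managed, 'Config_1', 'abc123'))  # True
print(manages(cfg_managed, 'Config_2', 'abc123'))  # False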
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.set_alive
|
def set_alive(self):
"""Set alive, reachable, and reset attempts.
If we change state, raise a status brok update
alive, means the daemon is present in the system
reachable, means that the HTTP connection is valid
With this function we confirm that the daemon is reachable and, thus, we assume it is alive!
:return: None
"""
was_alive = self.alive
self.alive = True
self.reachable = True
self.attempt = 0
# We came from dead to alive! We must propagate the good news
if not was_alive:
logger.info("Setting %s satellite as alive :)", self.name)
self.broks.append(self.get_update_status_brok())
|
python
|
def set_alive(self):
"""Set alive, reachable, and reset attempts.
If we change state, raise a status brok update
alive, means the daemon is present in the system
reachable, means that the HTTP connection is valid
With this function we confirm that the daemon is reachable and, thus, we assume it is alive!
:return: None
"""
was_alive = self.alive
self.alive = True
self.reachable = True
self.attempt = 0
# We came from dead to alive! We must propagate the good news
if not was_alive:
logger.info("Setting %s satellite as alive :)", self.name)
self.broks.append(self.get_update_status_brok())
|
[
"def",
"set_alive",
"(",
"self",
")",
":",
"was_alive",
"=",
"self",
".",
"alive",
"self",
".",
"alive",
"=",
"True",
"self",
".",
"reachable",
"=",
"True",
"self",
".",
"attempt",
"=",
"0",
"# We came from dead to alive! We must propagate the good news",
"if",
"not",
"was_alive",
":",
"logger",
".",
"info",
"(",
"\"Setting %s satellite as alive :)\"",
",",
"self",
".",
"name",
")",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"get_update_status_brok",
"(",
")",
")"
] |
Set alive, reachable, and reset attempts.
If we change state, raise a status brok update
alive, means the daemon is present in the system
reachable, means that the HTTP connection is valid
With this function we confirm that the daemon is reachable and, thus, we assume it is alive!
:return: None
|
[
"Set",
"alive",
"reachable",
"and",
"reset",
"attempts",
".",
"If",
"we",
"change",
"state",
"raise",
"a",
"status",
"brok",
"update"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L504-L523
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.add_failed_check_attempt
|
def add_failed_check_attempt(self, reason=''):
"""Set the daemon as unreachable and add a failed attempt
if we reach the maximum attempts, set the daemon as dead
:param reason: the reason for adding an attempt (stack trace sometimes)
:type reason: str
:return: None
"""
self.reachable = False
self.attempt = self.attempt + 1
logger.debug("Failed attempt for %s (%d/%d), reason: %s",
self.name, self.attempt, self.max_check_attempts, reason)
# Don't need to warn again and again if the satellite is already dead
# Only warn when it is alive
if self.alive:
if not self.stopping:
logger.warning("Add failed attempt for %s (%d/%d) - %s",
self.name, self.attempt, self.max_check_attempts, reason)
else:
logger.info("Stopping... failed attempt for %s (%d/%d) - also probably stopping",
self.name, self.attempt, self.max_check_attempts)
# If we reached the maximum attempts, set the daemon as dead
if self.attempt >= self.max_check_attempts:
if not self.stopping:
logger.warning("Set %s as dead, too much failed attempts (%d), last problem is: %s",
self.name, self.max_check_attempts, reason)
else:
logger.info("Stopping... set %s as dead, too much failed attempts (%d)",
self.name, self.max_check_attempts)
self.set_dead()
|
python
|
def add_failed_check_attempt(self, reason=''):
"""Set the daemon as unreachable and add a failed attempt
if we reach the maximum attempts, set the daemon as dead
:param reason: the reason for adding an attempt (stack trace sometimes)
:type reason: str
:return: None
"""
self.reachable = False
self.attempt = self.attempt + 1
logger.debug("Failed attempt for %s (%d/%d), reason: %s",
self.name, self.attempt, self.max_check_attempts, reason)
# Don't need to warn again and again if the satellite is already dead
# Only warn when it is alive
if self.alive:
if not self.stopping:
logger.warning("Add failed attempt for %s (%d/%d) - %s",
self.name, self.attempt, self.max_check_attempts, reason)
else:
logger.info("Stopping... failed attempt for %s (%d/%d) - also probably stopping",
self.name, self.attempt, self.max_check_attempts)
# If we reached the maximum attempts, set the daemon as dead
if self.attempt >= self.max_check_attempts:
if not self.stopping:
logger.warning("Set %s as dead, too much failed attempts (%d), last problem is: %s",
self.name, self.max_check_attempts, reason)
else:
logger.info("Stopping... set %s as dead, too much failed attempts (%d)",
self.name, self.max_check_attempts)
self.set_dead()
|
[
"def",
"add_failed_check_attempt",
"(",
"self",
",",
"reason",
"=",
"''",
")",
":",
"self",
".",
"reachable",
"=",
"False",
"self",
".",
"attempt",
"=",
"self",
".",
"attempt",
"+",
"1",
"logger",
".",
"debug",
"(",
"\"Failed attempt for %s (%d/%d), reason: %s\"",
",",
"self",
".",
"name",
",",
"self",
".",
"attempt",
",",
"self",
".",
"max_check_attempts",
",",
"reason",
")",
"# Don't need to warn again and again if the satellite is already dead",
"# Only warn when it is alive",
"if",
"self",
".",
"alive",
":",
"if",
"not",
"self",
".",
"stopping",
":",
"logger",
".",
"warning",
"(",
"\"Add failed attempt for %s (%d/%d) - %s\"",
",",
"self",
".",
"name",
",",
"self",
".",
"attempt",
",",
"self",
".",
"max_check_attempts",
",",
"reason",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Stopping... failed attempt for %s (%d/%d) - also probably stopping\"",
",",
"self",
".",
"name",
",",
"self",
".",
"attempt",
",",
"self",
".",
"max_check_attempts",
")",
"# If we reached the maximum attempts, set the daemon as dead",
"if",
"self",
".",
"attempt",
">=",
"self",
".",
"max_check_attempts",
":",
"if",
"not",
"self",
".",
"stopping",
":",
"logger",
".",
"warning",
"(",
"\"Set %s as dead, too much failed attempts (%d), last problem is: %s\"",
",",
"self",
".",
"name",
",",
"self",
".",
"max_check_attempts",
",",
"reason",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Stopping... set %s as dead, too much failed attempts (%d)\"",
",",
"self",
".",
"name",
",",
"self",
".",
"max_check_attempts",
")",
"self",
".",
"set_dead",
"(",
")"
] |
Set the daemon as unreachable and add a failed attempt
if we reach the maximum attempts, set the daemon as dead
:param reason: the reason for adding an attempt (stack trace sometimes)
:type reason: str
:return: None
|
[
"Set",
"the",
"daemon",
"as",
"unreachable",
"and",
"add",
"a",
"failed",
"attempt",
"if",
"we",
"reach",
"the",
"maximum",
"attempts",
"set",
"the",
"daemon",
"as",
"dead"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L543-L575
|
train
|
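A compact sketch of the attempt-counting logic in add_failed_check_attempt above, with a plain class standing in for the satellite link; all names are illustrative.

class FakeLink(object):
    def __init__(self, max_check_attempts=3):
        self.max_check_attempts = max_check_attempts
        self.attempt = 0
        self.reachable = True
        self.alive = True

    def add_failed_check_attempt(self, reason=''):
        # Each failure marks the link unreachable and bumps the counter;
        # reaching the maximum flips the link to dead (alive = False).
        self.reachable = False
        self.attempt += 1
        if self.attempt >= self.max_check_attempts:
            self.alive = False

link = FakeLink(max_check_attempts=3)
for _ in range(3):
    link.add_failed_check_attempt('timeout')
print(link.attempt, link.alive)  # 3 False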
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.valid_connection
|
def valid_connection(*outer_args, **outer_kwargs):
# pylint: disable=unused-argument, no-method-argument
"""Check if the daemon connection is established and valid"""
def decorator(func): # pylint: disable=missing-docstring
def decorated(*args, **kwargs): # pylint: disable=missing-docstring
# outer_args and outer_kwargs are the decorator arguments
# args and kwargs are the decorated function arguments
link = args[0]
if not link.con:
raise LinkError("The connection is not created for %s" % link.name)
if not link.running_id:
raise LinkError("The connection is not initialized for %s" % link.name)
return func(*args, **kwargs)
return decorated
return decorator
|
python
|
def valid_connection(*outer_args, **outer_kwargs):
# pylint: disable=unused-argument, no-method-argument
"""Check if the daemon connection is established and valid"""
def decorator(func): # pylint: disable=missing-docstring
def decorated(*args, **kwargs): # pylint: disable=missing-docstring
# outer_args and outer_kwargs are the decorator arguments
# args and kwargs are the decorated function arguments
link = args[0]
if not link.con:
raise LinkError("The connection is not created for %s" % link.name)
if not link.running_id:
raise LinkError("The connection is not initialized for %s" % link.name)
return func(*args, **kwargs)
return decorated
return decorator
|
[
"def",
"valid_connection",
"(",
"*",
"outer_args",
",",
"*",
"*",
"outer_kwargs",
")",
":",
"# pylint: disable=unused-argument, no-method-argument",
"def",
"decorator",
"(",
"func",
")",
":",
"# pylint: disable=missing-docstring",
"def",
"decorated",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=missing-docstring",
"# outer_args and outer_kwargs are the decorator arguments",
"# args and kwargs are the decorated function arguments",
"link",
"=",
"args",
"[",
"0",
"]",
"if",
"not",
"link",
".",
"con",
":",
"raise",
"LinkError",
"(",
"\"The connection is not created for %s\"",
"%",
"link",
".",
"name",
")",
"if",
"not",
"link",
".",
"running_id",
":",
"raise",
"LinkError",
"(",
"\"The connection is not initialized for %s\"",
"%",
"link",
".",
"name",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorated",
"return",
"decorator"
] |
Check if the daemon connection is established and valid
|
[
"Check",
"if",
"the",
"daemon",
"connection",
"is",
"established",
"and",
"valid"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L577-L592
|
train
|
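valid_connection() is a decorator factory: calling it returns a decorator, which in turn returns the wrapped function. Below is a self-contained sketch of the same three-level structure with a made-up precondition check; require_connection and Link are hypothetical names, not the Alignak API.

import functools

def require_connection(*outer_args, **outer_kwargs):
    # Outer level: receives the (here unused) decorator arguments.
    def decorator(func):
        # Middle level: receives the function to wrap.
        @functools.wraps(func)
        def decorated(link, *args, **kwargs):
            # Inner level: runs at call time and enforces the precondition.
            if not getattr(link, 'con', None):
                raise RuntimeError("no connection for %s" % link.name)
            return func(link, *args, **kwargs)
        return decorated
    return decorator

class Link(object):
    name = 'broker-master'
    con = object()  # pretend the HTTP connection object exists

    @require_connection()
    def ping(self):
        return 'pong'

print(Link().ping())  # pong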
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.communicate
|
def communicate(*outer_args, **outer_kwargs):
# pylint: disable=unused-argument, no-method-argument
"""Check if the daemon connection is authorized and valid"""
def decorator(func): # pylint: disable=missing-docstring
def decorated(*args, **kwargs): # pylint: disable=missing-docstring
# outer_args and outer_kwargs are the decorator arguments
# args and kwargs are the decorated function arguments
fn_name = func.__name__
link = args[0]
if not link.alive:
logger.warning("%s is not alive for %s", link.name, fn_name)
return None
try:
if not link.reachable:
raise LinkError("The %s %s is not reachable" % (link.type, link.name))
logger.debug("[%s] Calling: %s, %s, %s", link.name, fn_name, args, kwargs)
return func(*args, **kwargs)
except HTTPClientConnectionException as exp:
# A Connection error is raised when the daemon connection cannot be established
# No way with the configuration parameters!
if not link.stopping:
logger.warning("A daemon (%s/%s) that we must be related with "
"cannot be connected: %s", link.type, link.name, exp)
else:
logger.info("Stopping... daemon (%s/%s) cannot be connected. "
"It is also probably stopping or yet stopped.",
link.type, link.name)
link.set_dead()
except (LinkError, HTTPClientTimeoutException) as exp:
link.add_failed_check_attempt("Connection timeout "
"with '%s': %s" % (fn_name, str(exp)))
return False
except HTTPClientDataException as exp:
# A Data error is raised when the daemon HTTP response is not 200!
# No way with the communication if some problems exist in the daemon interface!
# Abort all
err = "Some daemons that we must be related with " \
"have some interface problems. Sorry, I bail out"
logger.error(err)
os.sys.exit(err)
except HTTPClientException as exp:
link.add_failed_check_attempt("Error with '%s': %s" % (fn_name, str(exp)))
return None
return decorated
return decorator
|
python
|
def communicate(*outer_args, **outer_kwargs):
# pylint: disable=unused-argument, no-method-argument
"""Check if the daemon connection is authorized and valid"""
def decorator(func): # pylint: disable=missing-docstring
def decorated(*args, **kwargs): # pylint: disable=missing-docstring
# outer_args and outer_kwargs are the decorator arguments
# args and kwargs are the decorated function arguments
fn_name = func.__name__
link = args[0]
if not link.alive:
logger.warning("%s is not alive for %s", link.name, fn_name)
return None
try:
if not link.reachable:
raise LinkError("The %s %s is not reachable" % (link.type, link.name))
logger.debug("[%s] Calling: %s, %s, %s", link.name, fn_name, args, kwargs)
return func(*args, **kwargs)
except HTTPClientConnectionException as exp:
# A Connection error is raised when the daemon connection cannot be established
# No way with the configuration parameters!
if not link.stopping:
logger.warning("A daemon (%s/%s) that we must be related with "
"cannot be connected: %s", link.type, link.name, exp)
else:
logger.info("Stopping... daemon (%s/%s) cannot be connected. "
"It is also probably stopping or yet stopped.",
link.type, link.name)
link.set_dead()
except (LinkError, HTTPClientTimeoutException) as exp:
link.add_failed_check_attempt("Connection timeout "
"with '%s': %s" % (fn_name, str(exp)))
return False
except HTTPClientDataException as exp:
# A Data error is raised when the daemon HTTP response is not 200!
# No way with the communication if some problems exist in the daemon interface!
# Abort all
err = "Some daemons that we must be related with " \
"have some interface problems. Sorry, I bail out"
logger.error(err)
os.sys.exit(err)
except HTTPClientException as exp:
link.add_failed_check_attempt("Error with '%s': %s" % (fn_name, str(exp)))
return None
return decorated
return decorator
|
[
"def",
"communicate",
"(",
"*",
"outer_args",
",",
"*",
"*",
"outer_kwargs",
")",
":",
"# pylint: disable=unused-argument, no-method-argument",
"def",
"decorator",
"(",
"func",
")",
":",
"# pylint: disable=missing-docstring",
"def",
"decorated",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=missing-docstring",
"# outer_args and outer_kwargs are the decorator arguments",
"# args and kwargs are the decorated function arguments",
"fn_name",
"=",
"func",
".",
"__name__",
"link",
"=",
"args",
"[",
"0",
"]",
"if",
"not",
"link",
".",
"alive",
":",
"logger",
".",
"warning",
"(",
"\"%s is not alive for %s\"",
",",
"link",
".",
"name",
",",
"fn_name",
")",
"return",
"None",
"try",
":",
"if",
"not",
"link",
".",
"reachable",
":",
"raise",
"LinkError",
"(",
"\"The %s %s is not reachable\"",
"%",
"(",
"link",
".",
"type",
",",
"link",
".",
"name",
")",
")",
"logger",
".",
"debug",
"(",
"\"[%s] Calling: %s, %s, %s\"",
",",
"link",
".",
"name",
",",
"fn_name",
",",
"args",
",",
"kwargs",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"HTTPClientConnectionException",
"as",
"exp",
":",
"# A Connection error is raised when the daemon connection cannot be established",
"# No way with the configuration parameters!",
"if",
"not",
"link",
".",
"stopping",
":",
"logger",
".",
"warning",
"(",
"\"A daemon (%s/%s) that we must be related with \"",
"\"cannot be connected: %s\"",
",",
"link",
".",
"type",
",",
"link",
".",
"name",
",",
"exp",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Stopping... daemon (%s/%s) cannot be connected. \"",
"\"It is also probably stopping or yet stopped.\"",
",",
"link",
".",
"type",
",",
"link",
".",
"name",
")",
"link",
".",
"set_dead",
"(",
")",
"except",
"(",
"LinkError",
",",
"HTTPClientTimeoutException",
")",
"as",
"exp",
":",
"link",
".",
"add_failed_check_attempt",
"(",
"\"Connection timeout \"",
"\"with '%s': %s\"",
"%",
"(",
"fn_name",
",",
"str",
"(",
"exp",
")",
")",
")",
"return",
"False",
"except",
"HTTPClientDataException",
"as",
"exp",
":",
"# A Data error is raised when the daemon HTTP reponse is not 200!",
"# No way with the communication if some problems exist in the daemon interface!",
"# Abort all",
"err",
"=",
"\"Some daemons that we must be related with \"",
"\"have some interface problems. Sorry, I bail out\"",
"logger",
".",
"error",
"(",
"err",
")",
"os",
".",
"sys",
".",
"exit",
"(",
"err",
")",
"except",
"HTTPClientException",
"as",
"exp",
":",
"link",
".",
"add_failed_check_attempt",
"(",
"\"Error with '%s': %s\"",
"%",
"(",
"fn_name",
",",
"str",
"(",
"exp",
")",
")",
")",
"return",
"None",
"return",
"decorated",
"return",
"decorator"
] |
Check if the daemon connection is authorized and valid
|
[
"Check",
"if",
"the",
"daemon",
"connection",
"is",
"authorized",
"and",
"valid"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L594-L642
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.stop_request
|
def stop_request(self, stop_now=False):
"""Send a stop request to the daemon
:param stop_now: stop now or go to stop wait mode
:type stop_now: bool
:return: the daemon response (True)
"""
logger.debug("Sending stop request to %s, stop now: %s", self.name, stop_now)
res = self.con.get('stop_request', {'stop_now': '1' if stop_now else '0'})
return res
|
python
|
def stop_request(self, stop_now=False):
"""Send a stop request to the daemon
:param stop_now: stop now or go to stop wait mode
:type stop_now: bool
:return: the daemon response (True)
"""
logger.debug("Sending stop request to %s, stop now: %s", self.name, stop_now)
res = self.con.get('stop_request', {'stop_now': '1' if stop_now else '0'})
return res
|
[
"def",
"stop_request",
"(",
"self",
",",
"stop_now",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"\"Sending stop request to %s, stop now: %s\"",
",",
"self",
".",
"name",
",",
"stop_now",
")",
"res",
"=",
"self",
".",
"con",
".",
"get",
"(",
"'stop_request'",
",",
"{",
"'stop_now'",
":",
"'1'",
"if",
"stop_now",
"else",
"'0'",
"}",
")",
"return",
"res"
] |
Send a stop request to the daemon
:param stop_now: stop now or go to stop wait mode
:type stop_now: bool
:return: the daemon response (True)
|
[
"Send",
"a",
"stop",
"request",
"to",
"the",
"daemon"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L689-L699
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.update_infos
|
def update_infos(self, forced=False, test=False):
"""Update satellite info each self.polling_interval seconds
so we smooth arbiter actions for just useful actions.
Raise a satellite update status Brok
If forced is True, then ignore the ping period. This is used when the configuration
has not yet been dispatched to the Arbiter satellites.
If test is True, do not really ping the daemon (useful for the unit tests only)
:param forced: ignore the ping smoothing
:type forced: bool
:param test:
:type test: bool
:return:
None if the last request is too recent,
False if a timeout was raised during the request,
else the managed configurations dictionary
"""
logger.debug("Update informations, forced: %s", forced)
# First look if it's not too early to ping
now = time.time()
if not forced and self.last_check and self.last_check + self.polling_interval > now:
logger.debug("Too early to ping %s, ping period is %ds!, last check: %d, now: %d",
self.name, self.polling_interval, self.last_check, now)
return None
self.get_conf(test=test)
# Update the daemon last check timestamp
self.last_check = time.time()
# Update the state of this element
self.broks.append(self.get_update_status_brok())
return self.cfg_managed
|
python
|
def update_infos(self, forced=False, test=False):
"""Update satellite info each self.polling_interval seconds
so we smooth arbiter actions for just useful actions.
Raise a satellite update status Brok
If forced is True, then ignore the ping period. This is used when the configuration
has not yet been dispatched to the Arbiter satellites.
If test is True, do not really ping the daemon (useful for the unit tests only)
:param forced: ignore the ping smoothing
:type forced: bool
:param test:
:type test: bool
:return:
None if the last request is too recent,
False if a timeout was raised during the request,
else the managed configurations dictionary
"""
logger.debug("Update informations, forced: %s", forced)
# First look if it's not too early to ping
now = time.time()
if not forced and self.last_check and self.last_check + self.polling_interval > now:
logger.debug("Too early to ping %s, ping period is %ds!, last check: %d, now: %d",
self.name, self.polling_interval, self.last_check, now)
return None
self.get_conf(test=test)
# Update the daemon last check timestamp
self.last_check = time.time()
# Update the state of this element
self.broks.append(self.get_update_status_brok())
return self.cfg_managed
|
[
"def",
"update_infos",
"(",
"self",
",",
"forced",
"=",
"False",
",",
"test",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"\"Update informations, forced: %s\"",
",",
"forced",
")",
"# First look if it's not too early to ping",
"now",
"=",
"time",
".",
"time",
"(",
")",
"if",
"not",
"forced",
"and",
"self",
".",
"last_check",
"and",
"self",
".",
"last_check",
"+",
"self",
".",
"polling_interval",
">",
"now",
":",
"logger",
".",
"debug",
"(",
"\"Too early to ping %s, ping period is %ds!, last check: %d, now: %d\"",
",",
"self",
".",
"name",
",",
"self",
".",
"polling_interval",
",",
"self",
".",
"last_check",
",",
"now",
")",
"return",
"None",
"self",
".",
"get_conf",
"(",
"test",
"=",
"test",
")",
"# Update the daemon last check timestamp",
"self",
".",
"last_check",
"=",
"time",
".",
"time",
"(",
")",
"# Update the state of this element",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"get_update_status_brok",
"(",
")",
")",
"return",
"self",
".",
"cfg_managed"
] |
Update satellite info each self.polling_interval seconds
so we smooth arbiter actions for just useful actions.
Raise a satellite update status Brok
If forced is True, then ignore the ping period. This is used when the configuration
has not yet been dispatched to the Arbiter satellites.
If test is True, do not really ping the daemon (useful for the unit tests only)
:param forced: ignore the ping smoothing
:type forced: bool
:param test:
:type test: bool
:return:
None if the last request is too recent,
False if a timeout was raised during the request,
else the managed configurations dictionary
|
[
"Update",
"satellite",
"info",
"each",
"self",
".",
"polling_interval",
"seconds",
"so",
"we",
"smooth",
"arbiter",
"actions",
"for",
"just",
"useful",
"actions",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L703-L740
|
train
|
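A minimal sketch of the ping-smoothing test inside update_infos() above: the refresh is skipped when the previous check is more recent than the polling interval, unless the caller forces it. The values used are hypothetical.

import time

def too_early(last_check, polling_interval, forced=False, now=None):
    # True when the previous check is still fresh and the refresh was not forced.
    now = time.time() if now is None else now
    return (not forced) and bool(last_check) and (last_check + polling_interval > now)

now = 1000.0
print(too_early(last_check=now - 2, polling_interval=5, now=now))                # True: checked 2s ago
print(too_early(last_check=now - 10, polling_interval=5, now=now))               # False: stale, ping again
print(too_early(last_check=now - 2, polling_interval=5, forced=True, now=now))   # False: forced refresh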
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLink.push_actions
|
def push_actions(self, actions, scheduler_instance_id):
"""Post the actions to execute to the satellite.
Indeed, a scheduler posts its checks to a poller and its actions to a reactionner.
:param actions: Action list to send
:type actions: list
:param scheduler_instance_id: Scheduler instance identifier
:type scheduler_instance_id: uuid
:return: True on success, False on failure
:rtype: bool
"""
logger.debug("Pushing %d actions from %s", len(actions), scheduler_instance_id)
return self.con.post('_push_actions', {'actions': actions,
'scheduler_instance_id': scheduler_instance_id},
wait=True)
|
python
|
def push_actions(self, actions, scheduler_instance_id):
"""Post the actions to execute to the satellite.
Indeed, a scheduler posts its checks to a poller and its actions to a reactionner.
:param actions: Action list to send
:type actions: list
:param scheduler_instance_id: Scheduler instance identifier
:type scheduler_instance_id: uuid
:return: True on success, False on failure
:rtype: bool
"""
logger.debug("Pushing %d actions from %s", len(actions), scheduler_instance_id)
return self.con.post('_push_actions', {'actions': actions,
'scheduler_instance_id': scheduler_instance_id},
wait=True)
|
[
"def",
"push_actions",
"(",
"self",
",",
"actions",
",",
"scheduler_instance_id",
")",
":",
"logger",
".",
"debug",
"(",
"\"Pushing %d actions from %s\"",
",",
"len",
"(",
"actions",
")",
",",
"scheduler_instance_id",
")",
"return",
"self",
".",
"con",
".",
"post",
"(",
"'_push_actions'",
",",
"{",
"'actions'",
":",
"actions",
",",
"'scheduler_instance_id'",
":",
"scheduler_instance_id",
"}",
",",
"wait",
"=",
"True",
")"
] |
Post the actions to execute to the satellite.
Indeed, a scheduler posts its checks to a poller and its actions to a reactionner.
:param actions: Action list to send
:type actions: list
:param scheduler_instance_id: Scheduler instance identifier
:type scheduler_instance_id: uuid
:return: True on success, False on failure
:rtype: bool
|
[
"Post",
"the",
"actions",
"to",
"execute",
"to",
"the",
"satellite",
".",
"Indeed",
"a",
"scheduler",
"post",
"its",
"checks",
"to",
"a",
"poller",
"and",
"its",
"actions",
"to",
"a",
"reactionner",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L874-L888
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/satellitelink.py
|
SatelliteLinks.linkify
|
def linkify(self, modules):
"""Link modules and Satellite links
:param modules: Module object list
:type modules: alignak.objects.module.Modules
:return: None
"""
logger.debug("Linkify %s with %s", self, modules)
self.linkify_s_by_module(modules)
|
python
|
def linkify(self, modules):
"""Link modules and Satellite links
:param modules: Module object list
:type modules: alignak.objects.module.Modules
:return: None
"""
logger.debug("Linkify %s with %s", self, modules)
self.linkify_s_by_module(modules)
|
[
"def",
"linkify",
"(",
"self",
",",
"modules",
")",
":",
"logger",
".",
"debug",
"(",
"\"Linkify %s with %s\"",
",",
"self",
",",
"modules",
")",
"self",
".",
"linkify_s_by_module",
"(",
"modules",
")"
] |
Link modules and Satellite links
:param modules: Module object list
:type modules: alignak.objects.module.Modules
:return: None
|
[
"Link",
"modules",
"and",
"Satellite",
"links"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L1005-L1013
|
train
|
Alignak-monitoring/alignak
|
alignak/notification.py
|
Notification.get_return_from
|
def get_return_from(self, notif):
"""Setter of exit_status and execution_time attributes
:param notif: notification to get data from
:type notif: alignak.notification.Notification
:return: None
"""
self.exit_status = notif.exit_status
self.execution_time = notif.execution_time
|
python
|
def get_return_from(self, notif):
"""Setter of exit_status and execution_time attributes
:param notif: notification to get data from
:type notif: alignak.notification.Notification
:return: None
"""
self.exit_status = notif.exit_status
self.execution_time = notif.execution_time
|
[
"def",
"get_return_from",
"(",
"self",
",",
"notif",
")",
":",
"self",
".",
"exit_status",
"=",
"notif",
".",
"exit_status",
"self",
".",
"execution_time",
"=",
"notif",
".",
"execution_time"
] |
Setter of exit_status and execution_time attributes
:param notif: notification to get data from
:type notif: alignak.notification.Notification
:return: None
|
[
"Setter",
"of",
"exit_status",
"and",
"execution_time",
"attributes"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/notification.py#L164-L172
|
train
|
Alignak-monitoring/alignak
|
alignak/notification.py
|
Notification.get_initial_status_brok
|
def get_initial_status_brok(self):
"""Get a initial status brok
:return: brok with wanted data
:rtype: alignak.brok.Brok
"""
data = {'uuid': self.uuid}
self.fill_data_brok_from(data, 'full_status')
return Brok({'type': 'notification_raise', 'data': data})
|
python
|
def get_initial_status_brok(self):
"""Get a initial status brok
:return: brok with wanted data
:rtype: alignak.brok.Brok
"""
data = {'uuid': self.uuid}
self.fill_data_brok_from(data, 'full_status')
return Brok({'type': 'notification_raise', 'data': data})
|
[
"def",
"get_initial_status_brok",
"(",
"self",
")",
":",
"data",
"=",
"{",
"'uuid'",
":",
"self",
".",
"uuid",
"}",
"self",
".",
"fill_data_brok_from",
"(",
"data",
",",
"'full_status'",
")",
"return",
"Brok",
"(",
"{",
"'type'",
":",
"'notification_raise'",
",",
"'data'",
":",
"data",
"}",
")"
] |
Get an initial status brok
:return: brok with wanted data
:rtype: alignak.brok.Brok
|
[
"Get",
"a",
"initial",
"status",
"brok"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/notification.py#L191-L199
|
train
|
Alignak-monitoring/alignak
|
alignak/daemons/brokerdaemon.py
|
Broker.manage_brok
|
def manage_brok(self, brok):
"""Get a brok.
We put brok data to the modules
:param brok: object with data
:type brok: object
:return: None
"""
# Unserialize the brok before consuming it
brok.prepare()
for module in self.modules_manager.get_internal_instances():
try:
_t0 = time.time()
module.manage_brok(brok)
statsmgr.timer('manage-broks.internal.%s' % module.get_name(), time.time() - _t0)
except Exception as exp: # pylint: disable=broad-except
logger.warning("The module %s raised an exception: %s, "
"I'm tagging it to restart later", module.get_name(), str(exp))
logger.exception(exp)
self.modules_manager.set_to_restart(module)
|
python
|
def manage_brok(self, brok):
"""Get a brok.
We put brok data to the modules
:param brok: object with data
:type brok: object
:return: None
"""
# Unserialize the brok before consuming it
brok.prepare()
for module in self.modules_manager.get_internal_instances():
try:
_t0 = time.time()
module.manage_brok(brok)
statsmgr.timer('manage-broks.internal.%s' % module.get_name(), time.time() - _t0)
except Exception as exp: # pylint: disable=broad-except
logger.warning("The module %s raised an exception: %s, "
"I'm tagging it to restart later", module.get_name(), str(exp))
logger.exception(exp)
self.modules_manager.set_to_restart(module)
|
[
"def",
"manage_brok",
"(",
"self",
",",
"brok",
")",
":",
"# Unserialize the brok before consuming it",
"brok",
".",
"prepare",
"(",
")",
"for",
"module",
"in",
"self",
".",
"modules_manager",
".",
"get_internal_instances",
"(",
")",
":",
"try",
":",
"_t0",
"=",
"time",
".",
"time",
"(",
")",
"module",
".",
"manage_brok",
"(",
"brok",
")",
"statsmgr",
".",
"timer",
"(",
"'manage-broks.internal.%s'",
"%",
"module",
".",
"get_name",
"(",
")",
",",
"time",
".",
"time",
"(",
")",
"-",
"_t0",
")",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"logger",
".",
"warning",
"(",
"\"The module %s raised an exception: %s, \"",
"\"I'm tagging it to restart later\"",
",",
"module",
".",
"get_name",
"(",
")",
",",
"str",
"(",
"exp",
")",
")",
"logger",
".",
"exception",
"(",
"exp",
")",
"self",
".",
"modules_manager",
".",
"set_to_restart",
"(",
"module",
")"
] |
Get a brok.
We put brok data to the modules
:param brok: object with data
:type brok: object
:return: None
|
[
"Get",
"a",
"brok",
".",
"We",
"put",
"brok",
"data",
"to",
"the",
"modules"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/brokerdaemon.py#L189-L209
|
train
|
Alignak-monitoring/alignak
|
alignak/daemons/brokerdaemon.py
|
Broker.get_internal_broks
|
def get_internal_broks(self):
"""Get all broks from self.broks_internal_raised and append them to our broks
to manage
:return: None
"""
statsmgr.gauge('get-new-broks-count.broker', len(self.internal_broks))
# Add the broks to our global list
self.external_broks.extend(self.internal_broks)
self.internal_broks = []
|
python
|
def get_internal_broks(self):
"""Get all broks from self.broks_internal_raised and append them to our broks
to manage
:return: None
"""
statsmgr.gauge('get-new-broks-count.broker', len(self.internal_broks))
# Add the broks to our global list
self.external_broks.extend(self.internal_broks)
self.internal_broks = []
|
[
"def",
"get_internal_broks",
"(",
"self",
")",
":",
"statsmgr",
".",
"gauge",
"(",
"'get-new-broks-count.broker'",
",",
"len",
"(",
"self",
".",
"internal_broks",
")",
")",
"# Add the broks to our global list",
"self",
".",
"external_broks",
".",
"extend",
"(",
"self",
".",
"internal_broks",
")",
"self",
".",
"internal_broks",
"=",
"[",
"]"
] |
Get all broks from self.broks_internal_raised and append them to our broks
to manage
:return: None
|
[
"Get",
"all",
"broks",
"from",
"self",
".",
"broks_internal_raised",
"and",
"append",
"them",
"to",
"our",
"broks",
"to",
"manage"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/brokerdaemon.py#L211-L220
|
train
|
Alignak-monitoring/alignak
|
alignak/daemons/brokerdaemon.py
|
Broker.get_arbiter_broks
|
def get_arbiter_broks(self):
"""Get the broks from the arbiters,
but as the arbiter_broks list can be pushed by arbiter without Global lock,
we must protect this with a lock
TODO: really? check this arbiter behavior!
:return: None
"""
with self.arbiter_broks_lock:
statsmgr.gauge('get-new-broks-count.arbiter', len(self.arbiter_broks))
# Add the broks to our global list
self.external_broks.extend(self.arbiter_broks)
self.arbiter_broks = []
|
python
|
def get_arbiter_broks(self):
"""Get the broks from the arbiters,
but as the arbiter_broks list can be pushed by arbiter without Global lock,
we must protect this with a lock
TODO: really? check this arbiter behavior!
:return: None
"""
with self.arbiter_broks_lock:
statsmgr.gauge('get-new-broks-count.arbiter', len(self.arbiter_broks))
# Add the broks to our global list
self.external_broks.extend(self.arbiter_broks)
self.arbiter_broks = []
|
[
"def",
"get_arbiter_broks",
"(",
"self",
")",
":",
"with",
"self",
".",
"arbiter_broks_lock",
":",
"statsmgr",
".",
"gauge",
"(",
"'get-new-broks-count.arbiter'",
",",
"len",
"(",
"self",
".",
"arbiter_broks",
")",
")",
"# Add the broks to our global list",
"self",
".",
"external_broks",
".",
"extend",
"(",
"self",
".",
"arbiter_broks",
")",
"self",
".",
"arbiter_broks",
"=",
"[",
"]"
] |
Get the broks from the arbiters,
but as the arbiter_broks list can be pushed by arbiter without Global lock,
we must protect this with a lock
TODO: really? check this arbiter behavior!
:return: None
|
[
"Get",
"the",
"broks",
"from",
"the",
"arbiters",
"but",
"as",
"the",
"arbiter_broks",
"list",
"can",
"be",
"push",
"by",
"arbiter",
"without",
"Global",
"lock",
"we",
"must",
"protect",
"this",
"with",
"a",
"lock"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/brokerdaemon.py#L222-L235
|
train
|
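A small sketch of the lock-protected 'drain one list into another' pattern that get_arbiter_broks above uses, with generic names instead of the real broker attributes.

import threading

class Mailbox(object):
    def __init__(self):
        self.lock = threading.Lock()
        self.incoming = []    # filled by a producer thread
        self.to_manage = []   # consumed by the main loop

    def post(self, item):
        with self.lock:
            self.incoming.append(item)

    def drain(self):
        # Move everything received so far while holding the lock, then reset
        # the incoming list so the producer can keep appending safely.
        with self.lock:
            self.to_manage.extend(self.incoming)
            self.incoming = []

box = Mailbox()
box.post('brok-1')
box.post('brok-2')
box.drain()
print(box.to_manage)  # ['brok-1', 'brok-2']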
Alignak-monitoring/alignak
|
alignak/daemons/brokerdaemon.py
|
Broker.get_new_broks
|
def get_new_broks(self):
"""Get new broks from our satellites
:return: None
"""
for satellites in [self.schedulers, self.pollers, self.reactionners, self.receivers]:
for satellite_link in list(satellites.values()):
logger.debug("Getting broks from %s", satellite_link)
_t0 = time.time()
try:
tmp_broks = satellite_link.get_broks(self.name)
except LinkError:
logger.warning("Daemon %s connection failed, I could not get the broks!",
satellite_link)
else:
if tmp_broks:
logger.debug("Got %d Broks from %s in %s",
len(tmp_broks), satellite_link.name, time.time() - _t0)
statsmgr.gauge('get-new-broks-count.%s'
% (satellite_link.name), len(tmp_broks))
statsmgr.timer('get-new-broks-time.%s'
% (satellite_link.name), time.time() - _t0)
for brok in tmp_broks:
brok.instance_id = satellite_link.instance_id
# Add the broks to our global list
self.external_broks.extend(tmp_broks)
|
python
|
def get_new_broks(self):
"""Get new broks from our satellites
:return: None
"""
for satellites in [self.schedulers, self.pollers, self.reactionners, self.receivers]:
for satellite_link in list(satellites.values()):
logger.debug("Getting broks from %s", satellite_link)
_t0 = time.time()
try:
tmp_broks = satellite_link.get_broks(self.name)
except LinkError:
logger.warning("Daemon %s connection failed, I could not get the broks!",
satellite_link)
else:
if tmp_broks:
logger.debug("Got %d Broks from %s in %s",
len(tmp_broks), satellite_link.name, time.time() - _t0)
statsmgr.gauge('get-new-broks-count.%s'
% (satellite_link.name), len(tmp_broks))
statsmgr.timer('get-new-broks-time.%s'
% (satellite_link.name), time.time() - _t0)
for brok in tmp_broks:
brok.instance_id = satellite_link.instance_id
# Add the broks to our global list
self.external_broks.extend(tmp_broks)
|
[
"def",
"get_new_broks",
"(",
"self",
")",
":",
"for",
"satellites",
"in",
"[",
"self",
".",
"schedulers",
",",
"self",
".",
"pollers",
",",
"self",
".",
"reactionners",
",",
"self",
".",
"receivers",
"]",
":",
"for",
"satellite_link",
"in",
"list",
"(",
"satellites",
".",
"values",
"(",
")",
")",
":",
"logger",
".",
"debug",
"(",
"\"Getting broks from %s\"",
",",
"satellite_link",
")",
"_t0",
"=",
"time",
".",
"time",
"(",
")",
"try",
":",
"tmp_broks",
"=",
"satellite_link",
".",
"get_broks",
"(",
"self",
".",
"name",
")",
"except",
"LinkError",
":",
"logger",
".",
"warning",
"(",
"\"Daemon %s connection failed, I could not get the broks!\"",
",",
"satellite_link",
")",
"else",
":",
"if",
"tmp_broks",
":",
"logger",
".",
"debug",
"(",
"\"Got %d Broks from %s in %s\"",
",",
"len",
"(",
"tmp_broks",
")",
",",
"satellite_link",
".",
"name",
",",
"time",
".",
"time",
"(",
")",
"-",
"_t0",
")",
"statsmgr",
".",
"gauge",
"(",
"'get-new-broks-count.%s'",
"%",
"(",
"satellite_link",
".",
"name",
")",
",",
"len",
"(",
"tmp_broks",
")",
")",
"statsmgr",
".",
"timer",
"(",
"'get-new-broks-time.%s'",
"%",
"(",
"satellite_link",
".",
"name",
")",
",",
"time",
".",
"time",
"(",
")",
"-",
"_t0",
")",
"for",
"brok",
"in",
"tmp_broks",
":",
"brok",
".",
"instance_id",
"=",
"satellite_link",
".",
"instance_id",
"# Add the broks to our global list",
"self",
".",
"external_broks",
".",
"extend",
"(",
"tmp_broks",
")"
] |
Get new broks from our satellites
:return: None
|
[
"Get",
"new",
"broks",
"from",
"our",
"satellites"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/brokerdaemon.py#L237-L264
|
train
|
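An illustrative reduction of the collection loop in get_new_broks() above: iterate every satellite map, tolerate per-link failures, and tag each received item with the link it came from. FakeSatellite and collect are stand-ins, not the real daemon classes.

class FakeSatellite(object):
    def __init__(self, name, instance_id, broks=None, fail=False):
        self.name = name
        self.instance_id = instance_id
        self._broks = broks or []
        self._fail = fail

    def get_broks(self, requester):
        if self._fail:
            raise ConnectionError("%s unreachable" % self.name)
        return [dict(b) for b in self._broks]

def collect(satellite_maps):
    collected = []
    for satellites in satellite_maps:
        for link in satellites.values():
            try:
                broks = link.get_broks('broker-master')
            except ConnectionError:
                continue  # a dead link must not stop the whole collection
            for brok in broks:
                brok['instance_id'] = link.instance_id  # remember the origin
            collected.extend(broks)
    return collected

schedulers = {'s1': FakeSatellite('scheduler-master', 'S1', [{'type': 'log'}])}
pollers = {'p1': FakeSatellite('poller-master', 'P1', fail=True)}
print(collect([schedulers, pollers]))  # [{'type': 'log', 'instance_id': 'S1'}]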
Alignak-monitoring/alignak
|
alignak/objects/realm.py
|
Realm.add_group_members
|
def add_group_members(self, members):
"""Add a new group member to the groups list
:param members: member name
:type members: str
:return: None
"""
if not isinstance(members, list):
members = [members]
if not getattr(self, 'group_members', None):
self.group_members = members
else:
self.group_members.extend(members)
|
python
|
def add_group_members(self, members):
"""Add a new group member to the groups list
:param members: member name
:type members: str
:return: None
"""
if not isinstance(members, list):
members = [members]
if not getattr(self, 'group_members', None):
self.group_members = members
else:
self.group_members.extend(members)
|
[
"def",
"add_group_members",
"(",
"self",
",",
"members",
")",
":",
"if",
"not",
"isinstance",
"(",
"members",
",",
"list",
")",
":",
"members",
"=",
"[",
"members",
"]",
"if",
"not",
"getattr",
"(",
"self",
",",
"'group_members'",
",",
"None",
")",
":",
"self",
".",
"group_members",
"=",
"members",
"else",
":",
"self",
".",
"group_members",
".",
"extend",
"(",
"members",
")"
] |
Add a new group member to the groups list
:param members: member name
:type members: str
:return: None
|
[
"Add",
"a",
"new",
"group",
"member",
"to",
"the",
"groups",
"list"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/realm.py#L221-L234
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/realm.py
|
Realm.get_realms_by_explosion
|
def get_realms_by_explosion(self, realms):
"""Get all members of this realm including members of sub-realms on multi-levels
:param realms: realms list, used to look for a specific one
:type realms: alignak.objects.realm.Realms
:return: list of members and add realm to realm_members attribute
:rtype: list
"""
# If rec_tag is already set, then we detected a loop in the realms hierarchy!
if getattr(self, 'rec_tag', False):
self.add_error("Error: there is a loop in the realm definition %s" % self.get_name())
return None
# Ok, not in a loop, we tag the realm and parse its members
self.rec_tag = True
# Order realm members list by name
self.realm_members = sorted(self.realm_members)
for member in self.realm_members:
realm = realms.find_by_name(member)
if not realm:
self.add_unknown_members(member)
continue
children = realm.get_realms_by_explosion(realms)
if children is None:
# We got a loop in our children definition
self.all_sub_members = []
self.realm_members = []
return None
# Return the list of all unique members
return self.all_sub_members
|
python
|
def get_realms_by_explosion(self, realms):
"""Get all members of this realm including members of sub-realms on multi-levels
:param realms: realms list, used to look for a specific one
:type realms: alignak.objects.realm.Realms
:return: list of members and add realm to realm_members attribute
:rtype: list
"""
# If rec_tag is already set, then we detected a loop in the realms hierarchy!
if getattr(self, 'rec_tag', False):
self.add_error("Error: there is a loop in the realm definition %s" % self.get_name())
return None
# Ok, not in a loop, we tag the realm and parse its members
self.rec_tag = True
# Order realm members list by name
self.realm_members = sorted(self.realm_members)
for member in self.realm_members:
realm = realms.find_by_name(member)
if not realm:
self.add_unknown_members(member)
continue
children = realm.get_realms_by_explosion(realms)
if children is None:
# We got a loop in our children definition
self.all_sub_members = []
self.realm_members = []
return None
# Return the list of all unique members
return self.all_sub_members
|
[
"def",
"get_realms_by_explosion",
"(",
"self",
",",
"realms",
")",
":",
"# If rec_tag is already set, then we detected a loop in the realms hierarchy!",
"if",
"getattr",
"(",
"self",
",",
"'rec_tag'",
",",
"False",
")",
":",
"self",
".",
"add_error",
"(",
"\"Error: there is a loop in the realm definition %s\"",
"%",
"self",
".",
"get_name",
"(",
")",
")",
"return",
"None",
"# Ok, not in a loop, we tag the realm and parse its members",
"self",
".",
"rec_tag",
"=",
"True",
"# Order realm members list by name",
"self",
".",
"realm_members",
"=",
"sorted",
"(",
"self",
".",
"realm_members",
")",
"for",
"member",
"in",
"self",
".",
"realm_members",
":",
"realm",
"=",
"realms",
".",
"find_by_name",
"(",
"member",
")",
"if",
"not",
"realm",
":",
"self",
".",
"add_unknown_members",
"(",
"member",
")",
"continue",
"children",
"=",
"realm",
".",
"get_realms_by_explosion",
"(",
"realms",
")",
"if",
"children",
"is",
"None",
":",
"# We got a loop in our children definition",
"self",
".",
"all_sub_members",
"=",
"[",
"]",
"self",
".",
"realm_members",
"=",
"[",
"]",
"return",
"None",
"# Return the list of all unique members",
"return",
"self",
".",
"all_sub_members"
] |
Get all members of this realm including members of sub-realms on multi-levels
:param realms: realms list, used to look for a specific one
:type realms: alignak.objects.realm.Realms
:return: list of members and add realm to realm_members attribute
:rtype: list
|
[
"Get",
"all",
"members",
"of",
"this",
"realm",
"including",
"members",
"of",
"sub",
"-",
"realms",
"on",
"multi",
"-",
"levels"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/realm.py#L274-L306
|
train
|
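The get_realms_by_explosion entry above detects hierarchy loops with a rec_tag attribute that the caller resets before each walk. A self-contained sketch of the same cycle detection, simplified to a visited set over a plain parent-to-children dict (names and data are illustrative only):

def walk(name, tree, visited):
    # Coming back to an already-visited realm means the members form a loop
    if name in visited:
        return None
    visited.add(name)
    members = []
    for child in sorted(tree.get(name, [])):
        sub = walk(child, tree, visited)
        if sub is None:
            return None              # propagate the loop error to the caller
        members.extend([child] + sub)
    return members

tree = {'All': ['Europe', 'America'], 'Europe': ['France']}
print(walk('All', tree, set()))                     # ['America', 'Europe', 'France']
print(walk('A', {'A': ['B'], 'B': ['A']}, set()))   # None: loop detected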
Alignak-monitoring/alignak
|
alignak/objects/realm.py
|
Realm.set_level
|
def set_level(self, level, realms):
"""Set the realm level in the realms hierarchy
:return: None
"""
self.level = level
if not self.level:
logger.info("- %s", self.get_name())
else:
logger.info(" %s %s", '+' * self.level, self.get_name())
self.all_sub_members = []
self.all_sub_members_names = []
for child in sorted(self.realm_members):
child = realms.find_by_name(child)
if not child:
continue
self.all_sub_members.append(child.uuid)
self.all_sub_members_names.append(child.get_name())
grand_children = child.set_level(self.level + 1, realms)
for grand_child in grand_children:
if grand_child in self.all_sub_members_names:
continue
grand_child = realms.find_by_name(grand_child)
if grand_child:
self.all_sub_members_names.append(grand_child.get_name())
self.all_sub_members.append(grand_child.uuid)
return self.all_sub_members_names
|
python
|
def set_level(self, level, realms):
"""Set the realm level in the realms hierarchy
:return: None
"""
self.level = level
if not self.level:
logger.info("- %s", self.get_name())
else:
logger.info(" %s %s", '+' * self.level, self.get_name())
self.all_sub_members = []
self.all_sub_members_names = []
for child in sorted(self.realm_members):
child = realms.find_by_name(child)
if not child:
continue
self.all_sub_members.append(child.uuid)
self.all_sub_members_names.append(child.get_name())
grand_children = child.set_level(self.level + 1, realms)
for grand_child in grand_children:
if grand_child in self.all_sub_members_names:
continue
grand_child = realms.find_by_name(grand_child)
if grand_child:
self.all_sub_members_names.append(grand_child.get_name())
self.all_sub_members.append(grand_child.uuid)
return self.all_sub_members_names
|
[
"def",
"set_level",
"(",
"self",
",",
"level",
",",
"realms",
")",
":",
"self",
".",
"level",
"=",
"level",
"if",
"not",
"self",
".",
"level",
":",
"logger",
".",
"info",
"(",
"\"- %s\"",
",",
"self",
".",
"get_name",
"(",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\" %s %s\"",
",",
"'+'",
"*",
"self",
".",
"level",
",",
"self",
".",
"get_name",
"(",
")",
")",
"self",
".",
"all_sub_members",
"=",
"[",
"]",
"self",
".",
"all_sub_members_names",
"=",
"[",
"]",
"for",
"child",
"in",
"sorted",
"(",
"self",
".",
"realm_members",
")",
":",
"child",
"=",
"realms",
".",
"find_by_name",
"(",
"child",
")",
"if",
"not",
"child",
":",
"continue",
"self",
".",
"all_sub_members",
".",
"append",
"(",
"child",
".",
"uuid",
")",
"self",
".",
"all_sub_members_names",
".",
"append",
"(",
"child",
".",
"get_name",
"(",
")",
")",
"grand_children",
"=",
"child",
".",
"set_level",
"(",
"self",
".",
"level",
"+",
"1",
",",
"realms",
")",
"for",
"grand_child",
"in",
"grand_children",
":",
"if",
"grand_child",
"in",
"self",
".",
"all_sub_members_names",
":",
"continue",
"grand_child",
"=",
"realms",
".",
"find_by_name",
"(",
"grand_child",
")",
"if",
"grand_child",
":",
"self",
".",
"all_sub_members_names",
".",
"append",
"(",
"grand_child",
".",
"get_name",
"(",
")",
")",
"self",
".",
"all_sub_members",
".",
"append",
"(",
"grand_child",
".",
"uuid",
")",
"return",
"self",
".",
"all_sub_members_names"
] |
Set the realm level in the realms hierarchy
:return: None
|
[
"Set",
"the",
"realm",
"level",
"in",
"the",
"realms",
"hierarchy"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/realm.py#L308-L335
|
train
|
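The set_level entry above assigns a depth to each realm and flattens every descendant into a unique list. A reduced sketch of that recursive numbering and flattening on a plain dict (the tree below is illustrative, not Alignak data):

def set_level(name, tree, level=0):
    # Print the hierarchy marker then return every descendant name, without duplicates
    print('%s %s' % ('+' * level if level else '-', name))
    all_subs = []
    for child in sorted(tree.get(name, [])):
        all_subs.append(child)
        for grand_child in set_level(child, tree, level + 1):
            if grand_child not in all_subs:
                all_subs.append(grand_child)
    return all_subs

tree = {'All': ['Europe'], 'Europe': ['France', 'Italy']}
print(set_level('All', tree))        # ['Europe', 'France', 'Italy']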
Alignak-monitoring/alignak
|
alignak/objects/realm.py
|
Realm.get_all_subs_satellites_by_type
|
def get_all_subs_satellites_by_type(self, sat_type, realms):
"""Get all satellites of the wanted type in this realm recursively
:param sat_type: satellite type wanted (scheduler, poller ..)
:type sat_type:
:param realms: all realms
:type realms: list of realm object
:return: list of satellite in this realm
:rtype: list
"""
res = copy.copy(getattr(self, sat_type))
for member in self.all_sub_members:
res.extend(realms[member].get_all_subs_satellites_by_type(sat_type, realms))
return res
|
python
|
def get_all_subs_satellites_by_type(self, sat_type, realms):
"""Get all satellites of the wanted type in this realm recursively
:param sat_type: satellite type wanted (scheduler, poller ..)
:type sat_type:
:param realms: all realms
:type realms: list of realm object
:return: list of satellite in this realm
:rtype: list
"""
res = copy.copy(getattr(self, sat_type))
for member in self.all_sub_members:
res.extend(realms[member].get_all_subs_satellites_by_type(sat_type, realms))
return res
|
[
"def",
"get_all_subs_satellites_by_type",
"(",
"self",
",",
"sat_type",
",",
"realms",
")",
":",
"res",
"=",
"copy",
".",
"copy",
"(",
"getattr",
"(",
"self",
",",
"sat_type",
")",
")",
"for",
"member",
"in",
"self",
".",
"all_sub_members",
":",
"res",
".",
"extend",
"(",
"realms",
"[",
"member",
"]",
".",
"get_all_subs_satellites_by_type",
"(",
"sat_type",
",",
"realms",
")",
")",
"return",
"res"
] |
Get all satellites of the wanted type in this realm recursively
:param sat_type: satellite type wanted (scheduler, poller ..)
:type sat_type:
:param realms: all realms
:type realms: list of realm object
:return: list of satellite in this realm
:rtype: list
|
[
"Get",
"all",
"satellites",
"of",
"the",
"wanted",
"type",
"in",
"this",
"realm",
"recursively"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/realm.py#L337-L350
|
train
|
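In the get_all_subs_satellites_by_type entry above, copy.copy() prevents res.extend() from mutating the realm's own satellite list while sub-realm satellites are aggregated. A small standalone sketch of the same copy-before-extend pattern (the dict layout is illustrative):

import copy

realms = {
    'All':    {'pollers': ['poller-master'], 'subs': ['Europe']},
    'Europe': {'pollers': ['poller-europe'], 'subs': []},
}

def all_sats(name, sat_type):
    # Copy first so the realm's own list is never modified by extend()
    res = copy.copy(realms[name][sat_type])
    for sub in realms[name]['subs']:
        res.extend(all_sats(sub, sat_type))
    return res

print(all_sats('All', 'pollers'))    # ['poller-master', 'poller-europe']
print(realms['All']['pollers'])      # still ['poller-master']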
Alignak-monitoring/alignak
|
alignak/objects/realm.py
|
Realm.get_links_for_a_broker
|
def get_links_for_a_broker(self, pollers, reactionners, receivers, realms,
manage_sub_realms=False):
"""Get a configuration dictionary with pollers, reactionners and receivers links
for a broker
:param pollers: pollers
:type pollers:
:param reactionners: reactionners
:type reactionners:
:param receivers: receivers
:type receivers:
:param realms: realms
:type realms:
:param manage_sub_realms:
:type manage_sub_realms: True if the broker manages sub realms
:return: dict containing pollers, reactionners and receivers links (key is satellite id)
:rtype: dict
"""
# Create void satellite links
cfg = {
'pollers': {},
'reactionners': {},
'receivers': {},
}
# Our self.daemons are only identifiers... that we use to fill the satellite links
for poller_id in self.pollers:
poller = pollers[poller_id]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
for reactionner_id in self.reactionners:
reactionner = reactionners[reactionner_id]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
for receiver_id in self.receivers:
receiver = receivers[receiver_id]
cfg['receivers'][receiver.uuid] = receiver.give_satellite_cfg()
# If the broker manages sub realms, fill the satellite links...
if manage_sub_realms:
# Now pollers
for poller_id in self.get_all_subs_satellites_by_type('pollers', realms):
poller = pollers[poller_id]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
# Now reactionners
for reactionner_id in self.get_all_subs_satellites_by_type('reactionners', realms):
reactionner = reactionners[reactionner_id]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
# Now receivers
for receiver_id in self.get_all_subs_satellites_by_type('receivers', realms):
receiver = receivers[receiver_id]
cfg['receivers'][receiver.uuid] = receiver.give_satellite_cfg()
return cfg
|
python
|
def get_links_for_a_broker(self, pollers, reactionners, receivers, realms,
manage_sub_realms=False):
"""Get a configuration dictionary with pollers, reactionners and receivers links
for a broker
:param pollers: pollers
:type pollers:
:param reactionners: reactionners
:type reactionners:
:param receivers: receivers
:type receivers:
:param realms: realms
:type realms:
:param manage_sub_realms:
:type manage_sub_realms: True if the broker manages sub realms
:return: dict containing pollers, reactionners and receivers links (key is satellite id)
:rtype: dict
"""
# Create void satellite links
cfg = {
'pollers': {},
'reactionners': {},
'receivers': {},
}
# Our self.daemons are only identifiers... that we use to fill the satellite links
for poller_id in self.pollers:
poller = pollers[poller_id]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
for reactionner_id in self.reactionners:
reactionner = reactionners[reactionner_id]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
for receiver_id in self.receivers:
receiver = receivers[receiver_id]
cfg['receivers'][receiver.uuid] = receiver.give_satellite_cfg()
# If the broker manages sub realms, fill the satellite links...
if manage_sub_realms:
# Now pollers
for poller_id in self.get_all_subs_satellites_by_type('pollers', realms):
poller = pollers[poller_id]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
# Now reactionners
for reactionner_id in self.get_all_subs_satellites_by_type('reactionners', realms):
reactionner = reactionners[reactionner_id]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
# Now receivers
for receiver_id in self.get_all_subs_satellites_by_type('receivers', realms):
receiver = receivers[receiver_id]
cfg['receivers'][receiver.uuid] = receiver.give_satellite_cfg()
return cfg
|
[
"def",
"get_links_for_a_broker",
"(",
"self",
",",
"pollers",
",",
"reactionners",
",",
"receivers",
",",
"realms",
",",
"manage_sub_realms",
"=",
"False",
")",
":",
"# Create void satellite links",
"cfg",
"=",
"{",
"'pollers'",
":",
"{",
"}",
",",
"'reactionners'",
":",
"{",
"}",
",",
"'receivers'",
":",
"{",
"}",
",",
"}",
"# Our self.daemons are only identifiers... that we use to fill the satellite links",
"for",
"poller_id",
"in",
"self",
".",
"pollers",
":",
"poller",
"=",
"pollers",
"[",
"poller_id",
"]",
"cfg",
"[",
"'pollers'",
"]",
"[",
"poller",
".",
"uuid",
"]",
"=",
"poller",
".",
"give_satellite_cfg",
"(",
")",
"for",
"reactionner_id",
"in",
"self",
".",
"reactionners",
":",
"reactionner",
"=",
"reactionners",
"[",
"reactionner_id",
"]",
"cfg",
"[",
"'reactionners'",
"]",
"[",
"reactionner",
".",
"uuid",
"]",
"=",
"reactionner",
".",
"give_satellite_cfg",
"(",
")",
"for",
"receiver_id",
"in",
"self",
".",
"receivers",
":",
"receiver",
"=",
"receivers",
"[",
"receiver_id",
"]",
"cfg",
"[",
"'receivers'",
"]",
"[",
"receiver",
".",
"uuid",
"]",
"=",
"receiver",
".",
"give_satellite_cfg",
"(",
")",
"# If the broker manages sub realms, fill the satellite links...",
"if",
"manage_sub_realms",
":",
"# Now pollers",
"for",
"poller_id",
"in",
"self",
".",
"get_all_subs_satellites_by_type",
"(",
"'pollers'",
",",
"realms",
")",
":",
"poller",
"=",
"pollers",
"[",
"poller_id",
"]",
"cfg",
"[",
"'pollers'",
"]",
"[",
"poller",
".",
"uuid",
"]",
"=",
"poller",
".",
"give_satellite_cfg",
"(",
")",
"# Now reactionners",
"for",
"reactionner_id",
"in",
"self",
".",
"get_all_subs_satellites_by_type",
"(",
"'reactionners'",
",",
"realms",
")",
":",
"reactionner",
"=",
"reactionners",
"[",
"reactionner_id",
"]",
"cfg",
"[",
"'reactionners'",
"]",
"[",
"reactionner",
".",
"uuid",
"]",
"=",
"reactionner",
".",
"give_satellite_cfg",
"(",
")",
"# Now receivers",
"for",
"receiver_id",
"in",
"self",
".",
"get_all_subs_satellites_by_type",
"(",
"'receivers'",
",",
"realms",
")",
":",
"receiver",
"=",
"receivers",
"[",
"receiver_id",
"]",
"cfg",
"[",
"'receivers'",
"]",
"[",
"receiver",
".",
"uuid",
"]",
"=",
"receiver",
".",
"give_satellite_cfg",
"(",
")",
"return",
"cfg"
] |
Get a configuration dictionary with pollers, reactionners and receivers links
for a broker
:param pollers: pollers
:type pollers:
:param reactionners: reactionners
:type reactionners:
:param receivers: receivers
:type receivers:
:param realms: realms
:type realms:
:param manage_sub_realms:
:type manage_sub_realms: True if the broker manages sub realms
:return: dict containing pollers, reactionners and receivers links (key is satellite id)
:rtype: dict
|
[
"Get",
"a",
"configuration",
"dictionary",
"with",
"pollers",
"reactionners",
"and",
"receivers",
"links",
"for",
"a",
"broker"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/realm.py#L415-L472
|
train
|
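The get_links_for_a_broker entry above builds per-type dictionaries keyed by satellite uuid, optionally widening to sub-realm satellites when manage_sub_realms is set. A compact sketch of that assembly with stand-in objects (FakeSatellite and its give_satellite_cfg stub are hypothetical, not the real link classes):

class FakeSatellite(object):
    """Stand-in for a satellite link; give_satellite_cfg is a stub here."""
    def __init__(self, uuid, name):
        self.uuid = uuid
        self.name = name

    def give_satellite_cfg(self):
        # The real method returns the full connection information
        return {'name': self.name, 'address': '127.0.0.1'}

pollers = {'p1': FakeSatellite('p1', 'poller-master'),
           'p2': FakeSatellite('p2', 'poller-europe')}
my_pollers = ['p1']            # satellites attached to this realm
sub_pollers = ['p2']           # satellites attached to the sub-realms
manage_sub_realms = True

cfg = {'pollers': {}}
for poller_id in my_pollers + (sub_pollers if manage_sub_realms else []):
    poller = pollers[poller_id]
    cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
print(sorted(cfg['pollers']))  # ['p1', 'p2']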
Alignak-monitoring/alignak
|
alignak/objects/realm.py
|
Realm.get_links_for_a_scheduler
|
def get_links_for_a_scheduler(self, pollers, reactionners, brokers):
"""Get a configuration dictionary with pollers, reactionners and brokers links
for a scheduler
:return: dict containing pollers, reactionners and brokers links (key is satellite id)
:rtype: dict
"""
# Create void satellite links
cfg = {
'pollers': {},
'reactionners': {},
'brokers': {},
}
# Our self.daemons are only identifiers... that we use to fill the satellite links
try:
for poller in self.pollers + self.get_potential_satellites_by_type(pollers, "poller"):
if poller in pollers:
poller = pollers[poller]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
for reactionner in self.reactionners + self.get_potential_satellites_by_type(
reactionners, "reactionner"):
if reactionner in reactionners:
reactionner = reactionners[reactionner]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
for broker in self.brokers + self.get_potential_satellites_by_type(brokers, "broker"):
if broker in brokers:
broker = brokers[broker]
cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
except Exception as exp: # pylint: disable=broad-except
logger.exception("realm.get_links_for_a_scheduler: %s", exp)
# for poller in self.get_potential_satellites_by_type(pollers, "poller"):
# logger.info("Poller: %s", poller)
# cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
#
# for reactionner in self.get_potential_satellites_by_type(reactionners, "reactionner"):
# cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
#
# for broker in self.get_potential_satellites_by_type(brokers, "broker"):
# cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
return cfg
|
python
|
def get_links_for_a_scheduler(self, pollers, reactionners, brokers):
"""Get a configuration dictionary with pollers, reactionners and brokers links
for a scheduler
:return: dict containing pollers, reactionners and brokers links (key is satellite id)
:rtype: dict
"""
# Create void satellite links
cfg = {
'pollers': {},
'reactionners': {},
'brokers': {},
}
# Our self.daemons are only identifiers... that we use to fill the satellite links
try:
for poller in self.pollers + self.get_potential_satellites_by_type(pollers, "poller"):
if poller in pollers:
poller = pollers[poller]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
for reactionner in self.reactionners + self.get_potential_satellites_by_type(
reactionners, "reactionner"):
if reactionner in reactionners:
reactionner = reactionners[reactionner]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
for broker in self.brokers + self.get_potential_satellites_by_type(brokers, "broker"):
if broker in brokers:
broker = brokers[broker]
cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
except Exception as exp: # pylint: disable=broad-except
logger.exception("realm.get_links_for_a_scheduler: %s", exp)
# for poller in self.get_potential_satellites_by_type(pollers, "poller"):
# logger.info("Poller: %s", poller)
# cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
#
# for reactionner in self.get_potential_satellites_by_type(reactionners, "reactionner"):
# cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
#
# for broker in self.get_potential_satellites_by_type(brokers, "broker"):
# cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
return cfg
|
[
"def",
"get_links_for_a_scheduler",
"(",
"self",
",",
"pollers",
",",
"reactionners",
",",
"brokers",
")",
":",
"# Create void satellite links",
"cfg",
"=",
"{",
"'pollers'",
":",
"{",
"}",
",",
"'reactionners'",
":",
"{",
"}",
",",
"'brokers'",
":",
"{",
"}",
",",
"}",
"# Our self.daemons are only identifiers... that we use to fill the satellite links",
"try",
":",
"for",
"poller",
"in",
"self",
".",
"pollers",
"+",
"self",
".",
"get_potential_satellites_by_type",
"(",
"pollers",
",",
"\"poller\"",
")",
":",
"if",
"poller",
"in",
"pollers",
":",
"poller",
"=",
"pollers",
"[",
"poller",
"]",
"cfg",
"[",
"'pollers'",
"]",
"[",
"poller",
".",
"uuid",
"]",
"=",
"poller",
".",
"give_satellite_cfg",
"(",
")",
"for",
"reactionner",
"in",
"self",
".",
"reactionners",
"+",
"self",
".",
"get_potential_satellites_by_type",
"(",
"reactionners",
",",
"\"reactionner\"",
")",
":",
"if",
"reactionner",
"in",
"reactionners",
":",
"reactionner",
"=",
"reactionners",
"[",
"reactionner",
"]",
"cfg",
"[",
"'reactionners'",
"]",
"[",
"reactionner",
".",
"uuid",
"]",
"=",
"reactionner",
".",
"give_satellite_cfg",
"(",
")",
"for",
"broker",
"in",
"self",
".",
"brokers",
"+",
"self",
".",
"get_potential_satellites_by_type",
"(",
"brokers",
",",
"\"broker\"",
")",
":",
"if",
"broker",
"in",
"brokers",
":",
"broker",
"=",
"brokers",
"[",
"broker",
"]",
"cfg",
"[",
"'brokers'",
"]",
"[",
"broker",
".",
"uuid",
"]",
"=",
"broker",
".",
"give_satellite_cfg",
"(",
")",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"logger",
".",
"exception",
"(",
"\"realm.get_links_for_a_scheduler: %s\"",
",",
"exp",
")",
"# for poller in self.get_potential_satellites_by_type(pollers, \"poller\"):",
"# logger.info(\"Poller: %s\", poller)",
"# cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()",
"#",
"# for reactionner in self.get_potential_satellites_by_type(reactionners, \"reactionner\"):",
"# cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()",
"#",
"# for broker in self.get_potential_satellites_by_type(brokers, \"broker\"):",
"# cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()",
"return",
"cfg"
] |
Get a configuration dictionary with pollers, reactionners and brokers links
for a scheduler
:return: dict containing pollers, reactionners and brokers links (key is satellite id)
:rtype: dict
|
[
"Get",
"a",
"configuration",
"dictionary",
"with",
"pollers",
"reactionners",
"and",
"brokers",
"links",
"for",
"a",
"scheduler"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/realm.py#L474-L519
|
train
|
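The get_links_for_a_scheduler entry above wraps the same assembly in a broad try/except so one bad satellite reference only logs an error instead of aborting the whole dispatch. A minimal illustration of that defensive pattern (data and logger name are illustrative):

import logging
logger = logging.getLogger('alignak-sketch')

links = {'brokers': {}}
brokers = {'b1': {'name': 'broker-master'}}
try:
    for broker_id in ['b1', 'missing']:
        broker = brokers[broker_id]          # raises KeyError for 'missing'
        links['brokers'][broker_id] = broker
except Exception as exp:                     # pylint: disable=broad-except
    logger.exception("get_links sketch failed: %s", exp)
print(links)    # the partial result is kept, the error is only logged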
Alignak-monitoring/alignak
|
alignak/objects/realm.py
|
Realms.explode
|
def explode(self):
"""Explode realms with each realm_members and higher_realms to get all the
realms sub realms.
:return: None
"""
# Manage higher realms where defined
for realm in [tmp_realm for tmp_realm in self if tmp_realm.higher_realms]:
for parent in realm.higher_realms:
higher_realm = self.find_by_name(parent)
if higher_realm:
# Add the realm to its parent realm members
higher_realm.realm_members.append(realm.get_name())
for realm in self:
# Set a recursion tag to protect against loop
for tmp_realm in self:
tmp_realm.rec_tag = False
realm.get_realms_by_explosion(self)
# Clean the recursion tag
for tmp_realm in self:
del tmp_realm.rec_tag
|
python
|
def explode(self):
"""Explode realms with each realm_members and higher_realms to get all the
realms sub realms.
:return: None
"""
# Manage higher realms where defined
for realm in [tmp_realm for tmp_realm in self if tmp_realm.higher_realms]:
for parent in realm.higher_realms:
higher_realm = self.find_by_name(parent)
if higher_realm:
# Add the realm to its parent realm members
higher_realm.realm_members.append(realm.get_name())
for realm in self:
# Set a recursion tag to protect against loop
for tmp_realm in self:
tmp_realm.rec_tag = False
realm.get_realms_by_explosion(self)
# Clean the recursion tag
for tmp_realm in self:
del tmp_realm.rec_tag
|
[
"def",
"explode",
"(",
"self",
")",
":",
"# Manage higher realms where defined",
"for",
"realm",
"in",
"[",
"tmp_realm",
"for",
"tmp_realm",
"in",
"self",
"if",
"tmp_realm",
".",
"higher_realms",
"]",
":",
"for",
"parent",
"in",
"realm",
".",
"higher_realms",
":",
"higher_realm",
"=",
"self",
".",
"find_by_name",
"(",
"parent",
")",
"if",
"higher_realm",
":",
"# Add the realm to its parent realm members",
"higher_realm",
".",
"realm_members",
".",
"append",
"(",
"realm",
".",
"get_name",
"(",
")",
")",
"for",
"realm",
"in",
"self",
":",
"# Set a recursion tag to protect against loop",
"for",
"tmp_realm",
"in",
"self",
":",
"tmp_realm",
".",
"rec_tag",
"=",
"False",
"realm",
".",
"get_realms_by_explosion",
"(",
"self",
")",
"# Clean the recursion tag",
"for",
"tmp_realm",
"in",
"self",
":",
"del",
"tmp_realm",
".",
"rec_tag"
] |
Explode realms with each realm_members and higher_realms to get all the
realms sub realms.
:return: None
|
[
"Explode",
"realms",
"with",
"each",
"realm_members",
"and",
"higher_realms",
"to",
"get",
"all",
"the",
"realms",
"sub",
"realms",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/realm.py#L565-L587
|
train
|
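The Realms.explode entry above first inverts higher_realms declarations into realm_members on the parent, so both notations end up expressed the same way before the explosion walk. A small sketch of that inversion on plain dictionaries (names are illustrative):

realms = {
    'All':    {'realm_members': [], 'higher_realms': []},
    'Europe': {'realm_members': [], 'higher_realms': ['All']},
}

# Invert the child -> parent declaration into parent -> members
for name, realm in realms.items():
    for parent in realm['higher_realms']:
        if parent in realms:
            realms[parent]['realm_members'].append(name)

print(realms['All']['realm_members'])    # ['Europe']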
Alignak-monitoring/alignak
|
alignak/objects/realm.py
|
Realms.get_default
|
def get_default(self, check=False):
"""Get the default realm
:param check: check correctness if True
:type check: bool
:return: Default realm of Alignak configuration
:rtype: alignak.objects.realm.Realm | None
"""
found = []
for realm in sorted(self, key=lambda r: r.level):
if getattr(realm, 'default', False):
found.append(realm)
if not found:
# Retain as default realm the first realm in name alphabetical order
found_names = sorted([r.get_name() for r in self])
if not found_names:
self.add_error("No realm is defined in this configuration! "
"This should not be possible!")
return None
default_realm_name = found_names[0]
default_realm = self.find_by_name(default_realm_name)
default_realm.default = True
found.append(default_realm)
if check:
self.add_error("No realm is defined as the default one! "
"I set %s as the default realm" % default_realm_name)
default_realm = found[0]
if len(found) > 1:
# Retain as default realm the first so-called default realms in name alphabetical order
found_names = sorted([r.get_name() for r in found])
default_realm_name = found_names[0]
default_realm = self.find_by_name(default_realm_name)
# Set all found realms as non-default realms
for realm in found:
if realm.get_name() != default_realm_name:
realm.default = False
if check:
self.add_warning("More than one realm is defined as the default one: %s. "
"I set %s as the default realm."
% (','.join(found_names), default_realm_name))
self.default = default_realm
return default_realm
|
python
|
def get_default(self, check=False):
"""Get the default realm
:param check: check correctness if True
:type check: bool
:return: Default realm of Alignak configuration
:rtype: alignak.objects.realm.Realm | None
"""
found = []
for realm in sorted(self, key=lambda r: r.level):
if getattr(realm, 'default', False):
found.append(realm)
if not found:
# Retain as default realm the first realm in name alphabetical order
found_names = sorted([r.get_name() for r in self])
if not found_names:
self.add_error("No realm is defined in this configuration! "
"This should not be possible!")
return None
default_realm_name = found_names[0]
default_realm = self.find_by_name(default_realm_name)
default_realm.default = True
found.append(default_realm)
if check:
self.add_error("No realm is defined as the default one! "
"I set %s as the default realm" % default_realm_name)
default_realm = found[0]
if len(found) > 1:
# Retain as default realm the first so-called default realms in name alphabetical order
found_names = sorted([r.get_name() for r in found])
default_realm_name = found_names[0]
default_realm = self.find_by_name(default_realm_name)
# Set all found realms as non-default realms
for realm in found:
if realm.get_name() != default_realm_name:
realm.default = False
if check:
self.add_warning("More than one realm is defined as the default one: %s. "
"I set %s as the default realm."
% (','.join(found_names), default_realm_name))
self.default = default_realm
return default_realm
|
[
"def",
"get_default",
"(",
"self",
",",
"check",
"=",
"False",
")",
":",
"found",
"=",
"[",
"]",
"for",
"realm",
"in",
"sorted",
"(",
"self",
",",
"key",
"=",
"lambda",
"r",
":",
"r",
".",
"level",
")",
":",
"if",
"getattr",
"(",
"realm",
",",
"'default'",
",",
"False",
")",
":",
"found",
".",
"append",
"(",
"realm",
")",
"if",
"not",
"found",
":",
"# Retain as default realm the first realm in name alphabetical order",
"found_names",
"=",
"sorted",
"(",
"[",
"r",
".",
"get_name",
"(",
")",
"for",
"r",
"in",
"self",
"]",
")",
"if",
"not",
"found_names",
":",
"self",
".",
"add_error",
"(",
"\"No realm is defined in this configuration! \"",
"\"This should not be possible!\"",
")",
"return",
"None",
"default_realm_name",
"=",
"found_names",
"[",
"0",
"]",
"default_realm",
"=",
"self",
".",
"find_by_name",
"(",
"default_realm_name",
")",
"default_realm",
".",
"default",
"=",
"True",
"found",
".",
"append",
"(",
"default_realm",
")",
"if",
"check",
":",
"self",
".",
"add_error",
"(",
"\"No realm is defined as the default one! \"",
"\"I set %s as the default realm\"",
"%",
"default_realm_name",
")",
"default_realm",
"=",
"found",
"[",
"0",
"]",
"if",
"len",
"(",
"found",
")",
">",
"1",
":",
"# Retain as default realm the first so-called default realms in name alphabetical order",
"found_names",
"=",
"sorted",
"(",
"[",
"r",
".",
"get_name",
"(",
")",
"for",
"r",
"in",
"found",
"]",
")",
"default_realm_name",
"=",
"found_names",
"[",
"0",
"]",
"default_realm",
"=",
"self",
".",
"find_by_name",
"(",
"default_realm_name",
")",
"# Set all found realms as non-default realms",
"for",
"realm",
"in",
"found",
":",
"if",
"realm",
".",
"get_name",
"(",
")",
"!=",
"default_realm_name",
":",
"realm",
".",
"default",
"=",
"False",
"if",
"check",
":",
"self",
".",
"add_warning",
"(",
"\"More than one realm is defined as the default one: %s. \"",
"\"I set %s as the default realm.\"",
"%",
"(",
"','",
".",
"join",
"(",
"found_names",
")",
",",
"default_realm_name",
")",
")",
"self",
".",
"default",
"=",
"default_realm",
"return",
"default_realm"
] |
Get the default realm
:param check: check correctness if True
:type check: bool
:return: Default realm of Alignak configuration
:rtype: alignak.objects.realm.Realm | None
|
[
"Get",
"the",
"default",
"realm"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/realm.py#L589-L637
|
train
|
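The Realms.get_default entry above keeps the realm flagged as default, falls back to the alphabetically first realm when none is flagged, and demotes all but the alphabetically first when several are flagged. A compact sketch of the same selection policy over (name, level, is_default) tuples (data is illustrative):

def pick_default(realms):
    # realms: list of (name, level, is_default) tuples
    flagged = [r for r in sorted(realms, key=lambda r: r[1]) if r[2]]
    if not flagged:
        # Fall back to the alphabetically first realm name
        names = sorted(r[0] for r in realms)
        return names[0] if names else None
    if len(flagged) > 1:
        # Keep only the alphabetically first of the flagged realms
        return sorted(r[0] for r in flagged)[0]
    return flagged[0][0]

print(pick_default([('World', 0, False), ('Europe', 1, True), ('Asia', 1, True)]))  # Asia
print(pick_default([('World', 0, False), ('Europe', 1, False)]))                    # Europe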