repo (stringlengths 7-55) | path (stringlengths 4-127) | func_name (stringlengths 1-88) | original_string (stringlengths 75-19.8k) | language (stringclasses 1) | code (stringlengths 75-19.8k) | code_tokens (list) | docstring (stringlengths 3-17.3k) | docstring_tokens (list) | sha (stringlengths 40-40) | url (stringlengths 87-242) | partition (stringclasses 1) |
---|---|---|---|---|---|---|---|---|---|---|---|
Alignak-monitoring/alignak
|
alignak/action.py
|
no_block_read
|
def no_block_read(output):
"""Try to read a file descriptor in a non blocking mode
If the fcntl is available (unix only) we try to read in a
asynchronous mode, so we won't block the PIPE at 64K buffer
(deadlock...)
:param output: file or socket to read from
:type output: file
:return: data read from fd
:rtype: str
"""
_buffer = ""
if not fcntl:
return _buffer
o_fd = output.fileno()
o_fl = fcntl.fcntl(o_fd, fcntl.F_GETFL)
fcntl.fcntl(o_fd, fcntl.F_SETFL, o_fl | os.O_NONBLOCK)
try:
_buffer = output.read()
except Exception: # pylint: disable=broad-except
pass
return _buffer
|
python
|
def no_block_read(output):
"""Try to read a file descriptor in a non blocking mode
If the fcntl is available (unix only) we try to read in a
asynchronous mode, so we won't block the PIPE at 64K buffer
(deadlock...)
:param output: file or socket to read from
:type output: file
:return: data read from fd
:rtype: str
"""
_buffer = ""
if not fcntl:
return _buffer
o_fd = output.fileno()
o_fl = fcntl.fcntl(o_fd, fcntl.F_GETFL)
fcntl.fcntl(o_fd, fcntl.F_SETFL, o_fl | os.O_NONBLOCK)
try:
_buffer = output.read()
except Exception: # pylint: disable=broad-except
pass
return _buffer
|
[
"def",
"no_block_read",
"(",
"output",
")",
":",
"_buffer",
"=",
"\"\"",
"if",
"not",
"fcntl",
":",
"return",
"_buffer",
"o_fd",
"=",
"output",
".",
"fileno",
"(",
")",
"o_fl",
"=",
"fcntl",
".",
"fcntl",
"(",
"o_fd",
",",
"fcntl",
".",
"F_GETFL",
")",
"fcntl",
".",
"fcntl",
"(",
"o_fd",
",",
"fcntl",
".",
"F_SETFL",
",",
"o_fl",
"|",
"os",
".",
"O_NONBLOCK",
")",
"try",
":",
"_buffer",
"=",
"output",
".",
"read",
"(",
")",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"pass",
"return",
"_buffer"
] |
Try to read a file descriptor in a non blocking mode
If the fcntl is available (unix only) we try to read in a
asynchronous mode, so we won't block the PIPE at 64K buffer
(deadlock...)
:param output: file or socket to read from
:type output: file
:return: data read from fd
:rtype: str
|
[
"Try",
"to",
"read",
"a",
"file",
"descriptor",
"in",
"a",
"non",
"blocking",
"mode"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/action.py#L106-L130
|
train
|
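A minimal standalone sketch of the same non-blocking read technique, assuming a Unix platform where `fcntl` is available; the subprocess command and variable names are illustrative and not part of the dataset record above.

```python
import fcntl
import os
import subprocess

# Illustrative only (Unix): switch a child pipe to non-blocking mode so an empty
# buffer never blocks the reader, mirroring what no_block_read does above.
proc = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE)

fd = proc.stdout.fileno()
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

try:
    data = proc.stdout.read()   # returns immediately; may be None or raise if nothing is buffered yet
except OSError:
    data = b""
proc.wait()
print(data)
```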
Alignak-monitoring/alignak
|
alignak/action.py
|
ActionBase.get_local_environnement
|
def get_local_environnement(self):
"""
Mix the environment and the environment variables into a new local
environment dictionary
Note: We cannot just update the global os.environ because this
would effect all other checks.
:return: local environment variables
:rtype: dict
"""
# Do not use copy.copy() here, as the resulting copy still
# changes the real environment (it is still a os._Environment
# instance).
local_env = os.environ.copy()
for local_var in self.env:
local_env[local_var] = self.env[local_var]
return local_env
|
python
|
def get_local_environnement(self):
"""
Mix the environment and the environment variables into a new local
environment dictionary
Note: We cannot just update the global os.environ because this
would effect all other checks.
:return: local environment variables
:rtype: dict
"""
# Do not use copy.copy() here, as the resulting copy still
# changes the real environment (it is still a os._Environment
# instance).
local_env = os.environ.copy()
for local_var in self.env:
local_env[local_var] = self.env[local_var]
return local_env
|
[
"def",
"get_local_environnement",
"(",
"self",
")",
":",
"# Do not use copy.copy() here, as the resulting copy still",
"# changes the real environment (it is still a os._Environment",
"# instance).",
"local_env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"for",
"local_var",
"in",
"self",
".",
"env",
":",
"local_env",
"[",
"local_var",
"]",
"=",
"self",
".",
"env",
"[",
"local_var",
"]",
"return",
"local_env"
] |
Mix the environment and the environment variables into a new local
environment dictionary
Note: We cannot just update the global os.environ because this
would effect all other checks.
:return: local environment variables
:rtype: dict
|
[
"Mix",
"the",
"environment",
"and",
"the",
"environment",
"variables",
"into",
"a",
"new",
"local",
"environment",
"dictionary"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/action.py#L245-L262
|
train
|
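A short sketch of the copy-then-overlay pattern described in the docstring, so per-check variables never leak into the global `os.environ`; the variable name is a made-up example.

```python
import os
import subprocess

def build_local_env(extra_vars):
    # Copy the real environment and overlay per-check variables on the copy,
    # leaving os.environ untouched for every other check.
    local_env = os.environ.copy()
    local_env.update(extra_vars)
    return local_env

env = build_local_env({"EXAMPLE_HOSTNAME": "server-01"})   # hypothetical variable
subprocess.run(["env"], env=env, check=False)
```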
Alignak-monitoring/alignak
|
alignak/action.py
|
ActionBase.execute
|
def execute(self):
"""Start this action command in a subprocess.
:raise: ActionError
'toomanyopenfiles' if too many opened files on the system
'no_process_launched' if arguments parsing failed
'process_launch_failed': if the process launch failed
:return: reference to the started process
:rtype: psutil.Process
"""
self.status = ACT_STATUS_LAUNCHED
self.check_time = time.time()
self.wait_time = 0.0001
self.last_poll = self.check_time
# Get a local env variables with our additional values
self.local_env = self.get_local_environnement()
# Initialize stdout and stderr.
self.stdoutdata = ''
self.stderrdata = ''
logger.debug("Launch command: '%s', ref: %s, timeout: %s",
self.command, self.ref, self.timeout)
if self.log_actions:
if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
logger.warning("Launch command: '%s'", self.command)
else:
logger.info("Launch command: '%s'", self.command)
return self._execute()
|
python
|
def execute(self):
"""Start this action command in a subprocess.
:raise: ActionError
'toomanyopenfiles' if too many opened files on the system
'no_process_launched' if arguments parsing failed
'process_launch_failed': if the process launch failed
:return: reference to the started process
:rtype: psutil.Process
"""
self.status = ACT_STATUS_LAUNCHED
self.check_time = time.time()
self.wait_time = 0.0001
self.last_poll = self.check_time
# Get a local env variables with our additional values
self.local_env = self.get_local_environnement()
# Initialize stdout and stderr.
self.stdoutdata = ''
self.stderrdata = ''
logger.debug("Launch command: '%s', ref: %s, timeout: %s",
self.command, self.ref, self.timeout)
if self.log_actions:
if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
logger.warning("Launch command: '%s'", self.command)
else:
logger.info("Launch command: '%s'", self.command)
return self._execute()
|
[
"def",
"execute",
"(",
"self",
")",
":",
"self",
".",
"status",
"=",
"ACT_STATUS_LAUNCHED",
"self",
".",
"check_time",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"wait_time",
"=",
"0.0001",
"self",
".",
"last_poll",
"=",
"self",
".",
"check_time",
"# Get a local env variables with our additional values",
"self",
".",
"local_env",
"=",
"self",
".",
"get_local_environnement",
"(",
")",
"# Initialize stdout and stderr.",
"self",
".",
"stdoutdata",
"=",
"''",
"self",
".",
"stderrdata",
"=",
"''",
"logger",
".",
"debug",
"(",
"\"Launch command: '%s', ref: %s, timeout: %s\"",
",",
"self",
".",
"command",
",",
"self",
".",
"ref",
",",
"self",
".",
"timeout",
")",
"if",
"self",
".",
"log_actions",
":",
"if",
"os",
".",
"environ",
"[",
"'ALIGNAK_LOG_ACTIONS'",
"]",
"==",
"'WARNING'",
":",
"logger",
".",
"warning",
"(",
"\"Launch command: '%s'\"",
",",
"self",
".",
"command",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Launch command: '%s'\"",
",",
"self",
".",
"command",
")",
"return",
"self",
".",
"_execute",
"(",
")"
] |
Start this action command in a subprocess.
:raise: ActionError
'toomanyopenfiles' if too many opened files on the system
'no_process_launched' if arguments parsing failed
'process_launch_failed': if the process launch failed
:return: reference to the started process
:rtype: psutil.Process
|
[
"Start",
"this",
"action",
"command",
"in",
"a",
"subprocess",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/action.py#L264-L295
|
train
|
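An illustrative sketch of the launch sequence the docstring describes (record the start time, build an isolated environment, spawn the command); it uses plain `subprocess` rather than Alignak's `_execute()`/psutil wrapper, and the command line is an example.

```python
import os
import subprocess
import time

command = "echo OK"                     # example command line
check_time = time.time()
local_env = os.environ.copy()
local_env["EXAMPLE_CHECK"] = "1"        # hypothetical per-check variable

process = subprocess.Popen(
    command,
    shell=True,                         # plugin commands are usually full shell lines
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    env=local_env,
)
stdout, _ = process.communicate(timeout=10)
print(process.returncode, stdout.decode().strip())
```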
Alignak-monitoring/alignak
|
alignak/action.py
|
ActionBase.copy_shell__
|
def copy_shell__(self, new_i):
"""Create all attributes listed in 'ONLY_COPY_PROP' and return `self` with these attributes.
:param new_i: object to
:type new_i: object
:return: object with new properties added
:rtype: object
"""
for prop in ONLY_COPY_PROP:
setattr(new_i, prop, getattr(self, prop))
return new_i
|
python
|
def copy_shell__(self, new_i):
"""Create all attributes listed in 'ONLY_COPY_PROP' and return `self` with these attributes.
:param new_i: object to
:type new_i: object
:return: object with new properties added
:rtype: object
"""
for prop in ONLY_COPY_PROP:
setattr(new_i, prop, getattr(self, prop))
return new_i
|
[
"def",
"copy_shell__",
"(",
"self",
",",
"new_i",
")",
":",
"for",
"prop",
"in",
"ONLY_COPY_PROP",
":",
"setattr",
"(",
"new_i",
",",
"prop",
",",
"getattr",
"(",
"self",
",",
"prop",
")",
")",
"return",
"new_i"
] |
Create all attributes listed in 'ONLY_COPY_PROP' and return `self` with these attributes.
:param new_i: object to
:type new_i: object
:return: object with new properties added
:rtype: object
|
[
"Create",
"all",
"attributes",
"listed",
"in",
"ONLY_COPY_PROP",
"and",
"return",
"self",
"with",
"these",
"attributes",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/action.py#L507-L517
|
train
|
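A reduced sketch of the attribute-copy pattern used by `copy_shell__`: every name in a whitelist is copied from the source object onto the target. The class and the property list below are illustrative, not the real `ONLY_COPY_PROP`.

```python
PROPS_TO_COPY = ("uuid", "status", "command")   # stand-in for ONLY_COPY_PROP

class Shell(object):
    pass

def copy_shell(src, dst, props=PROPS_TO_COPY):
    for prop in props:
        setattr(dst, prop, getattr(src, prop))
    return dst

src = Shell()
src.uuid, src.status, src.command = "abc", "scheduled", "check_ping"
dst = copy_shell(src, Shell())
print(dst.uuid, dst.status, dst.command)
```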
Alignak-monitoring/alignak
|
alignak/objects/contactgroup.py
|
Contactgroup.get_contacts_by_explosion
|
def get_contacts_by_explosion(self, contactgroups):
# pylint: disable=access-member-before-definition
"""
Get contacts of this group
:param contactgroups: Contactgroups object, use to look for a specific one
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: list of contact of this group
:rtype: list[alignak.objects.contact.Contact]
"""
# First we tag the hg so it will not be explode
# if a son of it already call it
self.already_exploded = True
# Now the recursive part
# rec_tag is set to False every CG we explode
# so if True here, it must be a loop in HG
# calls... not GOOD!
if self.rec_tag:
logger.error("[contactgroup::%s] got a loop in contactgroup definition",
self.get_name())
if hasattr(self, 'members'):
return self.members
return ''
# Ok, not a loop, we tag it and continue
self.rec_tag = True
cg_mbrs = self.get_contactgroup_members()
for cg_mbr in cg_mbrs:
contactgroup = contactgroups.find_by_name(cg_mbr.strip())
if contactgroup is not None:
value = contactgroup.get_contacts_by_explosion(contactgroups)
if value is not None:
self.add_members(value)
if hasattr(self, 'members'):
return self.members
return ''
|
python
|
def get_contacts_by_explosion(self, contactgroups):
# pylint: disable=access-member-before-definition
"""
Get contacts of this group
:param contactgroups: Contactgroups object, use to look for a specific one
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: list of contact of this group
:rtype: list[alignak.objects.contact.Contact]
"""
# First we tag the hg so it will not be explode
# if a son of it already call it
self.already_exploded = True
# Now the recursive part
# rec_tag is set to False every CG we explode
# so if True here, it must be a loop in HG
# calls... not GOOD!
if self.rec_tag:
logger.error("[contactgroup::%s] got a loop in contactgroup definition",
self.get_name())
if hasattr(self, 'members'):
return self.members
return ''
# Ok, not a loop, we tag it and continue
self.rec_tag = True
cg_mbrs = self.get_contactgroup_members()
for cg_mbr in cg_mbrs:
contactgroup = contactgroups.find_by_name(cg_mbr.strip())
if contactgroup is not None:
value = contactgroup.get_contacts_by_explosion(contactgroups)
if value is not None:
self.add_members(value)
if hasattr(self, 'members'):
return self.members
return ''
|
[
"def",
"get_contacts_by_explosion",
"(",
"self",
",",
"contactgroups",
")",
":",
"# pylint: disable=access-member-before-definition",
"# First we tag the hg so it will not be explode",
"# if a son of it already call it",
"self",
".",
"already_exploded",
"=",
"True",
"# Now the recursive part",
"# rec_tag is set to False every CG we explode",
"# so if True here, it must be a loop in HG",
"# calls... not GOOD!",
"if",
"self",
".",
"rec_tag",
":",
"logger",
".",
"error",
"(",
"\"[contactgroup::%s] got a loop in contactgroup definition\"",
",",
"self",
".",
"get_name",
"(",
")",
")",
"if",
"hasattr",
"(",
"self",
",",
"'members'",
")",
":",
"return",
"self",
".",
"members",
"return",
"''",
"# Ok, not a loop, we tag it and continue",
"self",
".",
"rec_tag",
"=",
"True",
"cg_mbrs",
"=",
"self",
".",
"get_contactgroup_members",
"(",
")",
"for",
"cg_mbr",
"in",
"cg_mbrs",
":",
"contactgroup",
"=",
"contactgroups",
".",
"find_by_name",
"(",
"cg_mbr",
".",
"strip",
"(",
")",
")",
"if",
"contactgroup",
"is",
"not",
"None",
":",
"value",
"=",
"contactgroup",
".",
"get_contacts_by_explosion",
"(",
"contactgroups",
")",
"if",
"value",
"is",
"not",
"None",
":",
"self",
".",
"add_members",
"(",
"value",
")",
"if",
"hasattr",
"(",
"self",
",",
"'members'",
")",
":",
"return",
"self",
".",
"members",
"return",
"''"
] |
Get contacts of this group
:param contactgroups: Contactgroups object, use to look for a specific one
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: list of contact of this group
:rtype: list[alignak.objects.contact.Contact]
|
[
"Get",
"contacts",
"of",
"this",
"group"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/contactgroup.py#L110-L148
|
train
|
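A standalone sketch of the recursion guard the method relies on: a group is tagged before its sub-groups are expanded, so a cycle in the group definitions is reported instead of looping forever. The data is illustrative and a `seen` set stands in for the `rec_tag` attribute.

```python
groups = {
    "admins": {"members": ["alice"], "group_members": ["oncall"]},
    "oncall": {"members": ["bob"], "group_members": ["admins"]},   # deliberate cycle
}

def explode_group(name, seen=None):
    seen = set() if seen is None else seen
    if name in seen:
        print("loop detected in group definition: %s" % name)
        return set()
    seen.add(name)
    members = set(groups[name]["members"])
    for sub in groups[name]["group_members"]:
        members |= explode_group(sub, seen)
    return members

print(sorted(explode_group("admins")))   # ['alice', 'bob'], plus a loop warning
```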
Alignak-monitoring/alignak
|
alignak/objects/contactgroup.py
|
Contactgroups.add_member
|
def add_member(self, contact_name, contactgroup_name):
"""Add a contact string to a contact member
if the contact group do not exist, create it
:param contact_name: contact name
:type contact_name: str
:param contactgroup_name: contact group name
:type contactgroup_name: str
:return: None
"""
contactgroup = self.find_by_name(contactgroup_name)
if not contactgroup:
contactgroup = Contactgroup({'contactgroup_name': contactgroup_name,
'alias': contactgroup_name,
'members': contact_name})
self.add_contactgroup(contactgroup)
else:
contactgroup.add_members(contact_name)
|
python
|
def add_member(self, contact_name, contactgroup_name):
"""Add a contact string to a contact member
if the contact group do not exist, create it
:param contact_name: contact name
:type contact_name: str
:param contactgroup_name: contact group name
:type contactgroup_name: str
:return: None
"""
contactgroup = self.find_by_name(contactgroup_name)
if not contactgroup:
contactgroup = Contactgroup({'contactgroup_name': contactgroup_name,
'alias': contactgroup_name,
'members': contact_name})
self.add_contactgroup(contactgroup)
else:
contactgroup.add_members(contact_name)
|
[
"def",
"add_member",
"(",
"self",
",",
"contact_name",
",",
"contactgroup_name",
")",
":",
"contactgroup",
"=",
"self",
".",
"find_by_name",
"(",
"contactgroup_name",
")",
"if",
"not",
"contactgroup",
":",
"contactgroup",
"=",
"Contactgroup",
"(",
"{",
"'contactgroup_name'",
":",
"contactgroup_name",
",",
"'alias'",
":",
"contactgroup_name",
",",
"'members'",
":",
"contact_name",
"}",
")",
"self",
".",
"add_contactgroup",
"(",
"contactgroup",
")",
"else",
":",
"contactgroup",
".",
"add_members",
"(",
"contact_name",
")"
] |
Add a contact string to a contact member
if the contact group do not exist, create it
:param contact_name: contact name
:type contact_name: str
:param contactgroup_name: contact group name
:type contactgroup_name: str
:return: None
|
[
"Add",
"a",
"contact",
"string",
"to",
"a",
"contact",
"member",
"if",
"the",
"contact",
"group",
"do",
"not",
"exist",
"create",
"it"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/contactgroup.py#L159-L176
|
train
|
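A find-or-create sketch matching `add_member`'s behaviour, with a plain dict standing in for the `Contactgroups` container; the names are illustrative.

```python
contactgroups = {}

def add_member(contact_name, contactgroup_name):
    group = contactgroups.get(contactgroup_name)
    if group is None:
        # The group does not exist yet: create it with the contact as first member.
        contactgroups[contactgroup_name] = {"alias": contactgroup_name,
                                            "members": [contact_name]}
    else:
        group["members"].append(contact_name)

add_member("alice", "admins")
add_member("bob", "admins")
print(contactgroups)   # {'admins': {'alias': 'admins', 'members': ['alice', 'bob']}}
```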
Alignak-monitoring/alignak
|
alignak/objects/contactgroup.py
|
Contactgroups.linkify_contactgroups_contacts
|
def linkify_contactgroups_contacts(self, contacts):
"""Link the contacts with contactgroups
:param contacts: realms object to link with
:type contacts: alignak.objects.contact.Contacts
:return: None
"""
for contactgroup in self:
mbrs = contactgroup.get_contacts()
# The new member list, in id
new_mbrs = []
for mbr in mbrs:
mbr = mbr.strip() # protect with strip at the beginning so don't care about spaces
if mbr == '': # void entry, skip this
continue
member = contacts.find_by_name(mbr)
# Maybe the contact is missing, if so, must be put in unknown_members
if member is not None:
new_mbrs.append(member.uuid)
else:
contactgroup.add_unknown_members(mbr)
# Make members uniq
new_mbrs = list(set(new_mbrs))
# We find the id, we replace the names
contactgroup.replace_members(new_mbrs)
|
python
|
def linkify_contactgroups_contacts(self, contacts):
"""Link the contacts with contactgroups
:param contacts: realms object to link with
:type contacts: alignak.objects.contact.Contacts
:return: None
"""
for contactgroup in self:
mbrs = contactgroup.get_contacts()
# The new member list, in id
new_mbrs = []
for mbr in mbrs:
mbr = mbr.strip() # protect with strip at the beginning so don't care about spaces
if mbr == '': # void entry, skip this
continue
member = contacts.find_by_name(mbr)
# Maybe the contact is missing, if so, must be put in unknown_members
if member is not None:
new_mbrs.append(member.uuid)
else:
contactgroup.add_unknown_members(mbr)
# Make members uniq
new_mbrs = list(set(new_mbrs))
# We find the id, we replace the names
contactgroup.replace_members(new_mbrs)
|
[
"def",
"linkify_contactgroups_contacts",
"(",
"self",
",",
"contacts",
")",
":",
"for",
"contactgroup",
"in",
"self",
":",
"mbrs",
"=",
"contactgroup",
".",
"get_contacts",
"(",
")",
"# The new member list, in id",
"new_mbrs",
"=",
"[",
"]",
"for",
"mbr",
"in",
"mbrs",
":",
"mbr",
"=",
"mbr",
".",
"strip",
"(",
")",
"# protect with strip at the beginning so don't care about spaces",
"if",
"mbr",
"==",
"''",
":",
"# void entry, skip this",
"continue",
"member",
"=",
"contacts",
".",
"find_by_name",
"(",
"mbr",
")",
"# Maybe the contact is missing, if so, must be put in unknown_members",
"if",
"member",
"is",
"not",
"None",
":",
"new_mbrs",
".",
"append",
"(",
"member",
".",
"uuid",
")",
"else",
":",
"contactgroup",
".",
"add_unknown_members",
"(",
"mbr",
")",
"# Make members uniq",
"new_mbrs",
"=",
"list",
"(",
"set",
"(",
"new_mbrs",
")",
")",
"# We find the id, we replace the names",
"contactgroup",
".",
"replace_members",
"(",
"new_mbrs",
")"
] |
Link the contacts with contactgroups
:param contacts: realms object to link with
:type contacts: alignak.objects.contact.Contacts
:return: None
|
[
"Link",
"the",
"contacts",
"with",
"contactgroups"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/contactgroup.py#L212-L239
|
train
|
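A sketch of the linkify step: member names are stripped, resolved to ids, unknown names are kept aside, and the resulting id list is deduplicated. The lookup table and names are illustrative.

```python
contacts_by_name = {"alice": "uuid-1", "bob": "uuid-2"}
members = ["alice", " bob ", "", "carol", "alice"]

new_mbrs, unknown = [], []
for mbr in members:
    mbr = mbr.strip()                 # tolerate surrounding spaces
    if not mbr:                       # skip void entries
        continue
    uuid = contacts_by_name.get(mbr)
    if uuid is not None:
        new_mbrs.append(uuid)
    else:
        unknown.append(mbr)           # missing contact: would go to unknown_members

new_mbrs = list(set(new_mbrs))        # make members unique
print(sorted(new_mbrs), unknown)      # ['uuid-1', 'uuid-2'] ['carol']
```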
Alignak-monitoring/alignak
|
alignak/objects/contactgroup.py
|
Contactgroups.explode
|
def explode(self):
"""
Fill members with contactgroup_members
:return:None
"""
# We do not want a same hg to be explode again and again
# so we tag it
for tmp_cg in list(self.items.values()):
tmp_cg.already_exploded = False
for contactgroup in list(self.items.values()):
if contactgroup.already_exploded:
continue
# get_contacts_by_explosion is a recursive
# function, so we must tag hg so we do not loop
for tmp_cg in list(self.items.values()):
tmp_cg.rec_tag = False
contactgroup.get_contacts_by_explosion(self)
# We clean the tags
for tmp_cg in list(self.items.values()):
if hasattr(tmp_cg, 'rec_tag'):
del tmp_cg.rec_tag
del tmp_cg.already_exploded
|
python
|
def explode(self):
"""
Fill members with contactgroup_members
:return:None
"""
# We do not want a same hg to be explode again and again
# so we tag it
for tmp_cg in list(self.items.values()):
tmp_cg.already_exploded = False
for contactgroup in list(self.items.values()):
if contactgroup.already_exploded:
continue
# get_contacts_by_explosion is a recursive
# function, so we must tag hg so we do not loop
for tmp_cg in list(self.items.values()):
tmp_cg.rec_tag = False
contactgroup.get_contacts_by_explosion(self)
# We clean the tags
for tmp_cg in list(self.items.values()):
if hasattr(tmp_cg, 'rec_tag'):
del tmp_cg.rec_tag
del tmp_cg.already_exploded
|
[
"def",
"explode",
"(",
"self",
")",
":",
"# We do not want a same hg to be explode again and again",
"# so we tag it",
"for",
"tmp_cg",
"in",
"list",
"(",
"self",
".",
"items",
".",
"values",
"(",
")",
")",
":",
"tmp_cg",
".",
"already_exploded",
"=",
"False",
"for",
"contactgroup",
"in",
"list",
"(",
"self",
".",
"items",
".",
"values",
"(",
")",
")",
":",
"if",
"contactgroup",
".",
"already_exploded",
":",
"continue",
"# get_contacts_by_explosion is a recursive",
"# function, so we must tag hg so we do not loop",
"for",
"tmp_cg",
"in",
"list",
"(",
"self",
".",
"items",
".",
"values",
"(",
")",
")",
":",
"tmp_cg",
".",
"rec_tag",
"=",
"False",
"contactgroup",
".",
"get_contacts_by_explosion",
"(",
"self",
")",
"# We clean the tags",
"for",
"tmp_cg",
"in",
"list",
"(",
"self",
".",
"items",
".",
"values",
"(",
")",
")",
":",
"if",
"hasattr",
"(",
"tmp_cg",
",",
"'rec_tag'",
")",
":",
"del",
"tmp_cg",
".",
"rec_tag",
"del",
"tmp_cg",
".",
"already_exploded"
] |
Fill members with contactgroup_members
:return:None
|
[
"Fill",
"members",
"with",
"contactgroup_members"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/contactgroup.py#L241-L266
|
train
|
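A minimal sketch of the tag-and-clean pattern `explode()` uses: temporary bookkeeping attributes are set before the recursive pass and deleted afterwards so they never leak into later configuration stages. The `Group` class is illustrative and the expansion itself is elided.

```python
class Group(object):
    def __init__(self, name):
        self.name = name

items = [Group("admins"), Group("oncall")]

for grp in items:                      # pre-pass: nothing processed yet
    grp.already_exploded = False

for grp in items:
    if grp.already_exploded:
        continue
    for other in items:                # reset the recursion tag before each expansion
        other.rec_tag = False
    grp.already_exploded = True        # the real code runs the recursive expansion here

for grp in items:                      # cleanup: drop the temporary attributes
    if hasattr(grp, "rec_tag"):
        del grp.rec_tag
    del grp.already_exploded

print([vars(grp) for grp in items])    # only 'name' is left on each group
```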
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.add_flapping_change
|
def add_flapping_change(self, sample):
"""Add a flapping sample and keep cls.flap_history samples
:param sample: Sample to add
:type sample: bool
:return: None
"""
cls = self.__class__
# If this element is not in flapping check, or
# the flapping is globally disable, bailout
if not self.flap_detection_enabled or not cls.enable_flap_detection:
return
self.flapping_changes.append(sample)
# Keep just 20 changes (global flap_history value)
flap_history = cls.flap_history
if len(self.flapping_changes) > flap_history:
self.flapping_changes.pop(0)
|
python
|
def add_flapping_change(self, sample):
"""Add a flapping sample and keep cls.flap_history samples
:param sample: Sample to add
:type sample: bool
:return: None
"""
cls = self.__class__
# If this element is not in flapping check, or
# the flapping is globally disable, bailout
if not self.flap_detection_enabled or not cls.enable_flap_detection:
return
self.flapping_changes.append(sample)
# Keep just 20 changes (global flap_history value)
flap_history = cls.flap_history
if len(self.flapping_changes) > flap_history:
self.flapping_changes.pop(0)
|
[
"def",
"add_flapping_change",
"(",
"self",
",",
"sample",
")",
":",
"cls",
"=",
"self",
".",
"__class__",
"# If this element is not in flapping check, or",
"# the flapping is globally disable, bailout",
"if",
"not",
"self",
".",
"flap_detection_enabled",
"or",
"not",
"cls",
".",
"enable_flap_detection",
":",
"return",
"self",
".",
"flapping_changes",
".",
"append",
"(",
"sample",
")",
"# Keep just 20 changes (global flap_history value)",
"flap_history",
"=",
"cls",
".",
"flap_history",
"if",
"len",
"(",
"self",
".",
"flapping_changes",
")",
">",
"flap_history",
":",
"self",
".",
"flapping_changes",
".",
"pop",
"(",
"0",
")"
] |
Add a flapping sample and keep cls.flap_history samples
:param sample: Sample to add
:type sample: bool
:return: None
|
[
"Add",
"a",
"flapping",
"sample",
"and",
"keep",
"cls",
".",
"flap_history",
"samples"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L550-L570
|
train
|
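A sketch of the bounded-history technique: keep only the most recent `flap_history` samples by dropping the oldest one; the values are examples. A `collections.deque(maxlen=...)` would give the same behaviour.

```python
flap_history = 5
flapping_changes = []

for sample in [True, False, True, True, False, True, False]:
    flapping_changes.append(sample)
    if len(flapping_changes) > flap_history:
        flapping_changes.pop(0)        # discard the oldest sample

print(flapping_changes)                # the last 5 samples only
```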
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.add_attempt
|
def add_attempt(self):
"""Add an attempt when a object is a non-ok state
:return: None
"""
self.attempt += 1
self.attempt = min(self.attempt, self.max_check_attempts)
|
python
|
def add_attempt(self):
"""Add an attempt when a object is a non-ok state
:return: None
"""
self.attempt += 1
self.attempt = min(self.attempt, self.max_check_attempts)
|
[
"def",
"add_attempt",
"(",
"self",
")",
":",
"self",
".",
"attempt",
"+=",
"1",
"self",
".",
"attempt",
"=",
"min",
"(",
"self",
".",
"attempt",
",",
"self",
".",
"max_check_attempts",
")"
] |
Add an attempt when a object is a non-ok state
:return: None
|
[
"Add",
"an",
"attempt",
"when",
"a",
"object",
"is",
"a",
"non",
"-",
"ok",
"state"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L637-L643
|
train
|
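The clamping in `add_attempt` in isolation: the counter grows by one per non-OK result but never exceeds `max_check_attempts`; the figures are illustrative.

```python
max_check_attempts = 3
attempt = 0

for _ in range(5):                     # five consecutive non-OK results
    attempt = min(attempt + 1, max_check_attempts)

print(attempt)                         # 3
```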
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.do_check_freshness
|
def do_check_freshness(self, hosts, services, timeperiods, macromodulations, checkmodulations,
checks, when):
# pylint: disable=too-many-nested-blocks, too-many-branches
"""Check freshness and schedule a check now if necessary.
This function is called by the scheduler if Alignak is configured to check the freshness.
It is called for hosts that have the freshness check enabled if they are only
passively checked.
It is called for services that have the freshness check enabled if they are only
passively checked and if their depending host is not in a freshness expired state
(freshness_expiry = True).
A log is raised when the freshess expiry is detected and the item is set as
freshness_expiry.
:param hosts: hosts objects, used to launch checks
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used launch checks
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used to get check_period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param macromodulations: Macro modulations objects, used in commands (notif, check)
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param checkmodulations: Checkmodulations objects, used to change check command if necessary
:type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
:param checks: checks dict, used to get checks_in_progress for the object
:type checks: dict
:return: A check or None
:rtype: None | object
"""
now = when
# Before, check if class (host or service) have check_freshness OK
# Then check if item want freshness, then check freshness
cls = self.__class__
if not self.in_checking and self.freshness_threshold and not self.freshness_expired:
# logger.debug("Checking freshness for %s, last state update: %s, now: %s.",
# self.get_full_name(), self.last_state_update, now)
if os.getenv('ALIGNAK_LOG_CHECKS', None):
logger.info("--ALC-- -> checking freshness for: %s", self.get_full_name())
# If we never checked this item, we begin the freshness period
if not self.last_state_update:
self.last_state_update = int(now)
if self.last_state_update < now - \
(self.freshness_threshold + cls.additional_freshness_latency):
timeperiod = timeperiods[self.check_period]
if timeperiod is None or timeperiod.is_time_valid(now):
# Create a new check for the scheduler
chk = self.launch_check(now, hosts, services, timeperiods,
macromodulations, checkmodulations, checks)
if not chk:
logger.warning("No raised freshness check for: %s", self)
return None
chk.freshness_expiry_check = True
chk.check_time = time.time()
chk.output = "Freshness period expired: %s" % (
datetime.utcfromtimestamp(int(chk.check_time)).strftime(
"%Y-%m-%d %H:%M:%S %Z"))
if self.my_type == 'host':
if self.freshness_state == 'o':
chk.exit_status = 0
elif self.freshness_state == 'd':
chk.exit_status = 2
elif self.freshness_state in ['u', 'x']:
chk.exit_status = 4
else:
chk.exit_status = 3
else:
if self.freshness_state == 'o':
chk.exit_status = 0
elif self.freshness_state == 'w':
chk.exit_status = 1
elif self.freshness_state == 'c':
chk.exit_status = 2
elif self.freshness_state == 'u':
chk.exit_status = 3
elif self.freshness_state == 'x':
chk.exit_status = 4
else:
chk.exit_status = 3
return chk
else:
logger.debug("Ignored freshness check for %s, because "
"we are not in the check period.", self.get_full_name())
return None
|
python
|
def do_check_freshness(self, hosts, services, timeperiods, macromodulations, checkmodulations,
checks, when):
# pylint: disable=too-many-nested-blocks, too-many-branches
"""Check freshness and schedule a check now if necessary.
This function is called by the scheduler if Alignak is configured to check the freshness.
It is called for hosts that have the freshness check enabled if they are only
passively checked.
It is called for services that have the freshness check enabled if they are only
passively checked and if their depending host is not in a freshness expired state
(freshness_expiry = True).
A log is raised when the freshess expiry is detected and the item is set as
freshness_expiry.
:param hosts: hosts objects, used to launch checks
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used launch checks
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used to get check_period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param macromodulations: Macro modulations objects, used in commands (notif, check)
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param checkmodulations: Checkmodulations objects, used to change check command if necessary
:type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
:param checks: checks dict, used to get checks_in_progress for the object
:type checks: dict
:return: A check or None
:rtype: None | object
"""
now = when
# Before, check if class (host or service) have check_freshness OK
# Then check if item want freshness, then check freshness
cls = self.__class__
if not self.in_checking and self.freshness_threshold and not self.freshness_expired:
# logger.debug("Checking freshness for %s, last state update: %s, now: %s.",
# self.get_full_name(), self.last_state_update, now)
if os.getenv('ALIGNAK_LOG_CHECKS', None):
logger.info("--ALC-- -> checking freshness for: %s", self.get_full_name())
# If we never checked this item, we begin the freshness period
if not self.last_state_update:
self.last_state_update = int(now)
if self.last_state_update < now - \
(self.freshness_threshold + cls.additional_freshness_latency):
timeperiod = timeperiods[self.check_period]
if timeperiod is None or timeperiod.is_time_valid(now):
# Create a new check for the scheduler
chk = self.launch_check(now, hosts, services, timeperiods,
macromodulations, checkmodulations, checks)
if not chk:
logger.warning("No raised freshness check for: %s", self)
return None
chk.freshness_expiry_check = True
chk.check_time = time.time()
chk.output = "Freshness period expired: %s" % (
datetime.utcfromtimestamp(int(chk.check_time)).strftime(
"%Y-%m-%d %H:%M:%S %Z"))
if self.my_type == 'host':
if self.freshness_state == 'o':
chk.exit_status = 0
elif self.freshness_state == 'd':
chk.exit_status = 2
elif self.freshness_state in ['u', 'x']:
chk.exit_status = 4
else:
chk.exit_status = 3
else:
if self.freshness_state == 'o':
chk.exit_status = 0
elif self.freshness_state == 'w':
chk.exit_status = 1
elif self.freshness_state == 'c':
chk.exit_status = 2
elif self.freshness_state == 'u':
chk.exit_status = 3
elif self.freshness_state == 'x':
chk.exit_status = 4
else:
chk.exit_status = 3
return chk
else:
logger.debug("Ignored freshness check for %s, because "
"we are not in the check period.", self.get_full_name())
return None
|
[
"def",
"do_check_freshness",
"(",
"self",
",",
"hosts",
",",
"services",
",",
"timeperiods",
",",
"macromodulations",
",",
"checkmodulations",
",",
"checks",
",",
"when",
")",
":",
"# pylint: disable=too-many-nested-blocks, too-many-branches",
"now",
"=",
"when",
"# Before, check if class (host or service) have check_freshness OK",
"# Then check if item want freshness, then check freshness",
"cls",
"=",
"self",
".",
"__class__",
"if",
"not",
"self",
".",
"in_checking",
"and",
"self",
".",
"freshness_threshold",
"and",
"not",
"self",
".",
"freshness_expired",
":",
"# logger.debug(\"Checking freshness for %s, last state update: %s, now: %s.\",",
"# self.get_full_name(), self.last_state_update, now)",
"if",
"os",
".",
"getenv",
"(",
"'ALIGNAK_LOG_CHECKS'",
",",
"None",
")",
":",
"logger",
".",
"info",
"(",
"\"--ALC-- -> checking freshness for: %s\"",
",",
"self",
".",
"get_full_name",
"(",
")",
")",
"# If we never checked this item, we begin the freshness period",
"if",
"not",
"self",
".",
"last_state_update",
":",
"self",
".",
"last_state_update",
"=",
"int",
"(",
"now",
")",
"if",
"self",
".",
"last_state_update",
"<",
"now",
"-",
"(",
"self",
".",
"freshness_threshold",
"+",
"cls",
".",
"additional_freshness_latency",
")",
":",
"timeperiod",
"=",
"timeperiods",
"[",
"self",
".",
"check_period",
"]",
"if",
"timeperiod",
"is",
"None",
"or",
"timeperiod",
".",
"is_time_valid",
"(",
"now",
")",
":",
"# Create a new check for the scheduler",
"chk",
"=",
"self",
".",
"launch_check",
"(",
"now",
",",
"hosts",
",",
"services",
",",
"timeperiods",
",",
"macromodulations",
",",
"checkmodulations",
",",
"checks",
")",
"if",
"not",
"chk",
":",
"logger",
".",
"warning",
"(",
"\"No raised freshness check for: %s\"",
",",
"self",
")",
"return",
"None",
"chk",
".",
"freshness_expiry_check",
"=",
"True",
"chk",
".",
"check_time",
"=",
"time",
".",
"time",
"(",
")",
"chk",
".",
"output",
"=",
"\"Freshness period expired: %s\"",
"%",
"(",
"datetime",
".",
"utcfromtimestamp",
"(",
"int",
"(",
"chk",
".",
"check_time",
")",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S %Z\"",
")",
")",
"if",
"self",
".",
"my_type",
"==",
"'host'",
":",
"if",
"self",
".",
"freshness_state",
"==",
"'o'",
":",
"chk",
".",
"exit_status",
"=",
"0",
"elif",
"self",
".",
"freshness_state",
"==",
"'d'",
":",
"chk",
".",
"exit_status",
"=",
"2",
"elif",
"self",
".",
"freshness_state",
"in",
"[",
"'u'",
",",
"'x'",
"]",
":",
"chk",
".",
"exit_status",
"=",
"4",
"else",
":",
"chk",
".",
"exit_status",
"=",
"3",
"else",
":",
"if",
"self",
".",
"freshness_state",
"==",
"'o'",
":",
"chk",
".",
"exit_status",
"=",
"0",
"elif",
"self",
".",
"freshness_state",
"==",
"'w'",
":",
"chk",
".",
"exit_status",
"=",
"1",
"elif",
"self",
".",
"freshness_state",
"==",
"'c'",
":",
"chk",
".",
"exit_status",
"=",
"2",
"elif",
"self",
".",
"freshness_state",
"==",
"'u'",
":",
"chk",
".",
"exit_status",
"=",
"3",
"elif",
"self",
".",
"freshness_state",
"==",
"'x'",
":",
"chk",
".",
"exit_status",
"=",
"4",
"else",
":",
"chk",
".",
"exit_status",
"=",
"3",
"return",
"chk",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Ignored freshness check for %s, because \"",
"\"we are not in the check period.\"",
",",
"self",
".",
"get_full_name",
"(",
")",
")",
"return",
"None"
] |
Check freshness and schedule a check now if necessary.
This function is called by the scheduler if Alignak is configured to check the freshness.
It is called for hosts that have the freshness check enabled if they are only
passively checked.
It is called for services that have the freshness check enabled if they are only
passively checked and if their depending host is not in a freshness expired state
(freshness_expiry = True).
A log is raised when the freshess expiry is detected and the item is set as
freshness_expiry.
:param hosts: hosts objects, used to launch checks
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used launch checks
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used to get check_period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param macromodulations: Macro modulations objects, used in commands (notif, check)
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param checkmodulations: Checkmodulations objects, used to change check command if necessary
:type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
:param checks: checks dict, used to get checks_in_progress for the object
:type checks: dict
:return: A check or None
:rtype: None | object
|
[
"Check",
"freshness",
"and",
"schedule",
"a",
"check",
"now",
"if",
"necessary",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L653-L740
|
train
|
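A sketch of the two decisions at the heart of the freshness check: an item is stale when its last state update is older than `freshness_threshold` plus the additional latency, and the configured `freshness_state` letter is mapped to a check exit status. The threshold values below are illustrative; the letter-to-status mapping for a service follows the code above.

```python
import time

freshness_threshold = 3600             # seconds (illustrative)
additional_freshness_latency = 15      # illustrative extra grace period
last_state_update = time.time() - 4000

now = time.time()
is_stale = last_state_update < now - (freshness_threshold + additional_freshness_latency)

service_exit_for_state = {'o': 0, 'w': 1, 'c': 2, 'u': 3, 'x': 4}
freshness_state = 'x'
exit_status = service_exit_for_state.get(freshness_state, 3)
print(is_stale, exit_status)           # True 4
```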
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.update_business_impact_value
|
def update_business_impact_value(self, hosts, services, timeperiods, bi_modulations):
"""We update our 'business_impact' value with the max of
the impacts business_impact if we got impacts. And save our 'configuration'
business_impact if we do not have do it before
If we do not have impacts, we revert our value
:param hosts: hosts objects, used to get impacts
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get impacts
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used to get modulation_period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulations objects
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: None
TODO: SchedulingItem object should not handle other schedulingitem obj.
We should call obj.register* on both obj.
This is 'Java' style
"""
# First save our business_impact if not already do
if self.my_own_business_impact == -1:
self.my_own_business_impact = self.business_impact
# We look at our crit modulations. If one apply, we take apply it
# and it's done
in_modulation = False
for bi_modulation_id in self.business_impact_modulations:
bi_modulation = bi_modulations[bi_modulation_id]
now = time.time()
period = timeperiods[bi_modulation.modulation_period]
if period is None or period.is_time_valid(now):
self.business_impact = bi_modulation.business_impact
in_modulation = True
# We apply the first available, that's all
break
# If we truly have impacts, we get the max business_impact
# if it's huge than ourselves
if self.impacts:
bp_impacts = [hosts[elem].business_impact for elem in self.impacts if elem in hosts]
bp_impacts.extend([services[elem].business_impact for elem in self.impacts
if elem in services])
self.business_impact = max(self.business_impact, max(bp_impacts))
return
# If we are not a problem, we setup our own_crit if we are not in a
# modulation period
if self.my_own_business_impact != -1 and not in_modulation:
self.business_impact = self.my_own_business_impact
|
python
|
def update_business_impact_value(self, hosts, services, timeperiods, bi_modulations):
"""We update our 'business_impact' value with the max of
the impacts business_impact if we got impacts. And save our 'configuration'
business_impact if we do not have do it before
If we do not have impacts, we revert our value
:param hosts: hosts objects, used to get impacts
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get impacts
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used to get modulation_period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulations objects
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: None
TODO: SchedulingItem object should not handle other schedulingitem obj.
We should call obj.register* on both obj.
This is 'Java' style
"""
# First save our business_impact if not already do
if self.my_own_business_impact == -1:
self.my_own_business_impact = self.business_impact
# We look at our crit modulations. If one apply, we take apply it
# and it's done
in_modulation = False
for bi_modulation_id in self.business_impact_modulations:
bi_modulation = bi_modulations[bi_modulation_id]
now = time.time()
period = timeperiods[bi_modulation.modulation_period]
if period is None or period.is_time_valid(now):
self.business_impact = bi_modulation.business_impact
in_modulation = True
# We apply the first available, that's all
break
# If we truly have impacts, we get the max business_impact
# if it's huge than ourselves
if self.impacts:
bp_impacts = [hosts[elem].business_impact for elem in self.impacts if elem in hosts]
bp_impacts.extend([services[elem].business_impact for elem in self.impacts
if elem in services])
self.business_impact = max(self.business_impact, max(bp_impacts))
return
# If we are not a problem, we setup our own_crit if we are not in a
# modulation period
if self.my_own_business_impact != -1 and not in_modulation:
self.business_impact = self.my_own_business_impact
|
[
"def",
"update_business_impact_value",
"(",
"self",
",",
"hosts",
",",
"services",
",",
"timeperiods",
",",
"bi_modulations",
")",
":",
"# First save our business_impact if not already do",
"if",
"self",
".",
"my_own_business_impact",
"==",
"-",
"1",
":",
"self",
".",
"my_own_business_impact",
"=",
"self",
".",
"business_impact",
"# We look at our crit modulations. If one apply, we take apply it",
"# and it's done",
"in_modulation",
"=",
"False",
"for",
"bi_modulation_id",
"in",
"self",
".",
"business_impact_modulations",
":",
"bi_modulation",
"=",
"bi_modulations",
"[",
"bi_modulation_id",
"]",
"now",
"=",
"time",
".",
"time",
"(",
")",
"period",
"=",
"timeperiods",
"[",
"bi_modulation",
".",
"modulation_period",
"]",
"if",
"period",
"is",
"None",
"or",
"period",
".",
"is_time_valid",
"(",
"now",
")",
":",
"self",
".",
"business_impact",
"=",
"bi_modulation",
".",
"business_impact",
"in_modulation",
"=",
"True",
"# We apply the first available, that's all",
"break",
"# If we truly have impacts, we get the max business_impact",
"# if it's huge than ourselves",
"if",
"self",
".",
"impacts",
":",
"bp_impacts",
"=",
"[",
"hosts",
"[",
"elem",
"]",
".",
"business_impact",
"for",
"elem",
"in",
"self",
".",
"impacts",
"if",
"elem",
"in",
"hosts",
"]",
"bp_impacts",
".",
"extend",
"(",
"[",
"services",
"[",
"elem",
"]",
".",
"business_impact",
"for",
"elem",
"in",
"self",
".",
"impacts",
"if",
"elem",
"in",
"services",
"]",
")",
"self",
".",
"business_impact",
"=",
"max",
"(",
"self",
".",
"business_impact",
",",
"max",
"(",
"bp_impacts",
")",
")",
"return",
"# If we are not a problem, we setup our own_crit if we are not in a",
"# modulation period",
"if",
"self",
".",
"my_own_business_impact",
"!=",
"-",
"1",
"and",
"not",
"in_modulation",
":",
"self",
".",
"business_impact",
"=",
"self",
".",
"my_own_business_impact"
] |
We update our 'business_impact' value with the max of
the impacts business_impact if we got impacts. And save our 'configuration'
business_impact if we do not have do it before
If we do not have impacts, we revert our value
:param hosts: hosts objects, used to get impacts
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get impacts
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used to get modulation_period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulations objects
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: None
TODO: SchedulingItem object should not handle other schedulingitem obj.
We should call obj.register* on both obj.
This is 'Java' style
|
[
"We",
"update",
"our",
"business_impact",
"value",
"with",
"the",
"max",
"of",
"the",
"impacts",
"business_impact",
"if",
"we",
"got",
"impacts",
".",
"And",
"save",
"our",
"configuration",
"business_impact",
"if",
"we",
"do",
"not",
"have",
"do",
"it",
"before",
"If",
"we",
"do",
"not",
"have",
"impacts",
"we",
"revert",
"our",
"value"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L796-L844
|
train
|
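A sketch of the escalation rule: when an item has impacts, its effective `business_impact` becomes the maximum of its own value and the values of everything it impacts; the ids and values are illustrative.

```python
business_impact = 2                                   # own configured value
impacts = ["h1", "s1"]
hosts = {"h1": {"business_impact": 4}}
services = {"s1": {"business_impact": 3}}

if impacts:
    bp_impacts = [hosts[e]["business_impact"] for e in impacts if e in hosts]
    bp_impacts += [services[e]["business_impact"] for e in impacts if e in services]
    business_impact = max(business_impact, max(bp_impacts))

print(business_impact)                                # 4
```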
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.no_more_a_problem
|
def no_more_a_problem(self, hosts, services, timeperiods, bi_modulations):
"""Remove this objects as an impact for other schedulingitem.
:param hosts: hosts objects, used to get impacts
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get impacts
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used for update_business_impact_value
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulation are used when setting myself as problem
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: None
TODO: SchedulingItem object should not handle other schedulingitem obj.
We should call obj.register* on both obj.
This is 'Java' style
"""
was_pb = self.is_problem
if self.is_problem:
self.is_problem = False
# we warn impacts that we are no more a problem
for impact_id in self.impacts:
if impact_id in hosts:
impact = hosts[impact_id]
else:
impact = services[impact_id]
impact.unregister_a_problem(self)
# we can just drop our impacts list
self.impacts = []
# We update our business_impact value, it's not a huge thing :)
self.update_business_impact_value(hosts, services, timeperiods, bi_modulations)
# If we were a problem, we say to everyone
# our new status, with good business_impact value
if was_pb:
# And we register a new broks for update status
self.broks.append(self.get_update_status_brok())
|
python
|
def no_more_a_problem(self, hosts, services, timeperiods, bi_modulations):
"""Remove this objects as an impact for other schedulingitem.
:param hosts: hosts objects, used to get impacts
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get impacts
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used for update_business_impact_value
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulation are used when setting myself as problem
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: None
TODO: SchedulingItem object should not handle other schedulingitem obj.
We should call obj.register* on both obj.
This is 'Java' style
"""
was_pb = self.is_problem
if self.is_problem:
self.is_problem = False
# we warn impacts that we are no more a problem
for impact_id in self.impacts:
if impact_id in hosts:
impact = hosts[impact_id]
else:
impact = services[impact_id]
impact.unregister_a_problem(self)
# we can just drop our impacts list
self.impacts = []
# We update our business_impact value, it's not a huge thing :)
self.update_business_impact_value(hosts, services, timeperiods, bi_modulations)
# If we were a problem, we say to everyone
# our new status, with good business_impact value
if was_pb:
# And we register a new broks for update status
self.broks.append(self.get_update_status_brok())
|
[
"def",
"no_more_a_problem",
"(",
"self",
",",
"hosts",
",",
"services",
",",
"timeperiods",
",",
"bi_modulations",
")",
":",
"was_pb",
"=",
"self",
".",
"is_problem",
"if",
"self",
".",
"is_problem",
":",
"self",
".",
"is_problem",
"=",
"False",
"# we warn impacts that we are no more a problem",
"for",
"impact_id",
"in",
"self",
".",
"impacts",
":",
"if",
"impact_id",
"in",
"hosts",
":",
"impact",
"=",
"hosts",
"[",
"impact_id",
"]",
"else",
":",
"impact",
"=",
"services",
"[",
"impact_id",
"]",
"impact",
".",
"unregister_a_problem",
"(",
"self",
")",
"# we can just drop our impacts list",
"self",
".",
"impacts",
"=",
"[",
"]",
"# We update our business_impact value, it's not a huge thing :)",
"self",
".",
"update_business_impact_value",
"(",
"hosts",
",",
"services",
",",
"timeperiods",
",",
"bi_modulations",
")",
"# If we were a problem, we say to everyone",
"# our new status, with good business_impact value",
"if",
"was_pb",
":",
"# And we register a new broks for update status",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"get_update_status_brok",
"(",
")",
")"
] |
Remove this objects as an impact for other schedulingitem.
:param hosts: hosts objects, used to get impacts
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get impacts
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used for update_business_impact_value
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulation are used when setting myself as problem
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: None
TODO: SchedulingItem object should not handle other schedulingitem obj.
We should call obj.register* on both obj.
This is 'Java' style
|
[
"Remove",
"this",
"objects",
"as",
"an",
"impact",
"for",
"other",
"schedulingitem",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L846-L884
|
train
|
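A sketch of the tear-down performed by `no_more_a_problem`: each impacted item is told to forget this problem, then the local impacts list is dropped. The `Impacted` class is illustrative, not an Alignak object.

```python
class Impacted(object):
    def __init__(self):
        self.source_problems = ["prob-1"]

    def unregister_a_problem(self, prob_uuid):
        self.source_problems.remove(prob_uuid)

impacted_items = {"h1": Impacted(), "s1": Impacted()}
impacts = ["h1", "s1"]

for impact_id in impacts:
    impacted_items[impact_id].unregister_a_problem("prob-1")
impacts = []                                          # we can just drop our impacts list

print([i.source_problems for i in impacted_items.values()])  # [[], []]
```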
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.register_a_problem
|
def register_a_problem(self, prob, hosts, services, timeperiods, bi_modulations):
# pylint: disable=too-many-locals
"""Call recursively by potentials impacts so they
update their source_problems list. But do not
go below if the problem is not a real one for me
like If I've got multiple parents for examples
:param prob: problem to register
:type prob: alignak.objects.schedulingitem.SchedulingItem
:param hosts: hosts objects, used to get object in act_depend_of_me
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get object in act_depend_of_me
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulation are used when setting myself as problem
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: list of host/service that are impacts
:rtype: list[alignak.objects.schedulingitem.SchedulingItem]
TODO: SchedulingItem object should not handle other schedulingitem obj.
We should call obj.register* on both obj.
This is 'Java' style
"""
# Maybe we already have this problem? If so, bailout too
if prob.uuid in self.source_problems:
return []
now = time.time()
was_an_impact = self.is_impact
# Our father already look if he impacts us. So if we are here,
# it's that we really are impacted
self.is_impact = True
impacts = []
# Ok, if we are impacted, we can add it in our
# problem list
# TODO: remove this unused check
if self.is_impact:
logger.debug("I am impacted: %s", self)
# Maybe I was a problem myself, now I can say: not my fault!
if self.is_problem:
self.no_more_a_problem(hosts, services, timeperiods, bi_modulations)
# Ok, we are now impacted, we should take the good state
# but only when we just go to the impacted state
if not was_an_impact:
self.set_impact_state()
# Ok now we can be a simple impact
impacts.append(self.uuid)
if prob.uuid not in self.source_problems:
self.source_problems.append(prob.uuid)
# we should send this problem to all potential impacted that
# depend on us
for (impacted_item_id, status, timeperiod_id, _) in self.act_depend_of_me:
# Check if the status is ok for impact
if impacted_item_id in hosts:
impact = hosts[impacted_item_id]
else:
impact = services[impacted_item_id]
timeperiod = timeperiods[timeperiod_id]
for stat in status:
if self.is_state(stat):
# now check if we should bailout because of a
# not good timeperiod for dep
if timeperiod is None or timeperiod.is_time_valid(now):
new_impacts = impact.register_a_problem(prob, hosts,
services, timeperiods,
bi_modulations)
impacts.extend(new_impacts)
# And we register a new broks for update status
self.broks.append(self.get_update_status_brok())
# now we return all impacts (can be void of course)
return impacts
|
python
|
def register_a_problem(self, prob, hosts, services, timeperiods, bi_modulations):
# pylint: disable=too-many-locals
"""Call recursively by potentials impacts so they
update their source_problems list. But do not
go below if the problem is not a real one for me
like If I've got multiple parents for examples
:param prob: problem to register
:type prob: alignak.objects.schedulingitem.SchedulingItem
:param hosts: hosts objects, used to get object in act_depend_of_me
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get object in act_depend_of_me
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulation are used when setting myself as problem
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: list of host/service that are impacts
:rtype: list[alignak.objects.schedulingitem.SchedulingItem]
TODO: SchedulingItem object should not handle other schedulingitem obj.
We should call obj.register* on both obj.
This is 'Java' style
"""
# Maybe we already have this problem? If so, bailout too
if prob.uuid in self.source_problems:
return []
now = time.time()
was_an_impact = self.is_impact
# Our father already look if he impacts us. So if we are here,
# it's that we really are impacted
self.is_impact = True
impacts = []
# Ok, if we are impacted, we can add it in our
# problem list
# TODO: remove this unused check
if self.is_impact:
logger.debug("I am impacted: %s", self)
# Maybe I was a problem myself, now I can say: not my fault!
if self.is_problem:
self.no_more_a_problem(hosts, services, timeperiods, bi_modulations)
# Ok, we are now impacted, we should take the good state
# but only when we just go to the impacted state
if not was_an_impact:
self.set_impact_state()
# Ok now we can be a simple impact
impacts.append(self.uuid)
if prob.uuid not in self.source_problems:
self.source_problems.append(prob.uuid)
# we should send this problem to all potential impacted that
# depend on us
for (impacted_item_id, status, timeperiod_id, _) in self.act_depend_of_me:
# Check if the status is ok for impact
if impacted_item_id in hosts:
impact = hosts[impacted_item_id]
else:
impact = services[impacted_item_id]
timeperiod = timeperiods[timeperiod_id]
for stat in status:
if self.is_state(stat):
# now check if we should bailout because of a
# not good timeperiod for dep
if timeperiod is None or timeperiod.is_time_valid(now):
new_impacts = impact.register_a_problem(prob, hosts,
services, timeperiods,
bi_modulations)
impacts.extend(new_impacts)
# And we register a new broks for update status
self.broks.append(self.get_update_status_brok())
# now we return all impacts (can be void of course)
return impacts
|
[
"def",
"register_a_problem",
"(",
"self",
",",
"prob",
",",
"hosts",
",",
"services",
",",
"timeperiods",
",",
"bi_modulations",
")",
":",
"# pylint: disable=too-many-locals",
"# Maybe we already have this problem? If so, bailout too",
"if",
"prob",
".",
"uuid",
"in",
"self",
".",
"source_problems",
":",
"return",
"[",
"]",
"now",
"=",
"time",
".",
"time",
"(",
")",
"was_an_impact",
"=",
"self",
".",
"is_impact",
"# Our father already look if he impacts us. So if we are here,",
"# it's that we really are impacted",
"self",
".",
"is_impact",
"=",
"True",
"impacts",
"=",
"[",
"]",
"# Ok, if we are impacted, we can add it in our",
"# problem list",
"# TODO: remove this unused check",
"if",
"self",
".",
"is_impact",
":",
"logger",
".",
"debug",
"(",
"\"I am impacted: %s\"",
",",
"self",
")",
"# Maybe I was a problem myself, now I can say: not my fault!",
"if",
"self",
".",
"is_problem",
":",
"self",
".",
"no_more_a_problem",
"(",
"hosts",
",",
"services",
",",
"timeperiods",
",",
"bi_modulations",
")",
"# Ok, we are now impacted, we should take the good state",
"# but only when we just go to the impacted state",
"if",
"not",
"was_an_impact",
":",
"self",
".",
"set_impact_state",
"(",
")",
"# Ok now we can be a simple impact",
"impacts",
".",
"append",
"(",
"self",
".",
"uuid",
")",
"if",
"prob",
".",
"uuid",
"not",
"in",
"self",
".",
"source_problems",
":",
"self",
".",
"source_problems",
".",
"append",
"(",
"prob",
".",
"uuid",
")",
"# we should send this problem to all potential impacted that",
"# depend on us",
"for",
"(",
"impacted_item_id",
",",
"status",
",",
"timeperiod_id",
",",
"_",
")",
"in",
"self",
".",
"act_depend_of_me",
":",
"# Check if the status is ok for impact",
"if",
"impacted_item_id",
"in",
"hosts",
":",
"impact",
"=",
"hosts",
"[",
"impacted_item_id",
"]",
"else",
":",
"impact",
"=",
"services",
"[",
"impacted_item_id",
"]",
"timeperiod",
"=",
"timeperiods",
"[",
"timeperiod_id",
"]",
"for",
"stat",
"in",
"status",
":",
"if",
"self",
".",
"is_state",
"(",
"stat",
")",
":",
"# now check if we should bailout because of a",
"# not good timeperiod for dep",
"if",
"timeperiod",
"is",
"None",
"or",
"timeperiod",
".",
"is_time_valid",
"(",
"now",
")",
":",
"new_impacts",
"=",
"impact",
".",
"register_a_problem",
"(",
"prob",
",",
"hosts",
",",
"services",
",",
"timeperiods",
",",
"bi_modulations",
")",
"impacts",
".",
"extend",
"(",
"new_impacts",
")",
"# And we register a new broks for update status",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"get_update_status_brok",
"(",
")",
")",
"# now we return all impacts (can be void of course)",
"return",
"impacts"
] |
Called recursively by potential impacts so they
update their source_problems list. But do not
go further down if the problem is not a real one for me,
e.g. if I have multiple parents
:param prob: problem to register
:type prob: alignak.objects.schedulingitem.SchedulingItem
:param hosts: hosts objects, used to get object in act_depend_of_me
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get object in act_depend_of_me
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used for all kinds of timeperiods (notification, check)
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulations, used when setting myself as a problem
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: list of hosts/services that are impacted
:rtype: list[alignak.objects.schedulingitem.SchedulingItem]
TODO: a SchedulingItem object should not handle other SchedulingItem objects.
We should call obj.register* on both objects.
This is 'Java' style
|
[
"Call",
"recursively",
"by",
"potentials",
"impacts",
"so",
"they",
"update",
"their",
"source_problems",
"list",
".",
"But",
"do",
"not",
"go",
"below",
"if",
"the",
"problem",
"is",
"not",
"a",
"real",
"one",
"for",
"me",
"like",
"If",
"I",
"ve",
"got",
"multiple",
"parents",
"for",
"examples"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L886-L961
|
train
|
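A minimal sketch (not Alignak's API) of the recursive propagation pattern documented in the record above: each item marks itself as impacted, records the source problem, and forwards the problem to the items that depend on it. The Node class and its attribute names are illustrative only.

class Node(object):
    """Illustrative stand-in for a SchedulingItem-like object."""
    def __init__(self, name):
        self.name = name
        self.is_impact = False
        self.source_problems = []   # identifiers of problems impacting us
        self.depends_on_me = []     # nodes that depend on this node

    def register_a_problem(self, prob_name):
        # Bail out if we already know this problem
        if prob_name in self.source_problems:
            return []
        self.is_impact = True
        self.source_problems.append(prob_name)
        impacts = [self.name]
        # Recursively warn every item that depends on us
        for child in self.depends_on_me:
            impacts.extend(child.register_a_problem(prob_name))
        return impacts

host = Node("host1")
svc1, svc2 = Node("host1/srv1"), Node("host1/srv2")
host.depends_on_me = [svc1, svc2]
print(host.register_a_problem("router-down"))
# ['host1', 'host1/srv1', 'host1/srv2']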
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.unregister_a_problem
|
def unregister_a_problem(self, prob):
"""Remove the problem from our problems list
and check if we are still 'impacted'
:param prob: problem to remove
:type prob: alignak.objects.schedulingitem.SchedulingItem
:return: None
"""
self.source_problems.remove(prob.uuid)
# For know if we are still an impact, maybe our dependencies
# are not aware of the remove of the impact state because it's not ordered
# so we can just look at if we still have some problem in our list
if not self.source_problems:
self.is_impact = False
# No more an impact, we can unset the impact state
self.unset_impact_state()
# And we register a new broks for update status
self.broks.append(self.get_update_status_brok())
|
python
|
def unregister_a_problem(self, prob):
"""Remove the problem from our problems list
and check if we are still 'impacted'
:param prob: problem to remove
:type prob: alignak.objects.schedulingitem.SchedulingItem
:return: None
"""
self.source_problems.remove(prob.uuid)
# For know if we are still an impact, maybe our dependencies
# are not aware of the remove of the impact state because it's not ordered
# so we can just look at if we still have some problem in our list
if not self.source_problems:
self.is_impact = False
# No more an impact, we can unset the impact state
self.unset_impact_state()
# And we register a new broks for update status
self.broks.append(self.get_update_status_brok())
|
[
"def",
"unregister_a_problem",
"(",
"self",
",",
"prob",
")",
":",
"self",
".",
"source_problems",
".",
"remove",
"(",
"prob",
".",
"uuid",
")",
"# For know if we are still an impact, maybe our dependencies",
"# are not aware of the remove of the impact state because it's not ordered",
"# so we can just look at if we still have some problem in our list",
"if",
"not",
"self",
".",
"source_problems",
":",
"self",
".",
"is_impact",
"=",
"False",
"# No more an impact, we can unset the impact state",
"self",
".",
"unset_impact_state",
"(",
")",
"# And we register a new broks for update status",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"get_update_status_brok",
"(",
")",
")"
] |
Remove the problem from our problems list
and check if we are still 'impacted'
:param prob: problem to remove
:type prob: alignak.objects.schedulingitem.SchedulingItem
:return: None
|
[
"Remove",
"the",
"problem",
"from",
"our",
"problems",
"list",
"and",
"check",
"if",
"we",
"are",
"still",
"impacted"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L963-L982
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.is_enable_action_dependent
|
def is_enable_action_dependent(self, hosts, services):
"""
Check if the dependencies' states match the dependency statuses
This basically means that a dependency is in a bad state and
it can explain this object state.
:param hosts: hosts objects, used to get object in act_depend_of
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get object in act_depend_of
:type services: alignak.objects.service.Services
:return: True if all dependencies match the status, False otherwise
:rtype: bool
"""
# Use to know if notification is raise or not
enable_action = False
for (dep_id, status, _, _) in self.act_depend_of:
if 'n' in status:
enable_action = True
else:
if dep_id in hosts:
dep = hosts[dep_id]
else:
dep = services[dep_id]
p_is_down = False
dep_match = [dep.is_state(stat) for stat in status]
# check if the parent match a case, so he is down
if True in dep_match:
p_is_down = True
if not p_is_down:
enable_action = True
return enable_action
|
python
|
def is_enable_action_dependent(self, hosts, services):
"""
Check if the dependencies' states match the dependency statuses
This basically means that a dependency is in a bad state and
it can explain this object state.
:param hosts: hosts objects, used to get object in act_depend_of
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get object in act_depend_of
:type services: alignak.objects.service.Services
:return: True if all dependencies match the status, False otherwise
:rtype: bool
"""
# Use to know if notification is raise or not
enable_action = False
for (dep_id, status, _, _) in self.act_depend_of:
if 'n' in status:
enable_action = True
else:
if dep_id in hosts:
dep = hosts[dep_id]
else:
dep = services[dep_id]
p_is_down = False
dep_match = [dep.is_state(stat) for stat in status]
# check if the parent match a case, so he is down
if True in dep_match:
p_is_down = True
if not p_is_down:
enable_action = True
return enable_action
|
[
"def",
"is_enable_action_dependent",
"(",
"self",
",",
"hosts",
",",
"services",
")",
":",
"# Use to know if notification is raise or not",
"enable_action",
"=",
"False",
"for",
"(",
"dep_id",
",",
"status",
",",
"_",
",",
"_",
")",
"in",
"self",
".",
"act_depend_of",
":",
"if",
"'n'",
"in",
"status",
":",
"enable_action",
"=",
"True",
"else",
":",
"if",
"dep_id",
"in",
"hosts",
":",
"dep",
"=",
"hosts",
"[",
"dep_id",
"]",
"else",
":",
"dep",
"=",
"services",
"[",
"dep_id",
"]",
"p_is_down",
"=",
"False",
"dep_match",
"=",
"[",
"dep",
".",
"is_state",
"(",
"stat",
")",
"for",
"stat",
"in",
"status",
"]",
"# check if the parent match a case, so he is down",
"if",
"True",
"in",
"dep_match",
":",
"p_is_down",
"=",
"True",
"if",
"not",
"p_is_down",
":",
"enable_action",
"=",
"True",
"return",
"enable_action"
] |
Check if the dependencies' states match the dependency statuses
This basically means that a dependency is in a bad state and
it can explain this object state.
:param hosts: hosts objects, used to get object in act_depend_of
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get object in act_depend_of
:type services: alignak.objects.service.Services
:return: True if all dependencies match the status, False otherwise
:rtype: bool
|
[
"Check",
"if",
"dependencies",
"states",
"match",
"dependencies",
"statuses",
"This",
"basically",
"means",
"that",
"a",
"dependency",
"is",
"in",
"a",
"bad",
"state",
"and",
"it",
"can",
"explain",
"this",
"object",
"state",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L984-L1014
|
train
|
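A standalone sketch of the dependency test described in the record above, assuming a simplified (current_state, inhibiting_states) convention: 'n' means the dependency never inhibits actions, and otherwise actions stay enabled as long as the dependency does not match one of the listed states. The function name and data shapes are illustrative, not Alignak's.

def actions_enabled(dependencies):
    """dependencies: list of (current_state, inhibiting_states) tuples."""
    enable_action = False
    for current_state, inhibiting_states in dependencies:
        if 'n' in inhibiting_states:
            # 'n' (none) means this dependency never blocks actions
            enable_action = True
        elif current_state not in inhibiting_states:
            # The parent is not in a blocking state, so it cannot explain
            # our state: actions remain enabled
            enable_action = True
    return enable_action

# The only parent is DOWN and 'd' is listed as inhibiting: actions blocked
print(actions_enabled([('d', ['d', 'u'])]))                      # False
# A second parent is UP, so at least one dependency allows actions
print(actions_enabled([('d', ['d', 'u']), ('o', ['d', 'u'])]))   # True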
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.check_and_set_unreachability
|
def check_and_set_unreachability(self, hosts, services):
"""
Check if all dependencies are down and, if so, set this object
as unreachable.
todo: this function does not care about execution_failure_criteria!
:param hosts: hosts objects, used to get object in act_depend_of
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get object in act_depend_of
:type services: alignak.objects.service.Services
:return: None
"""
parent_is_down = []
for (dep_id, _, _, _) in self.act_depend_of:
if dep_id in hosts:
dep = hosts[dep_id]
else:
dep = services[dep_id]
if dep.state in ['d', 'DOWN', 'c', 'CRITICAL', 'u', 'UNKNOWN', 'x', 'UNREACHABLE']:
parent_is_down.append(True)
else:
parent_is_down.append(False)
if False in parent_is_down:
return
# all parents down
self.set_unreachable()
|
python
|
def check_and_set_unreachability(self, hosts, services):
"""
Check if all dependencies are down and, if so, set this object
as unreachable.
todo: this function does not care about execution_failure_criteria!
:param hosts: hosts objects, used to get object in act_depend_of
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get object in act_depend_of
:type services: alignak.objects.service.Services
:return: None
"""
parent_is_down = []
for (dep_id, _, _, _) in self.act_depend_of:
if dep_id in hosts:
dep = hosts[dep_id]
else:
dep = services[dep_id]
if dep.state in ['d', 'DOWN', 'c', 'CRITICAL', 'u', 'UNKNOWN', 'x', 'UNREACHABLE']:
parent_is_down.append(True)
else:
parent_is_down.append(False)
if False in parent_is_down:
return
# all parents down
self.set_unreachable()
|
[
"def",
"check_and_set_unreachability",
"(",
"self",
",",
"hosts",
",",
"services",
")",
":",
"parent_is_down",
"=",
"[",
"]",
"for",
"(",
"dep_id",
",",
"_",
",",
"_",
",",
"_",
")",
"in",
"self",
".",
"act_depend_of",
":",
"if",
"dep_id",
"in",
"hosts",
":",
"dep",
"=",
"hosts",
"[",
"dep_id",
"]",
"else",
":",
"dep",
"=",
"services",
"[",
"dep_id",
"]",
"if",
"dep",
".",
"state",
"in",
"[",
"'d'",
",",
"'DOWN'",
",",
"'c'",
",",
"'CRITICAL'",
",",
"'u'",
",",
"'UNKNOWN'",
",",
"'x'",
",",
"'UNREACHABLE'",
"]",
":",
"parent_is_down",
".",
"append",
"(",
"True",
")",
"else",
":",
"parent_is_down",
".",
"append",
"(",
"False",
")",
"if",
"False",
"in",
"parent_is_down",
":",
"return",
"# all parents down",
"self",
".",
"set_unreachable",
"(",
")"
] |
Check if all dependencies are down and, if so, set this object
as unreachable.
todo: this function does not care about execution_failure_criteria!
:param hosts: hosts objects, used to get object in act_depend_of
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get object in act_depend_of
:type services: alignak.objects.service.Services
:return: None
|
[
"Check",
"if",
"all",
"dependencies",
"are",
"down",
"if",
"yes",
"set",
"this",
"object",
"as",
"unreachable",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L1016-L1042
|
train
|
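The rule in the record above boils down to: become unreachable only when every parent is in a bad state. A hedged sketch with plain lists instead of the real Hosts/Services containers:

BAD_STATES = {'d', 'DOWN', 'c', 'CRITICAL', 'u', 'UNKNOWN', 'x', 'UNREACHABLE'}

def all_parents_down(parent_states):
    """Return True when every parent state is a 'bad' one."""
    # An empty list never forces unreachability in this sketch; the real
    # method simply iterates over act_depend_of
    return bool(parent_states) and all(s in BAD_STATES for s in parent_states)

print(all_parents_down(['DOWN', 'CRITICAL']))  # True  -> set_unreachable()
print(all_parents_down(['DOWN', 'UP']))        # False -> keep current state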
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.compensate_system_time_change
|
def compensate_system_time_change(self, difference): # pragma: no cover,
# not with unit tests
"""If a system time change occurs we have to update
properties time related to reflect change
:param difference: difference between new time and old time
:type difference:
:return: None
"""
# We only need to change some value
for prop in ('last_notification', 'last_state_change', 'last_hard_state_change'):
val = getattr(self, prop) # current value
# Do not go below 1970 :)
val = max(0, val + difference) # diff may be negative
setattr(self, prop, val)
|
python
|
def compensate_system_time_change(self, difference): # pragma: no cover,
# not with unit tests
"""If a system time change occurs we have to update
properties time related to reflect change
:param difference: difference between new time and old time
:type difference:
:return: None
"""
# We only need to change some value
for prop in ('last_notification', 'last_state_change', 'last_hard_state_change'):
val = getattr(self, prop) # current value
# Do not go below 1970 :)
val = max(0, val + difference) # diff may be negative
setattr(self, prop, val)
|
[
"def",
"compensate_system_time_change",
"(",
"self",
",",
"difference",
")",
":",
"# pragma: no cover,",
"# not with unit tests",
"# We only need to change some value",
"for",
"prop",
"in",
"(",
"'last_notification'",
",",
"'last_state_change'",
",",
"'last_hard_state_change'",
")",
":",
"val",
"=",
"getattr",
"(",
"self",
",",
"prop",
")",
"# current value",
"# Do not go below 1970 :)",
"val",
"=",
"max",
"(",
"0",
",",
"val",
"+",
"difference",
")",
"# diff may be negative",
"setattr",
"(",
"self",
",",
"prop",
",",
"val",
")"
] |
If a system time change occurs, we have to update
the time-related properties to reflect the change
:param difference: difference between new time and old time
:type difference:
:return: None
|
[
"If",
"a",
"system",
"time",
"change",
"occurs",
"we",
"have",
"to",
"update",
"properties",
"time",
"related",
"to",
"reflect",
"change"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L1300-L1314
|
train
|
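The compensation documented above is a clamped shift of the stored timestamps. A small sketch under the same assumption (difference = new time - old time, never going below the epoch):

class Item(object):
    def __init__(self):
        self.last_notification = 1000.0
        self.last_state_change = 2000.0
        self.last_hard_state_change = 3000.0

    def compensate_system_time_change(self, difference):
        for prop in ('last_notification', 'last_state_change',
                     'last_hard_state_change'):
            val = getattr(self, prop)
            # difference may be negative; never go below 0 (1970)
            setattr(self, prop, max(0, val + difference))

item = Item()
item.compensate_system_time_change(-2500)   # the clock went back 2500 seconds
print(item.last_notification)                # 0 (clamped)
print(item.last_hard_state_change)           # 500.0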
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.remove_in_progress_check
|
def remove_in_progress_check(self, check):
"""Remove check from check in progress
:param check: Check to remove
:type check: alignak.objects.check.Check
:return: None
"""
# The check is consumed, update the in_checking properties
if check in self.checks_in_progress:
self.checks_in_progress.remove(check)
self.update_in_checking()
|
python
|
def remove_in_progress_check(self, check):
"""Remove check from check in progress
:param check: Check to remove
:type check: alignak.objects.check.Check
:return: None
"""
# The check is consumed, update the in_checking properties
if check in self.checks_in_progress:
self.checks_in_progress.remove(check)
self.update_in_checking()
|
[
"def",
"remove_in_progress_check",
"(",
"self",
",",
"check",
")",
":",
"# The check is consumed, update the in_checking properties",
"if",
"check",
"in",
"self",
".",
"checks_in_progress",
":",
"self",
".",
"checks_in_progress",
".",
"remove",
"(",
"check",
")",
"self",
".",
"update_in_checking",
"(",
")"
] |
Remove check from check in progress
:param check: Check to remove
:type check: alignak.objects.check.Check
:return: None
|
[
"Remove",
"check",
"from",
"check",
"in",
"progress"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L1334-L1344
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.remove_in_progress_notification
|
def remove_in_progress_notification(self, notification):
"""
Remove a notification and mark it as a zombie
:param notification: the notification to remove
:type notification: alignak.notification.Notification
:return: None
"""
if notification.uuid in self.notifications_in_progress:
notification.status = ACT_STATUS_ZOMBIE
del self.notifications_in_progress[notification.uuid]
|
python
|
def remove_in_progress_notification(self, notification):
"""
Remove a notification and mark it as a zombie
:param notification: the notification to remove
:type notification: alignak.notification.Notification
:return: None
"""
if notification.uuid in self.notifications_in_progress:
notification.status = ACT_STATUS_ZOMBIE
del self.notifications_in_progress[notification.uuid]
|
[
"def",
"remove_in_progress_notification",
"(",
"self",
",",
"notification",
")",
":",
"if",
"notification",
".",
"uuid",
"in",
"self",
".",
"notifications_in_progress",
":",
"notification",
".",
"status",
"=",
"ACT_STATUS_ZOMBIE",
"del",
"self",
".",
"notifications_in_progress",
"[",
"notification",
".",
"uuid",
"]"
] |
Remove a notification and mark it as a zombie
:param notification: the notification to remove
:type notification: alignak.notification.Notification
:return: None
|
[
"Remove",
"a",
"notification",
"and",
"mark",
"them",
"as",
"zombie"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L1354-L1364
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.remove_in_progress_notifications
|
def remove_in_progress_notifications(self, master=True):
"""Remove all notifications from notifications_in_progress
Preserves some specific notifications (downtime, ...)
:param master: remove master notifications only if True (default value)
:type master: bool
:return: None
"""
for notification in list(self.notifications_in_progress.values()):
if master and notification.contact:
continue
# Do not remove some specific notifications
if notification.type in [u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED',
u'CUSTOM', u'ACKNOWLEDGEMENT']:
continue
self.remove_in_progress_notification(notification)
|
python
|
def remove_in_progress_notifications(self, master=True):
"""Remove all notifications from notifications_in_progress
Preserves some specific notifications (downtime, ...)
:param master: remove master notifications only if True (default value)
:type master: bool
:return: None
"""
for notification in list(self.notifications_in_progress.values()):
if master and notification.contact:
continue
# Do not remove some specific notifications
if notification.type in [u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED',
u'CUSTOM', u'ACKNOWLEDGEMENT']:
continue
self.remove_in_progress_notification(notification)
|
[
"def",
"remove_in_progress_notifications",
"(",
"self",
",",
"master",
"=",
"True",
")",
":",
"for",
"notification",
"in",
"list",
"(",
"self",
".",
"notifications_in_progress",
".",
"values",
"(",
")",
")",
":",
"if",
"master",
"and",
"notification",
".",
"contact",
":",
"continue",
"# Do not remove some specific notifications",
"if",
"notification",
".",
"type",
"in",
"[",
"u'DOWNTIMESTART'",
",",
"u'DOWNTIMEEND'",
",",
"u'DOWNTIMECANCELLED'",
",",
"u'CUSTOM'",
",",
"u'ACKNOWLEDGEMENT'",
"]",
":",
"continue",
"self",
".",
"remove_in_progress_notification",
"(",
"notification",
")"
] |
Remove all notifications from notifications_in_progress
Preserves some specific notifications (downtime, ...)
:param master: remove master notifications only if True (default value)
:type master: bool
:return: None
|
[
"Remove",
"all",
"notifications",
"from",
"notifications_in_progress"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L1366-L1384
|
train
|
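The filtering above keeps contact (non-master) notifications when master=True and always preserves downtime, custom and acknowledgement notifications. A minimal sketch using dictionaries instead of Notification objects:

PRESERVED_TYPES = {'DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED',
                   'CUSTOM', 'ACKNOWLEDGEMENT'}

def to_remove(notifications, master=True):
    """Return the notifications that would be dropped."""
    dropped = []
    for notif in notifications:
        # With master=True, contact-level notifications are kept
        if master and notif.get('contact'):
            continue
        # Some notification types are never removed
        if notif['type'] in PRESERVED_TYPES:
            continue
        dropped.append(notif)
    return dropped

pending = [{'type': 'PROBLEM', 'contact': None},
           {'type': 'PROBLEM', 'contact': 'admin'},
           {'type': 'DOWNTIMESTART', 'contact': None}]
print([n['type'] for n in to_remove(pending)])  # ['PROBLEM'] (the master one)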
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.check_for_flexible_downtime
|
def check_for_flexible_downtime(self, timeperiods, hosts, services):
"""Enter in a downtime if necessary and raise start notification
When a non Ok state occurs we try to raise a flexible downtime.
:param timeperiods: Timeperiods objects, used for downtime period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param hosts: hosts objects, used to enter downtime
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to enter downtime
:type services: alignak.objects.service.Services
:return: None
"""
status_updated = False
for downtime_id in self.downtimes:
downtime = self.downtimes[downtime_id]
# Activate flexible downtimes (do not activate triggered downtimes)
# Note: only activate if we are between downtime start and end time!
if downtime.fixed or downtime.is_in_effect:
continue
if downtime.start_time <= self.last_chk and downtime.end_time >= self.last_chk \
and self.state_id != 0 and downtime.trigger_id in ['', '0']:
# returns downtimestart notifications
self.broks.extend(downtime.enter(timeperiods, hosts, services))
status_updated = True
if status_updated is True:
self.broks.append(self.get_update_status_brok())
|
python
|
def check_for_flexible_downtime(self, timeperiods, hosts, services):
"""Enter in a downtime if necessary and raise start notification
When a non Ok state occurs we try to raise a flexible downtime.
:param timeperiods: Timeperiods objects, used for downtime period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param hosts: hosts objects, used to enter downtime
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to enter downtime
:type services: alignak.objects.service.Services
:return: None
"""
status_updated = False
for downtime_id in self.downtimes:
downtime = self.downtimes[downtime_id]
# Activate flexible downtimes (do not activate triggered downtimes)
# Note: only activate if we are between downtime start and end time!
if downtime.fixed or downtime.is_in_effect:
continue
if downtime.start_time <= self.last_chk and downtime.end_time >= self.last_chk \
and self.state_id != 0 and downtime.trigger_id in ['', '0']:
# returns downtimestart notifications
self.broks.extend(downtime.enter(timeperiods, hosts, services))
status_updated = True
if status_updated is True:
self.broks.append(self.get_update_status_brok())
|
[
"def",
"check_for_flexible_downtime",
"(",
"self",
",",
"timeperiods",
",",
"hosts",
",",
"services",
")",
":",
"status_updated",
"=",
"False",
"for",
"downtime_id",
"in",
"self",
".",
"downtimes",
":",
"downtime",
"=",
"self",
".",
"downtimes",
"[",
"downtime_id",
"]",
"# Activate flexible downtimes (do not activate triggered downtimes)",
"# Note: only activate if we are between downtime start and end time!",
"if",
"downtime",
".",
"fixed",
"or",
"downtime",
".",
"is_in_effect",
":",
"continue",
"if",
"downtime",
".",
"start_time",
"<=",
"self",
".",
"last_chk",
"and",
"downtime",
".",
"end_time",
">=",
"self",
".",
"last_chk",
"and",
"self",
".",
"state_id",
"!=",
"0",
"and",
"downtime",
".",
"trigger_id",
"in",
"[",
"''",
",",
"'0'",
"]",
":",
"# returns downtimestart notifications",
"self",
".",
"broks",
".",
"extend",
"(",
"downtime",
".",
"enter",
"(",
"timeperiods",
",",
"hosts",
",",
"services",
")",
")",
"status_updated",
"=",
"True",
"if",
"status_updated",
"is",
"True",
":",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"get_update_status_brok",
"(",
")",
")"
] |
Enter a downtime if necessary and raise the start notification
When a non-OK state occurs, we try to raise a flexible downtime.
:param timeperiods: Timeperiods objects, used for downtime period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param hosts: hosts objects, used to enter downtime
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to enter downtime
:type services: alignak.objects.service.Services
:return: None
|
[
"Enter",
"in",
"a",
"downtime",
"if",
"necessary",
"and",
"raise",
"start",
"notification",
"When",
"a",
"non",
"Ok",
"state",
"occurs",
"we",
"try",
"to",
"raise",
"a",
"flexible",
"downtime",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L1505-L1530
|
train
|
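A flexible downtime is activated only when the last check falls inside its window, the item is in a non-OK state, and the downtime is neither fixed, nor already in effect, nor triggered by another downtime. A hedged sketch of just that condition, with a dictionary standing in for the Downtime object:

def should_enter_flexible_downtime(downtime, last_chk, state_id):
    """downtime: dict with fixed/is_in_effect/start_time/end_time/trigger_id."""
    if downtime['fixed'] or downtime['is_in_effect']:
        return False
    return (downtime['start_time'] <= last_chk <= downtime['end_time']
            and state_id != 0                         # only for non-OK states
            and downtime['trigger_id'] in ('', '0'))  # not a triggered downtime

dt = {'fixed': False, 'is_in_effect': False, 'start_time': 100,
      'end_time': 200, 'trigger_id': ''}
print(should_enter_flexible_downtime(dt, last_chk=150, state_id=2))  # True
print(should_enter_flexible_downtime(dt, last_chk=150, state_id=0))  # False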
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.update_hard_unknown_phase_state
|
def update_hard_unknown_phase_state(self):
"""Update in_hard_unknown_reach_phase attribute and
was_in_hard_unknown_reach_phase
UNKNOWN during a HARD state are not so important, and they should
not raise notif about it
:return: None
"""
self.was_in_hard_unknown_reach_phase = self.in_hard_unknown_reach_phase
# We do not care about SOFT state at all
# and we are sure we are no more in such a phase
if self.state_type != 'HARD' or self.last_state_type != 'HARD':
self.in_hard_unknown_reach_phase = False
# So if we are not in already in such a phase, we check for
# a start or not. So here we are sure to be in a HARD/HARD following
# state
if not self.in_hard_unknown_reach_phase:
if self.state == 'UNKNOWN' and self.last_state != 'UNKNOWN' \
or self.state == 'UNREACHABLE' and self.last_state != 'UNREACHABLE':
self.in_hard_unknown_reach_phase = True
# We also backup with which state we was before enter this phase
self.state_before_hard_unknown_reach_phase = self.last_state
return
else:
# if we were already in such a phase, look for its end
if self.state != 'UNKNOWN' and self.state != 'UNREACHABLE':
self.in_hard_unknown_reach_phase = False
# If we just exit the phase, look if we exit with a different state
# than we enter or not. If so, lie and say we were not in such phase
# because we need so to raise a new notif
if not self.in_hard_unknown_reach_phase and self.was_in_hard_unknown_reach_phase:
if self.state != self.state_before_hard_unknown_reach_phase:
self.was_in_hard_unknown_reach_phase = False
|
python
|
def update_hard_unknown_phase_state(self):
"""Update in_hard_unknown_reach_phase attribute and
was_in_hard_unknown_reach_phase
UNKNOWN during a HARD state are not so important, and they should
not raise notif about it
:return: None
"""
self.was_in_hard_unknown_reach_phase = self.in_hard_unknown_reach_phase
# We do not care about SOFT state at all
# and we are sure we are no more in such a phase
if self.state_type != 'HARD' or self.last_state_type != 'HARD':
self.in_hard_unknown_reach_phase = False
# So if we are not in already in such a phase, we check for
# a start or not. So here we are sure to be in a HARD/HARD following
# state
if not self.in_hard_unknown_reach_phase:
if self.state == 'UNKNOWN' and self.last_state != 'UNKNOWN' \
or self.state == 'UNREACHABLE' and self.last_state != 'UNREACHABLE':
self.in_hard_unknown_reach_phase = True
# We also backup with which state we was before enter this phase
self.state_before_hard_unknown_reach_phase = self.last_state
return
else:
# if we were already in such a phase, look for its end
if self.state != 'UNKNOWN' and self.state != 'UNREACHABLE':
self.in_hard_unknown_reach_phase = False
# If we just exit the phase, look if we exit with a different state
# than we enter or not. If so, lie and say we were not in such phase
# because we need so to raise a new notif
if not self.in_hard_unknown_reach_phase and self.was_in_hard_unknown_reach_phase:
if self.state != self.state_before_hard_unknown_reach_phase:
self.was_in_hard_unknown_reach_phase = False
|
[
"def",
"update_hard_unknown_phase_state",
"(",
"self",
")",
":",
"self",
".",
"was_in_hard_unknown_reach_phase",
"=",
"self",
".",
"in_hard_unknown_reach_phase",
"# We do not care about SOFT state at all",
"# and we are sure we are no more in such a phase",
"if",
"self",
".",
"state_type",
"!=",
"'HARD'",
"or",
"self",
".",
"last_state_type",
"!=",
"'HARD'",
":",
"self",
".",
"in_hard_unknown_reach_phase",
"=",
"False",
"# So if we are not in already in such a phase, we check for",
"# a start or not. So here we are sure to be in a HARD/HARD following",
"# state",
"if",
"not",
"self",
".",
"in_hard_unknown_reach_phase",
":",
"if",
"self",
".",
"state",
"==",
"'UNKNOWN'",
"and",
"self",
".",
"last_state",
"!=",
"'UNKNOWN'",
"or",
"self",
".",
"state",
"==",
"'UNREACHABLE'",
"and",
"self",
".",
"last_state",
"!=",
"'UNREACHABLE'",
":",
"self",
".",
"in_hard_unknown_reach_phase",
"=",
"True",
"# We also backup with which state we was before enter this phase",
"self",
".",
"state_before_hard_unknown_reach_phase",
"=",
"self",
".",
"last_state",
"return",
"else",
":",
"# if we were already in such a phase, look for its end",
"if",
"self",
".",
"state",
"!=",
"'UNKNOWN'",
"and",
"self",
".",
"state",
"!=",
"'UNREACHABLE'",
":",
"self",
".",
"in_hard_unknown_reach_phase",
"=",
"False",
"# If we just exit the phase, look if we exit with a different state",
"# than we enter or not. If so, lie and say we were not in such phase",
"# because we need so to raise a new notif",
"if",
"not",
"self",
".",
"in_hard_unknown_reach_phase",
"and",
"self",
".",
"was_in_hard_unknown_reach_phase",
":",
"if",
"self",
".",
"state",
"!=",
"self",
".",
"state_before_hard_unknown_reach_phase",
":",
"self",
".",
"was_in_hard_unknown_reach_phase",
"=",
"False"
] |
Update the in_hard_unknown_reach_phase and
was_in_hard_unknown_reach_phase attributes
UNKNOWN states during a HARD state are not so important, and they should
not raise a notification about it
:return: None
|
[
"Update",
"in_hard_unknown_reach_phase",
"attribute",
"and",
"was_in_hard_unknown_reach_phase",
"UNKNOWN",
"during",
"a",
"HARD",
"state",
"are",
"not",
"so",
"important",
"and",
"they",
"should",
"not",
"raise",
"notif",
"about",
"it"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L1532-L1567
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.update_notification_command
|
def update_notification_command(self, notif, contact, macromodulations, timeperiods,
host_ref=None):
"""Update the notification command by resolving Macros
And because we are just launching the notification, we can say
that this contact has been notified
:param notif: notification to send
:type notif: alignak.objects.notification.Notification
:param contact: contact for this host/service
:type contact: alignak.object.contact.Contact
:param macromodulations: Macro modulations objects, used in the notification command
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param timeperiods: Timeperiods objects, used to get modulation period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param host_ref: reference host (used for a service)
:type host_ref: alignak.object.host.Host
:return: None
"""
cls = self.__class__
macrosolver = MacroResolver()
data = self.get_data_for_notifications(contact, notif, host_ref)
notif.command = macrosolver.resolve_command(notif.command_call, data, macromodulations,
timeperiods)
if cls.enable_environment_macros or notif.enable_environment_macros:
notif.env = macrosolver.get_env_macros(data)
|
python
|
def update_notification_command(self, notif, contact, macromodulations, timeperiods,
host_ref=None):
"""Update the notification command by resolving Macros
And because we are just launching the notification, we can say
that this contact has been notified
:param notif: notification to send
:type notif: alignak.objects.notification.Notification
:param contact: contact for this host/service
:type contact: alignak.object.contact.Contact
:param macromodulations: Macro modulations objects, used in the notification command
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param timeperiods: Timeperiods objects, used to get modulation period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param host_ref: reference host (used for a service)
:type host_ref: alignak.object.host.Host
:return: None
"""
cls = self.__class__
macrosolver = MacroResolver()
data = self.get_data_for_notifications(contact, notif, host_ref)
notif.command = macrosolver.resolve_command(notif.command_call, data, macromodulations,
timeperiods)
if cls.enable_environment_macros or notif.enable_environment_macros:
notif.env = macrosolver.get_env_macros(data)
|
[
"def",
"update_notification_command",
"(",
"self",
",",
"notif",
",",
"contact",
",",
"macromodulations",
",",
"timeperiods",
",",
"host_ref",
"=",
"None",
")",
":",
"cls",
"=",
"self",
".",
"__class__",
"macrosolver",
"=",
"MacroResolver",
"(",
")",
"data",
"=",
"self",
".",
"get_data_for_notifications",
"(",
"contact",
",",
"notif",
",",
"host_ref",
")",
"notif",
".",
"command",
"=",
"macrosolver",
".",
"resolve_command",
"(",
"notif",
".",
"command_call",
",",
"data",
",",
"macromodulations",
",",
"timeperiods",
")",
"if",
"cls",
".",
"enable_environment_macros",
"or",
"notif",
".",
"enable_environment_macros",
":",
"notif",
".",
"env",
"=",
"macrosolver",
".",
"get_env_macros",
"(",
"data",
")"
] |
Update the notification command by resolving Macros
And because we are just launching the notification, we can say
that this contact has been notified
:param notif: notification to send
:type notif: alignak.objects.notification.Notification
:param contact: contact for this host/service
:type contact: alignak.object.contact.Contact
:param macromodulations: Macro modulations objects, used in the notification command
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param timeperiods: Timeperiods objects, used to get modulation period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param host_ref: reference host (used for a service)
:type host_ref: alignak.object.host.Host
:return: None
|
[
"Update",
"the",
"notification",
"command",
"by",
"resolving",
"Macros",
"And",
"because",
"we",
"are",
"just",
"launching",
"the",
"notification",
"we",
"can",
"say",
"that",
"this",
"contact",
"has",
"been",
"notified"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L1997-L2021
|
train
|
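At its core the step above is macro substitution into the notification command, plus optionally exporting the same data as environment variables. A hedged sketch with a trivial resolver standing in for Alignak's MacroResolver; the macro names and the environment prefix are illustrative only:

def resolve_macros(template, data):
    """Tiny stand-in for a macro resolver: replaces $NAME$ tokens."""
    out = template
    for name, value in data.items():
        out = out.replace('$%s$' % name, str(value))
    return out

data = {'HOSTNAME': 'srv01', 'SERVICEDESC': 'load', 'CONTACTNAME': 'admin'}
command = resolve_macros(
    'notify-by-email --contact $CONTACTNAME$ --about $HOSTNAME$/$SERVICEDESC$',
    data)
print(command)
# notify-by-email --contact admin --about srv01/load

# With environment macros enabled, the same data would also be exported as
# environment variables for the command to read (prefix illustrative only)
env = {'MACRO_%s' % name: str(value) for name, value in data.items()}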
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.is_escalable
|
def is_escalable(self, notification, escalations, timeperiods):
"""Check if a notification can be escalated.
Basically call is_eligible for each escalation
:param notification: notification we would like to escalate
:type notification: alignak.objects.notification.Notification
:param escalations: Escalations objects, used to get escalation objects (period)
:type escalations: alignak.objects.escalation.Escalations
:param timeperiods: Timeperiods objects, used to get escalation period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: True if notification can be escalated, otherwise False
:rtype: bool
"""
cls = self.__class__
# We search since when we are in notification for escalations
# that are based on time
in_notif_time = time.time() - notification.creation_time
# Check is an escalation match the current_notification_number
for escalation_id in self.escalations:
escalation = escalations[escalation_id]
escalation_period = timeperiods[escalation.escalation_period]
if escalation.is_eligible(notification.t_to_go, self.state, notification.notif_nb,
in_notif_time, cls.interval_length, escalation_period):
return True
return False
|
python
|
def is_escalable(self, notification, escalations, timeperiods):
"""Check if a notification can be escalated.
Basically call is_eligible for each escalation
:param notification: notification we would like to escalate
:type notification: alignak.objects.notification.Notification
:param escalations: Escalations objects, used to get escalation objects (period)
:type escalations: alignak.objects.escalation.Escalations
:param timeperiods: Timeperiods objects, used to get escalation period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: True if notification can be escalated, otherwise False
:rtype: bool
"""
cls = self.__class__
# We search since when we are in notification for escalations
# that are based on time
in_notif_time = time.time() - notification.creation_time
# Check is an escalation match the current_notification_number
for escalation_id in self.escalations:
escalation = escalations[escalation_id]
escalation_period = timeperiods[escalation.escalation_period]
if escalation.is_eligible(notification.t_to_go, self.state, notification.notif_nb,
in_notif_time, cls.interval_length, escalation_period):
return True
return False
|
[
"def",
"is_escalable",
"(",
"self",
",",
"notification",
",",
"escalations",
",",
"timeperiods",
")",
":",
"cls",
"=",
"self",
".",
"__class__",
"# We search since when we are in notification for escalations",
"# that are based on time",
"in_notif_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"notification",
".",
"creation_time",
"# Check is an escalation match the current_notification_number",
"for",
"escalation_id",
"in",
"self",
".",
"escalations",
":",
"escalation",
"=",
"escalations",
"[",
"escalation_id",
"]",
"escalation_period",
"=",
"timeperiods",
"[",
"escalation",
".",
"escalation_period",
"]",
"if",
"escalation",
".",
"is_eligible",
"(",
"notification",
".",
"t_to_go",
",",
"self",
".",
"state",
",",
"notification",
".",
"notif_nb",
",",
"in_notif_time",
",",
"cls",
".",
"interval_length",
",",
"escalation_period",
")",
":",
"return",
"True",
"return",
"False"
] |
Check if a notification can be escalated.
Basically call is_eligible for each escalation
:param notification: notification we would like to escalate
:type notification: alignak.objects.notification.Notification
:param escalations: Escalations objects, used to get escalation objects (period)
:type escalations: alignak.objects.escalation.Escalations
:param timeperiods: Timeperiods objects, used to get escalation period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: True if notification can be escalated, otherwise False
:rtype: bool
|
[
"Check",
"if",
"a",
"notification",
"can",
"be",
"escalated",
".",
"Basically",
"call",
"is_eligible",
"for",
"each",
"escalation"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L2023-L2050
|
train
|
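Escalation eligibility amounts to asking each escalation whether it applies to the current notification. A simplified sketch under an assumed, notification-number-only eligibility rule; the real is_eligible() also receives the state, the time spent in notification, the interval length and the escalation timeperiod:

class FakeEscalation(object):
    """Illustrative escalation: eligible between two notification numbers."""
    def __init__(self, first_notif, last_notif):
        self.first_notif = first_notif
        self.last_notif = last_notif

    def is_eligible(self, notif_nb):
        return self.first_notif <= notif_nb <= self.last_notif

def is_escalable(escalations, notif_nb):
    # True as soon as one escalation is eligible for this notification
    return any(esc.is_eligible(notif_nb) for esc in escalations)

escalations = [FakeEscalation(3, 5)]
print(is_escalable(escalations, notif_nb=1))  # False: too early
print(is_escalable(escalations, notif_nb=3))  # True: escalate now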
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.get_next_notification_time
|
def get_next_notification_time(self, notif, escalations, timeperiods):
# pylint: disable=too-many-locals
"""Get the next notification time for a notification
Take the standard notification_interval, or ask our escalations
if one of them needs a smaller value to escalate
:param notif: Notification we need the time for
:type notif: alignak.objects.notification.Notification
:param escalations: Escalations objects, used to get escalation objects (interval, period)
:type escalations: alignak.objects.escalation.Escalations
:param timeperiods: Timeperiods objects, used to get escalation period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: Timestamp of next notification
:rtype: int
"""
res = None
now = time.time()
cls = self.__class__
# Look at the minimum notification interval
notification_interval = self.notification_interval
# and then look for currently active notifications, and take notification_interval
# if filled and less than the self value
in_notif_time = time.time() - notif.creation_time
for escalation_id in self.escalations:
escalation = escalations[escalation_id]
escalation_period = timeperiods[escalation.escalation_period]
if escalation.is_eligible(notif.t_to_go, self.state, notif.notif_nb,
in_notif_time, cls.interval_length, escalation_period):
if escalation.notification_interval != -1 and \
escalation.notification_interval < notification_interval:
notification_interval = escalation.notification_interval
# So take the by default time
std_time = notif.t_to_go + notification_interval * cls.interval_length
# Maybe the notification comes from retention data and
# next notification alert is in the past
# if so let use the now value instead
if std_time < now:
std_time = now + notification_interval * cls.interval_length
# standard time is a good one
res = std_time
creation_time = notif.creation_time
in_notif_time = now - notif.creation_time
for escalation_id in self.escalations:
escalation = escalations[escalation_id]
# If the escalation was already raised, we do not look for a new "early start"
if escalation.get_name() not in notif.already_start_escalations:
escalation_period = timeperiods[escalation.escalation_period]
next_t = escalation.get_next_notif_time(std_time, self.state,
creation_time, cls.interval_length,
escalation_period)
# If we got a real result (time base escalation), we add it
if next_t is not None and now < next_t < res:
res = next_t
# And we take the minimum of this result. Can be standard or escalation asked
return res
|
python
|
def get_next_notification_time(self, notif, escalations, timeperiods):
# pylint: disable=too-many-locals
"""Get the next notification time for a notification
Take the standard notification_interval, or ask our escalations
if one of them needs a smaller value to escalate
:param notif: Notification we need the time for
:type notif: alignak.objects.notification.Notification
:param escalations: Escalations objects, used to get escalation objects (interval, period)
:type escalations: alignak.objects.escalation.Escalations
:param timeperiods: Timeperiods objects, used to get escalation period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: Timestamp of next notification
:rtype: int
"""
res = None
now = time.time()
cls = self.__class__
# Look at the minimum notification interval
notification_interval = self.notification_interval
# and then look for currently active notifications, and take notification_interval
# if filled and less than the self value
in_notif_time = time.time() - notif.creation_time
for escalation_id in self.escalations:
escalation = escalations[escalation_id]
escalation_period = timeperiods[escalation.escalation_period]
if escalation.is_eligible(notif.t_to_go, self.state, notif.notif_nb,
in_notif_time, cls.interval_length, escalation_period):
if escalation.notification_interval != -1 and \
escalation.notification_interval < notification_interval:
notification_interval = escalation.notification_interval
# So take the by default time
std_time = notif.t_to_go + notification_interval * cls.interval_length
# Maybe the notification comes from retention data and
# next notification alert is in the past
# if so let use the now value instead
if std_time < now:
std_time = now + notification_interval * cls.interval_length
# standard time is a good one
res = std_time
creation_time = notif.creation_time
in_notif_time = now - notif.creation_time
for escalation_id in self.escalations:
escalation = escalations[escalation_id]
# If the escalation was already raised, we do not look for a new "early start"
if escalation.get_name() not in notif.already_start_escalations:
escalation_period = timeperiods[escalation.escalation_period]
next_t = escalation.get_next_notif_time(std_time, self.state,
creation_time, cls.interval_length,
escalation_period)
# If we got a real result (time base escalation), we add it
if next_t is not None and now < next_t < res:
res = next_t
# And we take the minimum of this result. Can be standard or escalation asked
return res
|
[
"def",
"get_next_notification_time",
"(",
"self",
",",
"notif",
",",
"escalations",
",",
"timeperiods",
")",
":",
"# pylint: disable=too-many-locals",
"res",
"=",
"None",
"now",
"=",
"time",
".",
"time",
"(",
")",
"cls",
"=",
"self",
".",
"__class__",
"# Look at the minimum notification interval",
"notification_interval",
"=",
"self",
".",
"notification_interval",
"# and then look for currently active notifications, and take notification_interval",
"# if filled and less than the self value",
"in_notif_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"notif",
".",
"creation_time",
"for",
"escalation_id",
"in",
"self",
".",
"escalations",
":",
"escalation",
"=",
"escalations",
"[",
"escalation_id",
"]",
"escalation_period",
"=",
"timeperiods",
"[",
"escalation",
".",
"escalation_period",
"]",
"if",
"escalation",
".",
"is_eligible",
"(",
"notif",
".",
"t_to_go",
",",
"self",
".",
"state",
",",
"notif",
".",
"notif_nb",
",",
"in_notif_time",
",",
"cls",
".",
"interval_length",
",",
"escalation_period",
")",
":",
"if",
"escalation",
".",
"notification_interval",
"!=",
"-",
"1",
"and",
"escalation",
".",
"notification_interval",
"<",
"notification_interval",
":",
"notification_interval",
"=",
"escalation",
".",
"notification_interval",
"# So take the by default time",
"std_time",
"=",
"notif",
".",
"t_to_go",
"+",
"notification_interval",
"*",
"cls",
".",
"interval_length",
"# Maybe the notification comes from retention data and",
"# next notification alert is in the past",
"# if so let use the now value instead",
"if",
"std_time",
"<",
"now",
":",
"std_time",
"=",
"now",
"+",
"notification_interval",
"*",
"cls",
".",
"interval_length",
"# standard time is a good one",
"res",
"=",
"std_time",
"creation_time",
"=",
"notif",
".",
"creation_time",
"in_notif_time",
"=",
"now",
"-",
"notif",
".",
"creation_time",
"for",
"escalation_id",
"in",
"self",
".",
"escalations",
":",
"escalation",
"=",
"escalations",
"[",
"escalation_id",
"]",
"# If the escalation was already raised, we do not look for a new \"early start\"",
"if",
"escalation",
".",
"get_name",
"(",
")",
"not",
"in",
"notif",
".",
"already_start_escalations",
":",
"escalation_period",
"=",
"timeperiods",
"[",
"escalation",
".",
"escalation_period",
"]",
"next_t",
"=",
"escalation",
".",
"get_next_notif_time",
"(",
"std_time",
",",
"self",
".",
"state",
",",
"creation_time",
",",
"cls",
".",
"interval_length",
",",
"escalation_period",
")",
"# If we got a real result (time base escalation), we add it",
"if",
"next_t",
"is",
"not",
"None",
"and",
"now",
"<",
"next_t",
"<",
"res",
":",
"res",
"=",
"next_t",
"# And we take the minimum of this result. Can be standard or escalation asked",
"return",
"res"
] |
Get the next notification time for a notification
Take the standard notification_interval, or ask our escalations
if one of them needs a smaller value to escalate
:param notif: Notification we need the time for
:type notif: alignak.objects.notification.Notification
:param escalations: Escalations objects, used to get escalation objects (interval, period)
:type escalations: alignak.objects.escalation.Escalations
:param timeperiods: Timeperiods objects, used to get escalation period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: Timestamp of next notification
:rtype: int
|
[
"Get",
"the",
"next",
"notification",
"time",
"for",
"a",
"notification",
"Take",
"the",
"standard",
"notification_interval",
"or",
"ask",
"for",
"our",
"escalation",
"if",
"one",
"of",
"them",
"need",
"a",
"smaller",
"value",
"to",
"escalade"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L2052-L2113
|
train
|
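The next notification time is the standard interval counted from t_to_go, possibly shortened by an eligible escalation's own interval, and never scheduled in the past. A hedged sketch of just that arithmetic (interval_length in seconds, escalation intervals given as plain numbers for escalations assumed to be eligible):

import time

def next_notification_time(t_to_go, notification_interval,
                           escalation_intervals, interval_length=60,
                           now=None):
    """escalation_intervals: intervals of the currently eligible escalations."""
    now = time.time() if now is None else now
    # An eligible escalation may ask for a smaller interval (-1 means unset)
    interval = notification_interval
    for esc_interval in escalation_intervals:
        if esc_interval != -1 and esc_interval < interval:
            interval = esc_interval
    std_time = t_to_go + interval * interval_length
    # Never schedule in the past (e.g. notification restored from retention)
    if std_time < now:
        std_time = now + interval * interval_length
    return std_time

print(next_notification_time(t_to_go=1000, notification_interval=10,
                             escalation_intervals=[5], now=900))
# 1000 + 5 * 60 = 1300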
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.get_business_rule_output
|
def get_business_rule_output(self, hosts, services, macromodulations, timeperiods):
# pylint: disable=too-many-locals, too-many-branches
"""
Returns a status string for business-rule-based items, formatted
using the business_rule_output_template attribute as a template.
The template may embed output formatting for itself, and for its child
(dependent) items. The child format string is embedded between $( and )$,
using the string between the brackets as the format string.
Any business-rule-based item or child macro may be used. In addition,
the $STATUS$, $SHORTSTATUS$ and $FULLNAME$ macros, whose names are common
to hosts and services, may be used to ease template writing.
Caution: only children in a non-OK state are displayed.
Example:
A business rule with a format string looking like
"$STATUS$ [ $($TATUS$: $HOSTNAME$,$SERVICEDESC$ )$ ]"
Would return
"CRITICAL [ CRITICAL: host1,srv1 WARNING: host2,srv2 ]"
:param hosts: Hosts object to look for objects
:type hosts: alignak.objects.host.Hosts
:param services: Services object to look for objects
:type services: alignak.objects.service.Services
:param macromodulations: Macromodulations object to look for objects
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param timeperiods: Timeperiods object to look for objects
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: status for business rules
:rtype: str
"""
got_business_rule = getattr(self, 'got_business_rule', False)
# Checks that the service is a business rule.
if got_business_rule is False or self.business_rule is None:
return ""
# Checks that the business rule has a format specified.
output_template = self.business_rule_output_template
if not output_template:
return ""
macroresolver = MacroResolver()
# Extracts children template strings
elts = re.findall(r"\$\((.*)\)\$", output_template)
if not elts:
child_template_string = ""
else:
child_template_string = elts[0]
# Processes child services output
children_output = ""
ok_count = 0
# Expands child items format string macros.
items = self.business_rule.list_all_elements()
for item_uuid in items:
if item_uuid in hosts:
item = hosts[item_uuid]
elif item_uuid in services:
item = services[item_uuid]
# Do not display children in OK state
# todo: last_hard_state ? why not current state if state type is hard ?
if item.last_hard_state_id == 0:
ok_count += 1
continue
data = item.get_data_for_checks(hosts)
children_output += macroresolver.resolve_simple_macros_in_string(child_template_string,
data,
macromodulations,
timeperiods)
if ok_count == len(items):
children_output = "all checks were successful."
# Replaces children output string
template_string = re.sub(r"\$\(.*\)\$", children_output, output_template)
data = self.get_data_for_checks(hosts)
output = macroresolver.resolve_simple_macros_in_string(template_string, data,
macromodulations, timeperiods)
return output.strip()
|
python
|
def get_business_rule_output(self, hosts, services, macromodulations, timeperiods):
# pylint: disable=too-many-locals, too-many-branches
"""
Returns a status string for business-rule-based items, formatted
using the business_rule_output_template attribute as a template.
The template may embed output formatting for itself, and for its child
(dependent) items. The child format string is embedded between $( and )$,
using the string between the brackets as the format string.
Any business-rule-based item or child macro may be used. In addition,
the $STATUS$, $SHORTSTATUS$ and $FULLNAME$ macros, whose names are common
to hosts and services, may be used to ease template writing.
Caution: only children in a non-OK state are displayed.
Example:
A business rule with a format string looking like
"$STATUS$ [ $($TATUS$: $HOSTNAME$,$SERVICEDESC$ )$ ]"
Would return
"CRITICAL [ CRITICAL: host1,srv1 WARNING: host2,srv2 ]"
:param hosts: Hosts object to look for objects
:type hosts: alignak.objects.host.Hosts
:param services: Services object to look for objects
:type services: alignak.objects.service.Services
:param macromodulations: Macromodulations object to look for objects
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param timeperiods: Timeperiods object to look for objects
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: status for business rules
:rtype: str
"""
got_business_rule = getattr(self, 'got_business_rule', False)
# Checks that the service is a business rule.
if got_business_rule is False or self.business_rule is None:
return ""
# Checks that the business rule has a format specified.
output_template = self.business_rule_output_template
if not output_template:
return ""
macroresolver = MacroResolver()
# Extracts children template strings
elts = re.findall(r"\$\((.*)\)\$", output_template)
if not elts:
child_template_string = ""
else:
child_template_string = elts[0]
# Processes child services output
children_output = ""
ok_count = 0
# Expands child items format string macros.
items = self.business_rule.list_all_elements()
for item_uuid in items:
if item_uuid in hosts:
item = hosts[item_uuid]
elif item_uuid in services:
item = services[item_uuid]
# Do not display children in OK state
# todo: last_hard_state ? why not current state if state type is hard ?
if item.last_hard_state_id == 0:
ok_count += 1
continue
data = item.get_data_for_checks(hosts)
children_output += macroresolver.resolve_simple_macros_in_string(child_template_string,
data,
macromodulations,
timeperiods)
if ok_count == len(items):
children_output = "all checks were successful."
# Replaces children output string
template_string = re.sub(r"\$\(.*\)\$", children_output, output_template)
data = self.get_data_for_checks(hosts)
output = macroresolver.resolve_simple_macros_in_string(template_string, data,
macromodulations, timeperiods)
return output.strip()
|
[
"def",
"get_business_rule_output",
"(",
"self",
",",
"hosts",
",",
"services",
",",
"macromodulations",
",",
"timeperiods",
")",
":",
"# pylint: disable=too-many-locals, too-many-branches",
"got_business_rule",
"=",
"getattr",
"(",
"self",
",",
"'got_business_rule'",
",",
"False",
")",
"# Checks that the service is a business rule.",
"if",
"got_business_rule",
"is",
"False",
"or",
"self",
".",
"business_rule",
"is",
"None",
":",
"return",
"\"\"",
"# Checks that the business rule has a format specified.",
"output_template",
"=",
"self",
".",
"business_rule_output_template",
"if",
"not",
"output_template",
":",
"return",
"\"\"",
"macroresolver",
"=",
"MacroResolver",
"(",
")",
"# Extracts children template strings",
"elts",
"=",
"re",
".",
"findall",
"(",
"r\"\\$\\((.*)\\)\\$\"",
",",
"output_template",
")",
"if",
"not",
"elts",
":",
"child_template_string",
"=",
"\"\"",
"else",
":",
"child_template_string",
"=",
"elts",
"[",
"0",
"]",
"# Processes child services output",
"children_output",
"=",
"\"\"",
"ok_count",
"=",
"0",
"# Expands child items format string macros.",
"items",
"=",
"self",
".",
"business_rule",
".",
"list_all_elements",
"(",
")",
"for",
"item_uuid",
"in",
"items",
":",
"if",
"item_uuid",
"in",
"hosts",
":",
"item",
"=",
"hosts",
"[",
"item_uuid",
"]",
"elif",
"item_uuid",
"in",
"services",
":",
"item",
"=",
"services",
"[",
"item_uuid",
"]",
"# Do not display children in OK state",
"# todo: last_hard_state ? why not current state if state type is hard ?",
"if",
"item",
".",
"last_hard_state_id",
"==",
"0",
":",
"ok_count",
"+=",
"1",
"continue",
"data",
"=",
"item",
".",
"get_data_for_checks",
"(",
"hosts",
")",
"children_output",
"+=",
"macroresolver",
".",
"resolve_simple_macros_in_string",
"(",
"child_template_string",
",",
"data",
",",
"macromodulations",
",",
"timeperiods",
")",
"if",
"ok_count",
"==",
"len",
"(",
"items",
")",
":",
"children_output",
"=",
"\"all checks were successful.\"",
"# Replaces children output string",
"template_string",
"=",
"re",
".",
"sub",
"(",
"r\"\\$\\(.*\\)\\$\"",
",",
"children_output",
",",
"output_template",
")",
"data",
"=",
"self",
".",
"get_data_for_checks",
"(",
"hosts",
")",
"output",
"=",
"macroresolver",
".",
"resolve_simple_macros_in_string",
"(",
"template_string",
",",
"data",
",",
"macromodulations",
",",
"timeperiods",
")",
"return",
"output",
".",
"strip",
"(",
")"
] |
Returns a status string for business-rule-based items, formatted
using the business_rule_output_template attribute as a template.
The template may embed output formatting for itself, and for its child
(dependent) items. The child format string is embedded between $( and )$,
using the string between the brackets as the format string.
Any business-rule-based item or child macro may be used. In addition,
the $STATUS$, $SHORTSTATUS$ and $FULLNAME$ macros, whose names are common
to hosts and services, may be used to ease template writing.
Caution: only children in a non-OK state are displayed.
Example:
A business rule with a format string looking like
"$STATUS$ [ $($TATUS$: $HOSTNAME$,$SERVICEDESC$ )$ ]"
Would return
"CRITICAL [ CRITICAL: host1,srv1 WARNING: host2,srv2 ]"
:param hosts: Hosts object to look for objects
:type hosts: alignak.objects.host.Hosts
:param services: Services object to look for objects
:type services: alignak.objects.service.Services
:param macromodulations: Macromodulations object to look for objects
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param timeperiods: Timeperiods object to look for objects
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: status for business rules
:rtype: str
|
[
"Returns",
"a",
"status",
"string",
"for",
"business",
"rules",
"based",
"items",
"formatted",
"using",
"business_rule_output_template",
"attribute",
"as",
"template",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L2612-L2692
|
train
|
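The get_business_rule_output record above expands a child template wrapped in $( ... )$ for every non-OK child and collapses it to a fixed message when everything is OK. A minimal, self-contained sketch of that expansion idea follows; it uses plain dictionaries and simple string replacement instead of Alignak items and the MacroResolver, and the macro names, statuses and host/service names below are invented for illustration:

import re

# Invented child results; only non-OK children are rendered, mirroring
# the last_hard_state_id == 0 test in the record above.
children = [
    {"status": "CRITICAL", "name": "host1,srv1", "state_id": 2},
    {"status": "OK", "name": "host0,srv0", "state_id": 0},
    {"status": "WARNING", "name": "host2,srv2", "state_id": 1},
]

output_template = "$STATUS$ [ $($STATUS$: $NAME$ )$ ]"

# Extract the child template between $( and )$ (same regex as the record)
elts = re.findall(r"\$\((.*)\)\$", output_template)
child_template = elts[0] if elts else ""

children_output = ""
ok_count = 0
for child in children:
    if child["state_id"] == 0:
        ok_count += 1
        continue
    children_output += (child_template
                        .replace("$STATUS$", child["status"])
                        .replace("$NAME$", child["name"]))
if ok_count == len(children):
    children_output = "all checks were successful."

# Replace the whole $( ... )$ block with the children output,
# then resolve the remaining "global" macro.
result = re.sub(r"\$\(.*\)\$", children_output, output_template)
result = result.replace("$STATUS$", "CRITICAL")
print(result.strip())
# CRITICAL [ CRITICAL: host1,srv1 WARNING: host2,srv2  ]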
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.business_rule_notification_is_blocked
|
def business_rule_notification_is_blocked(self, hosts, services):
# pylint: disable=too-many-locals
"""Process business rule notifications behaviour. If all problems have
been acknowledged, no notifications should be sent if state is not OK.
By default, downtimes are ignored, unless explicitly told to be treated
as acknowledgements through with the business_rule_downtime_as_ack set.
:return: True if all source problem are acknowledged, otherwise False
:rtype: bool
"""
# Walks through problems to check if all items in non ok are
# acknowledged or in downtime period.
acknowledged = 0
for src_prob_id in self.source_problems:
if src_prob_id in hosts:
src_prob = hosts[src_prob_id]
else:
src_prob = services[src_prob_id]
if src_prob.last_hard_state_id != 0:
if src_prob.problem_has_been_acknowledged:
# Problem has been acknowledged
acknowledged += 1
# Only check problems under downtime if we are
# explicitly told to do so.
elif self.business_rule_downtime_as_ack is True:
if src_prob.scheduled_downtime_depth > 0:
# Problem is under downtime, and downtimes should be
# treated as acknowledgements
acknowledged += 1
elif hasattr(src_prob, "host") and \
hosts[src_prob.host].scheduled_downtime_depth > 0:
# Host is under downtime, and downtimes should be
# treated as acknowledgements
acknowledged += 1
return acknowledged == len(self.source_problems)
|
python
|
def business_rule_notification_is_blocked(self, hosts, services):
# pylint: disable=too-many-locals
"""Process business rule notifications behaviour. If all problems have
been acknowledged, no notifications should be sent if state is not OK.
By default, downtimes are ignored, unless explicitly told to be treated
as acknowledgements through with the business_rule_downtime_as_ack set.
:return: True if all source problem are acknowledged, otherwise False
:rtype: bool
"""
# Walks through problems to check if all items in non ok are
# acknowledged or in downtime period.
acknowledged = 0
for src_prob_id in self.source_problems:
if src_prob_id in hosts:
src_prob = hosts[src_prob_id]
else:
src_prob = services[src_prob_id]
if src_prob.last_hard_state_id != 0:
if src_prob.problem_has_been_acknowledged:
# Problem has been acknowledged
acknowledged += 1
# Only check problems under downtime if we are
# explicitly told to do so.
elif self.business_rule_downtime_as_ack is True:
if src_prob.scheduled_downtime_depth > 0:
# Problem is under downtime, and downtimes should be
# treated as acknowledgements
acknowledged += 1
elif hasattr(src_prob, "host") and \
hosts[src_prob.host].scheduled_downtime_depth > 0:
# Host is under downtime, and downtimes should be
# treated as acknowledgements
acknowledged += 1
return acknowledged == len(self.source_problems)
|
[
"def",
"business_rule_notification_is_blocked",
"(",
"self",
",",
"hosts",
",",
"services",
")",
":",
"# pylint: disable=too-many-locals",
"# Walks through problems to check if all items in non ok are",
"# acknowledged or in downtime period.",
"acknowledged",
"=",
"0",
"for",
"src_prob_id",
"in",
"self",
".",
"source_problems",
":",
"if",
"src_prob_id",
"in",
"hosts",
":",
"src_prob",
"=",
"hosts",
"[",
"src_prob_id",
"]",
"else",
":",
"src_prob",
"=",
"services",
"[",
"src_prob_id",
"]",
"if",
"src_prob",
".",
"last_hard_state_id",
"!=",
"0",
":",
"if",
"src_prob",
".",
"problem_has_been_acknowledged",
":",
"# Problem hast been acknowledged",
"acknowledged",
"+=",
"1",
"# Only check problems under downtime if we are",
"# explicitly told to do so.",
"elif",
"self",
".",
"business_rule_downtime_as_ack",
"is",
"True",
":",
"if",
"src_prob",
".",
"scheduled_downtime_depth",
">",
"0",
":",
"# Problem is under downtime, and downtimes should be",
"# treated as acknowledgements",
"acknowledged",
"+=",
"1",
"elif",
"hasattr",
"(",
"src_prob",
",",
"\"host\"",
")",
"and",
"hosts",
"[",
"src_prob",
".",
"host",
"]",
".",
"scheduled_downtime_depth",
">",
"0",
":",
"# Host is under downtime, and downtimes should be",
"# treated as acknowledgements",
"acknowledged",
"+=",
"1",
"return",
"acknowledged",
"==",
"len",
"(",
"self",
".",
"source_problems",
")"
] |
Process business rule notifications behaviour. If all problems have
been acknowledged, no notifications should be sent if state is not OK.
By default, downtimes are ignored, unless explicitly told to be treated
as acknowledgements through with the business_rule_downtime_as_ack set.
:return: True if all source problem are acknowledged, otherwise False
:rtype: bool
|
[
"Process",
"business",
"rule",
"notifications",
"behaviour",
".",
"If",
"all",
"problems",
"have",
"been",
"acknowledged",
"no",
"notifications",
"should",
"be",
"sent",
"if",
"state",
"is",
"not",
"OK",
".",
"By",
"default",
"downtimes",
"are",
"ignored",
"unless",
"explicitly",
"told",
"to",
"be",
"treated",
"as",
"acknowledgements",
"through",
"with",
"the",
"business_rule_downtime_as_ack",
"set",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L2694-L2729
|
train
|
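The business_rule_notification_is_blocked record above decides whether to silence notifications by counting how many non-OK source problems are acknowledged (or, optionally, in downtime). A self-contained sketch of the same counting rule, using a simple stand-in class instead of Alignak hosts and services; the class and parameter names are simplified inventions:

from dataclasses import dataclass

@dataclass
class Problem:
    # minimal stand-in for a host/service source problem
    last_hard_state_id: int
    acknowledged: bool = False
    downtime_depth: int = 0

def notifications_blocked(problems, downtime_as_ack=False):
    # True when every source problem is acknowledged (or downtimed,
    # when downtimes are treated as acknowledgements), as in the record.
    acknowledged = 0
    for prob in problems:
        if prob.last_hard_state_id != 0:
            if prob.acknowledged:
                acknowledged += 1
            elif downtime_as_ack and prob.downtime_depth > 0:
                acknowledged += 1
    return acknowledged == len(problems)

probs = [Problem(2, acknowledged=True), Problem(1, downtime_depth=1)]
print(notifications_blocked(probs))                        # False
print(notifications_blocked(probs, downtime_as_ack=True))  # True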
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.fill_data_brok_from
|
def fill_data_brok_from(self, data, brok_type):
"""Fill data brok dependent on the brok_type
:param data: data to fill
:type data: dict
:param brok_type: brok type
:type: str
:return: None
"""
super(SchedulingItem, self).fill_data_brok_from(data, brok_type)
# workaround/easy trick to have the command_name of this
# SchedulingItem in its check_result brok
if brok_type == 'check_result':
data['command_name'] = ''
if self.check_command:
data['command_name'] = self.check_command.command.command_name
|
python
|
def fill_data_brok_from(self, data, brok_type):
"""Fill data brok dependent on the brok_type
:param data: data to fill
:type data: dict
:param brok_type: brok type
:type: str
:return: None
"""
super(SchedulingItem, self).fill_data_brok_from(data, brok_type)
# workaround/easy trick to have the command_name of this
# SchedulingItem in its check_result brok
if brok_type == 'check_result':
data['command_name'] = ''
if self.check_command:
data['command_name'] = self.check_command.command.command_name
|
[
"def",
"fill_data_brok_from",
"(",
"self",
",",
"data",
",",
"brok_type",
")",
":",
"super",
"(",
"SchedulingItem",
",",
"self",
")",
".",
"fill_data_brok_from",
"(",
"data",
",",
"brok_type",
")",
"# workaround/easy trick to have the command_name of this",
"# SchedulingItem in its check_result brok",
"if",
"brok_type",
"==",
"'check_result'",
":",
"data",
"[",
"'command_name'",
"]",
"=",
"''",
"if",
"self",
".",
"check_command",
":",
"data",
"[",
"'command_name'",
"]",
"=",
"self",
".",
"check_command",
".",
"command",
".",
"command_name"
] |
Fill data brok dependent on the brok_type
:param data: data to fill
:type data: dict
:param brok_type: brok type
:type: str
:return: None
|
[
"Fill",
"data",
"brok",
"dependent",
"on",
"the",
"brok_type"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L2934-L2949
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.acknowledge_problem
|
def acknowledge_problem(self, notification_period, hosts, services, sticky, notify, author,
comment, end_time=0):
# pylint: disable=too-many-arguments
"""
Add an acknowledge
:param sticky: acknowledge will always be present even if the host returns to an UP state
:type sticky: integer
:param notify: if set to 1, send a notification
:type notify: integer
:param author: name of the author of the acknowledge
:type author: str
:param comment: comment (description) of the acknowledge
:type comment: str
:param end_time: end (timeout) of this acknowledge in seconds(timestamp) (0 to never end)
:type end_time: int
:return: None | alignak.comment.Comment
"""
comm = None
logger.debug("Acknowledge requested for %s %s.", self.my_type, self.get_name())
if self.state != self.ok_up:
# case have yet an acknowledge
if self.problem_has_been_acknowledged and self.acknowledgement:
self.del_comment(getattr(self.acknowledgement, 'comment_id', None))
if notify:
self.create_notifications('ACKNOWLEDGEMENT',
notification_period, hosts, services)
self.problem_has_been_acknowledged = True
sticky = sticky == 2
data = {
'ref': self.uuid, 'sticky': sticky, 'author': author, 'comment': comment,
'end_time': end_time, 'notify': notify
}
self.acknowledgement = Acknowledge(data)
if self.my_type == 'host':
comment_type = 1
self.broks.append(self.acknowledgement.get_raise_brok(self.get_name()))
else:
comment_type = 2
self.broks.append(self.acknowledgement.get_raise_brok(self.host_name,
self.get_name()))
data = {
'author': author, 'comment': comment, 'comment_type': comment_type, 'entry_type': 4,
'source': 0, 'expires': False, 'ref': self.uuid
}
comm = Comment(data)
self.acknowledgement.comment_id = comm.uuid
self.comments[comm.uuid] = comm
self.broks.append(self.get_update_status_brok())
self.raise_acknowledge_log_entry()
else:
logger.debug("Acknowledge requested for %s %s but element state is OK/UP.",
self.my_type, self.get_name())
# For an host, acknowledge all its services that are problems
if self.my_type == 'host':
for service_uuid in self.services:
if service_uuid not in services:
continue
services[service_uuid].acknowledge_problem(notification_period, hosts, services,
sticky, notify, author, comment,
end_time)
return comm
|
python
|
def acknowledge_problem(self, notification_period, hosts, services, sticky, notify, author,
comment, end_time=0):
# pylint: disable=too-many-arguments
"""
Add an acknowledge
:param sticky: acknowledge will always be present even if the host returns to an UP state
:type sticky: integer
:param notify: if set to 1, send a notification
:type notify: integer
:param author: name of the author of the acknowledge
:type author: str
:param comment: comment (description) of the acknowledge
:type comment: str
:param end_time: end (timeout) of this acknowledge in seconds(timestamp) (0 to never end)
:type end_time: int
:return: None | alignak.comment.Comment
"""
comm = None
logger.debug("Acknowledge requested for %s %s.", self.my_type, self.get_name())
if self.state != self.ok_up:
# case have yet an acknowledge
if self.problem_has_been_acknowledged and self.acknowledgement:
self.del_comment(getattr(self.acknowledgement, 'comment_id', None))
if notify:
self.create_notifications('ACKNOWLEDGEMENT',
notification_period, hosts, services)
self.problem_has_been_acknowledged = True
sticky = sticky == 2
data = {
'ref': self.uuid, 'sticky': sticky, 'author': author, 'comment': comment,
'end_time': end_time, 'notify': notify
}
self.acknowledgement = Acknowledge(data)
if self.my_type == 'host':
comment_type = 1
self.broks.append(self.acknowledgement.get_raise_brok(self.get_name()))
else:
comment_type = 2
self.broks.append(self.acknowledgement.get_raise_brok(self.host_name,
self.get_name()))
data = {
'author': author, 'comment': comment, 'comment_type': comment_type, 'entry_type': 4,
'source': 0, 'expires': False, 'ref': self.uuid
}
comm = Comment(data)
self.acknowledgement.comment_id = comm.uuid
self.comments[comm.uuid] = comm
self.broks.append(self.get_update_status_brok())
self.raise_acknowledge_log_entry()
else:
logger.debug("Acknowledge requested for %s %s but element state is OK/UP.",
self.my_type, self.get_name())
# For an host, acknowledge all its services that are problems
if self.my_type == 'host':
for service_uuid in self.services:
if service_uuid not in services:
continue
services[service_uuid].acknowledge_problem(notification_period, hosts, services,
sticky, notify, author, comment,
end_time)
return comm
|
[
"def",
"acknowledge_problem",
"(",
"self",
",",
"notification_period",
",",
"hosts",
",",
"services",
",",
"sticky",
",",
"notify",
",",
"author",
",",
"comment",
",",
"end_time",
"=",
"0",
")",
":",
"# pylint: disable=too-many-arguments",
"comm",
"=",
"None",
"logger",
".",
"debug",
"(",
"\"Acknowledge requested for %s %s.\"",
",",
"self",
".",
"my_type",
",",
"self",
".",
"get_name",
"(",
")",
")",
"if",
"self",
".",
"state",
"!=",
"self",
".",
"ok_up",
":",
"# case have yet an acknowledge",
"if",
"self",
".",
"problem_has_been_acknowledged",
"and",
"self",
".",
"acknowledgement",
":",
"self",
".",
"del_comment",
"(",
"getattr",
"(",
"self",
".",
"acknowledgement",
",",
"'comment_id'",
",",
"None",
")",
")",
"if",
"notify",
":",
"self",
".",
"create_notifications",
"(",
"'ACKNOWLEDGEMENT'",
",",
"notification_period",
",",
"hosts",
",",
"services",
")",
"self",
".",
"problem_has_been_acknowledged",
"=",
"True",
"sticky",
"=",
"sticky",
"==",
"2",
"data",
"=",
"{",
"'ref'",
":",
"self",
".",
"uuid",
",",
"'sticky'",
":",
"sticky",
",",
"'author'",
":",
"author",
",",
"'comment'",
":",
"comment",
",",
"'end_time'",
":",
"end_time",
",",
"'notify'",
":",
"notify",
"}",
"self",
".",
"acknowledgement",
"=",
"Acknowledge",
"(",
"data",
")",
"if",
"self",
".",
"my_type",
"==",
"'host'",
":",
"comment_type",
"=",
"1",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"acknowledgement",
".",
"get_raise_brok",
"(",
"self",
".",
"get_name",
"(",
")",
")",
")",
"else",
":",
"comment_type",
"=",
"2",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"acknowledgement",
".",
"get_raise_brok",
"(",
"self",
".",
"host_name",
",",
"self",
".",
"get_name",
"(",
")",
")",
")",
"data",
"=",
"{",
"'author'",
":",
"author",
",",
"'comment'",
":",
"comment",
",",
"'comment_type'",
":",
"comment_type",
",",
"'entry_type'",
":",
"4",
",",
"'source'",
":",
"0",
",",
"'expires'",
":",
"False",
",",
"'ref'",
":",
"self",
".",
"uuid",
"}",
"comm",
"=",
"Comment",
"(",
"data",
")",
"self",
".",
"acknowledgement",
".",
"comment_id",
"=",
"comm",
".",
"uuid",
"self",
".",
"comments",
"[",
"comm",
".",
"uuid",
"]",
"=",
"comm",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"get_update_status_brok",
"(",
")",
")",
"self",
".",
"raise_acknowledge_log_entry",
"(",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Acknowledge requested for %s %s but element state is OK/UP.\"",
",",
"self",
".",
"my_type",
",",
"self",
".",
"get_name",
"(",
")",
")",
"# For an host, acknowledge all its services that are problems",
"if",
"self",
".",
"my_type",
"==",
"'host'",
":",
"for",
"service_uuid",
"in",
"self",
".",
"services",
":",
"if",
"service_uuid",
"not",
"in",
"services",
":",
"continue",
"services",
"[",
"service_uuid",
"]",
".",
"acknowledge_problem",
"(",
"notification_period",
",",
"hosts",
",",
"services",
",",
"sticky",
",",
"notify",
",",
"author",
",",
"comment",
",",
"end_time",
")",
"return",
"comm"
] |
Add an acknowledge
:param sticky: acknowledge will always be present even if the host returns to an UP state
:type sticky: integer
:param notify: if set to 1, send a notification
:type notify: integer
:param author: name of the author of the acknowledge
:type author: str
:param comment: comment (description) of the acknowledge
:type comment: str
:param end_time: end (timeout) of this acknowledge in seconds(timestamp) (0 to never end)
:type end_time: int
:return: None | alignak.comment.Comment
|
[
"Add",
"an",
"acknowledge"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L2951-L3017
|
train
|
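acknowledge_problem above normalises the external-command style sticky value (2 means sticky) into a boolean and picks comment_type 1 for hosts and 2 for services before building the Acknowledge and Comment objects. A small stand-alone sketch of just that normalisation, with no Alignak objects involved and invented identifiers:

def build_ack_data(ref, sticky, notify, author, comment, end_time=0):
    # sticky is the external-command integer: 2 means "stay acknowledged
    # until the problem is resolved"; it is stored as a boolean.
    return {
        'ref': ref, 'sticky': sticky == 2, 'author': author,
        'comment': comment, 'end_time': end_time, 'notify': notify,
    }

def comment_type_for(item_type):
    # Same convention as the record above: 1 for hosts, 2 for services.
    return 1 if item_type == 'host' else 2

print(build_ack_data('uuid-1', sticky=2, notify=1,
                     author='admin', comment='working on it'))
print(comment_type_for('host'), comment_type_for('service'))   # 1 2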
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.check_for_expire_acknowledge
|
def check_for_expire_acknowledge(self):
"""
If there is an acknowledgement and it has expired, delete it
:return: None
"""
if (self.acknowledgement and
self.acknowledgement.end_time != 0 and
self.acknowledgement.end_time < time.time()):
self.unacknowledge_problem()
|
python
|
def check_for_expire_acknowledge(self):
"""
If there is an acknowledgement and it has expired, delete it
:return: None
"""
if (self.acknowledgement and
self.acknowledgement.end_time != 0 and
self.acknowledgement.end_time < time.time()):
self.unacknowledge_problem()
|
[
"def",
"check_for_expire_acknowledge",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"acknowledgement",
"and",
"self",
".",
"acknowledgement",
".",
"end_time",
"!=",
"0",
"and",
"self",
".",
"acknowledgement",
".",
"end_time",
"<",
"time",
".",
"time",
"(",
")",
")",
":",
"self",
".",
"unacknowledge_problem",
"(",
")"
] |
If there is an acknowledgement and it has expired, delete it
:return: None
|
[
"If",
"have",
"acknowledge",
"and",
"is",
"expired",
"delete",
"it"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L3019-L3028
|
train
|
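check_for_expire_acknowledge above treats an end_time of 0 as "never expires" and drops the acknowledgement only when its end_time is in the past. A tiny stand-alone sketch of that test (the helper name is invented):

import time

def ack_expired(end_time, now=None):
    # True when an acknowledgement with this end_time should be dropped;
    # end_time == 0 means the acknowledgement never expires.
    now = time.time() if now is None else now
    return end_time != 0 and end_time < now

print(ack_expired(0))                    # False: never expires
print(ack_expired(time.time() - 3600))   # True: ended an hour ago
print(ack_expired(time.time() + 3600))   # False: still valid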
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.unacknowledge_problem
|
def unacknowledge_problem(self):
"""
Remove the acknowledge, reset the flag. The comment is deleted
:return: None
"""
if self.problem_has_been_acknowledged:
logger.debug("[item::%s] deleting acknowledge of %s",
self.get_name(),
self.get_full_name())
self.problem_has_been_acknowledged = False
if self.my_type == 'host':
self.broks.append(self.acknowledgement.get_expire_brok(self.get_name()))
else:
self.broks.append(self.acknowledgement.get_expire_brok(self.host_name,
self.get_name()))
# delete the comment of the item related with the acknowledge
if hasattr(self.acknowledgement, 'comment_id') and \
self.acknowledgement.comment_id in self.comments:
del self.comments[self.acknowledgement.comment_id]
# Should not be deleted, a None is Good
self.acknowledgement = None
self.broks.append(self.get_update_status_brok())
self.raise_unacknowledge_log_entry()
|
python
|
def unacknowledge_problem(self):
"""
Remove the acknowledge, reset the flag. The comment is deleted
:return: None
"""
if self.problem_has_been_acknowledged:
logger.debug("[item::%s] deleting acknowledge of %s",
self.get_name(),
self.get_full_name())
self.problem_has_been_acknowledged = False
if self.my_type == 'host':
self.broks.append(self.acknowledgement.get_expire_brok(self.get_name()))
else:
self.broks.append(self.acknowledgement.get_expire_brok(self.host_name,
self.get_name()))
# delete the comment of the item related with the acknowledge
if hasattr(self.acknowledgement, 'comment_id') and \
self.acknowledgement.comment_id in self.comments:
del self.comments[self.acknowledgement.comment_id]
# Should not be deleted, a None is Good
self.acknowledgement = None
self.broks.append(self.get_update_status_brok())
self.raise_unacknowledge_log_entry()
|
[
"def",
"unacknowledge_problem",
"(",
"self",
")",
":",
"if",
"self",
".",
"problem_has_been_acknowledged",
":",
"logger",
".",
"debug",
"(",
"\"[item::%s] deleting acknowledge of %s\"",
",",
"self",
".",
"get_name",
"(",
")",
",",
"self",
".",
"get_full_name",
"(",
")",
")",
"self",
".",
"problem_has_been_acknowledged",
"=",
"False",
"if",
"self",
".",
"my_type",
"==",
"'host'",
":",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"acknowledgement",
".",
"get_expire_brok",
"(",
"self",
".",
"get_name",
"(",
")",
")",
")",
"else",
":",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"acknowledgement",
".",
"get_expire_brok",
"(",
"self",
".",
"host_name",
",",
"self",
".",
"get_name",
"(",
")",
")",
")",
"# delete the comment of the item related with the acknowledge",
"if",
"hasattr",
"(",
"self",
".",
"acknowledgement",
",",
"'comment_id'",
")",
"and",
"self",
".",
"acknowledgement",
".",
"comment_id",
"in",
"self",
".",
"comments",
":",
"del",
"self",
".",
"comments",
"[",
"self",
".",
"acknowledgement",
".",
"comment_id",
"]",
"# Should not be deleted, a None is Good",
"self",
".",
"acknowledgement",
"=",
"None",
"self",
".",
"broks",
".",
"append",
"(",
"self",
".",
"get_update_status_brok",
"(",
")",
")",
"self",
".",
"raise_unacknowledge_log_entry",
"(",
")"
] |
Remove the acknowledge, reset the flag. The comment is deleted
:return: None
|
[
"Remove",
"the",
"acknowledge",
"reset",
"the",
"flag",
".",
"The",
"comment",
"is",
"deleted"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L3030-L3055
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.unacknowledge_problem_if_not_sticky
|
def unacknowledge_problem_if_not_sticky(self):
"""
Remove the acknowledge if it is not sticky
:return: None
"""
if hasattr(self, 'acknowledgement') and self.acknowledgement is not None:
if not self.acknowledgement.sticky:
self.unacknowledge_problem()
|
python
|
def unacknowledge_problem_if_not_sticky(self):
"""
Remove the acknowledge if it is not sticky
:return: None
"""
if hasattr(self, 'acknowledgement') and self.acknowledgement is not None:
if not self.acknowledgement.sticky:
self.unacknowledge_problem()
|
[
"def",
"unacknowledge_problem_if_not_sticky",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'acknowledgement'",
")",
"and",
"self",
".",
"acknowledgement",
"is",
"not",
"None",
":",
"if",
"not",
"self",
".",
"acknowledgement",
".",
"sticky",
":",
"self",
".",
"unacknowledge_problem",
"(",
")"
] |
Remove the acknowledge if it is not sticky
:return: None
|
[
"Remove",
"the",
"acknowledge",
"if",
"it",
"is",
"not",
"sticky"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L3057-L3065
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.set_impact_state
|
def set_impact_state(self):
"""We just go an impact, so we go unreachable
But only if we enable this state change in the conf
:return: None
"""
cls = self.__class__
if cls.enable_problem_impacts_states_change:
logger.debug("%s is impacted and goes UNREACHABLE", self)
# Track the old state (problem occured before a new check)
self.state_before_impact = self.state
self.state_id_before_impact = self.state_id
# This flag will know if we override the impact state
self.state_changed_since_impact = False
# Set unreachable
self.set_unreachable()
|
python
|
def set_impact_state(self):
"""We just go an impact, so we go unreachable
But only if we enable this state change in the conf
:return: None
"""
cls = self.__class__
if cls.enable_problem_impacts_states_change:
logger.debug("%s is impacted and goes UNREACHABLE", self)
# Track the old state (problem occured before a new check)
self.state_before_impact = self.state
self.state_id_before_impact = self.state_id
# This flag will know if we override the impact state
self.state_changed_since_impact = False
# Set unreachable
self.set_unreachable()
|
[
"def",
"set_impact_state",
"(",
"self",
")",
":",
"cls",
"=",
"self",
".",
"__class__",
"if",
"cls",
".",
"enable_problem_impacts_states_change",
":",
"logger",
".",
"debug",
"(",
"\"%s is impacted and goes UNREACHABLE\"",
",",
"self",
")",
"# Track the old state (problem occured before a new check)",
"self",
".",
"state_before_impact",
"=",
"self",
".",
"state",
"self",
".",
"state_id_before_impact",
"=",
"self",
".",
"state_id",
"# This flag will know if we override the impact state",
"self",
".",
"state_changed_since_impact",
"=",
"False",
"# Set unreachable",
"self",
".",
"set_unreachable",
"(",
")"
] |
We just got an impact, so we go unreachable
But only if we enable this state change in the conf
:return: None
|
[
"We",
"just",
"go",
"an",
"impact",
"so",
"we",
"go",
"unreachable",
"But",
"only",
"if",
"we",
"enable",
"this",
"state",
"change",
"in",
"the",
"conf"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L3215-L3231
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItem.unset_impact_state
|
def unset_impact_state(self):
"""Unset impact, only if impact state change is set in configuration
:return: None
"""
cls = self.__class__
if cls.enable_problem_impacts_states_change and not self.state_changed_since_impact:
self.state = self.state_before_impact
self.state_id = self.state_id_before_impact
|
python
|
def unset_impact_state(self):
"""Unset impact, only if impact state change is set in configuration
:return: None
"""
cls = self.__class__
if cls.enable_problem_impacts_states_change and not self.state_changed_since_impact:
self.state = self.state_before_impact
self.state_id = self.state_id_before_impact
|
[
"def",
"unset_impact_state",
"(",
"self",
")",
":",
"cls",
"=",
"self",
".",
"__class__",
"if",
"cls",
".",
"enable_problem_impacts_states_change",
"and",
"not",
"self",
".",
"state_changed_since_impact",
":",
"self",
".",
"state",
"=",
"self",
".",
"state_before_impact",
"self",
".",
"state_id",
"=",
"self",
".",
"state_id_before_impact"
] |
Unset impact, only if impact state change is set in configuration
:return: None
|
[
"Unset",
"impact",
"only",
"if",
"impact",
"state",
"change",
"is",
"set",
"in",
"configuration"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L3233-L3241
|
train
|
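set_impact_state and unset_impact_state above implement a small save/restore pattern: the pre-impact state is remembered, the item is forced to an unreachable-like state, and the old state is put back if nothing changed in between. A compact stand-alone sketch of that pattern; the class name and the state ids are illustrative, not the real Alignak ones:

class ImpactedItem:
    # class-level switch, like enable_problem_impacts_states_change above
    enable_problem_impacts_states_change = True

    def __init__(self, state, state_id):
        self.state = state
        self.state_id = state_id
        self.state_before_impact = state
        self.state_id_before_impact = state_id
        self.state_changed_since_impact = False

    def set_impact_state(self):
        if self.enable_problem_impacts_states_change:
            # remember the pre-impact state before forcing UNREACHABLE
            self.state_before_impact = self.state
            self.state_id_before_impact = self.state_id
            self.state_changed_since_impact = False
            self.state, self.state_id = 'UNREACHABLE', 4  # ids illustrative

    def unset_impact_state(self):
        # restore only if nothing changed the state while impacted
        if (self.enable_problem_impacts_states_change
                and not self.state_changed_since_impact):
            self.state = self.state_before_impact
            self.state_id = self.state_id_before_impact

item = ImpactedItem('UP', 0)
item.set_impact_state()
print(item.state)      # UNREACHABLE
item.unset_impact_state()
print(item.state)      # UP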
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItems.find_by_filter
|
def find_by_filter(self, filters, all_items):
"""
Find items by filters
:param filters: list of filters
:type filters: list
:param all_items: monitoring items
:type: dict
:return: list of items
:rtype: list
"""
items = []
for i in self:
failed = False
if hasattr(i, "host"):
all_items["service"] = i
else:
all_items["host"] = i
for filt in filters:
if not filt(all_items):
failed = True
break
if failed is False:
items.append(i)
return items
|
python
|
def find_by_filter(self, filters, all_items):
"""
Find items by filters
:param filters: list of filters
:type filters: list
:param all_items: monitoring items
:type: dict
:return: list of items
:rtype: list
"""
items = []
for i in self:
failed = False
if hasattr(i, "host"):
all_items["service"] = i
else:
all_items["host"] = i
for filt in filters:
if not filt(all_items):
failed = True
break
if failed is False:
items.append(i)
return items
|
[
"def",
"find_by_filter",
"(",
"self",
",",
"filters",
",",
"all_items",
")",
":",
"items",
"=",
"[",
"]",
"for",
"i",
"in",
"self",
":",
"failed",
"=",
"False",
"if",
"hasattr",
"(",
"i",
",",
"\"host\"",
")",
":",
"all_items",
"[",
"\"service\"",
"]",
"=",
"i",
"else",
":",
"all_items",
"[",
"\"host\"",
"]",
"=",
"i",
"for",
"filt",
"in",
"filters",
":",
"if",
"not",
"filt",
"(",
"all_items",
")",
":",
"failed",
"=",
"True",
"break",
"if",
"failed",
"is",
"False",
":",
"items",
".",
"append",
"(",
"i",
")",
"return",
"items"
] |
Find items by filters
:param filters: list of filters
:type filters: list
:param all_items: monitoring items
:type: dict
:return: list of items
:rtype: list
|
[
"Find",
"items",
"by",
"filters"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L3379-L3403
|
train
|
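find_by_filter above applies a list of predicate callables to a shared all_items dict, putting the current element under the "host" or "service" key before each test and keeping the element only if every filter passes. A minimal sketch of the same pattern with plain dicts and lambdas; the data is invented:

def find_by_filter(items, filters, all_items):
    # Keep only the items for which every filter returns True,
    # mirroring the short-circuit logic of the record above.
    found = []
    for item in items:
        key = "service" if "host" in item else "host"
        all_items[key] = item
        if all(filt(all_items) for filt in filters):
            found.append(item)
    return found

hosts = [{"name": "web1", "state": "UP"}, {"name": "db1", "state": "DOWN"}]
only_down = [lambda ctx: ctx["host"]["state"] == "DOWN"]
print(find_by_filter(hosts, only_down, {}))   # [{'name': 'db1', 'state': 'DOWN'}]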
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItems.add_act_dependency
|
def add_act_dependency(self, son_id, parent_id, notif_failure_criteria, dep_period,
inherits_parents):
"""
Add a logical dependency for actions between two hosts or services.
:param son_id: uuid of son host
:type son_id: str
:param parent_id: uuid of parent host
:type parent_id: str
:param notif_failure_criteria: notification failure criteria,
notification for a dependent host may vary
:type notif_failure_criteria: list
:param dep_period: dependency period. Timeperiod for dependency may vary
:type dep_period: str | None
:param inherits_parents: if this dep will inherit from parents (timeperiod, status)
:type inherits_parents: bool
:return:
"""
if son_id in self:
son = self[son_id]
else:
msg = "Dependency son (%s) unknown, configuration error" % son_id
self.add_error(msg)
parent = self[parent_id]
son.act_depend_of.append((parent_id, notif_failure_criteria, dep_period, inherits_parents))
parent.act_depend_of_me.append((son_id, notif_failure_criteria, dep_period,
inherits_parents))
# TODO: Is it necessary? We already have this info in act_depend_* attributes
son.parent_dependencies.add(parent_id)
parent.child_dependencies.add(son_id)
|
python
|
def add_act_dependency(self, son_id, parent_id, notif_failure_criteria, dep_period,
inherits_parents):
"""
Add a logical dependency for actions between two hosts or services.
:param son_id: uuid of son host
:type son_id: str
:param parent_id: uuid of parent host
:type parent_id: str
:param notif_failure_criteria: notification failure criteria,
notification for a dependent host may vary
:type notif_failure_criteria: list
:param dep_period: dependency period. Timeperiod for dependency may vary
:type dep_period: str | None
:param inherits_parents: if this dep will inherit from parents (timeperiod, status)
:type inherits_parents: bool
:return:
"""
if son_id in self:
son = self[son_id]
else:
msg = "Dependency son (%s) unknown, configuration error" % son_id
self.add_error(msg)
parent = self[parent_id]
son.act_depend_of.append((parent_id, notif_failure_criteria, dep_period, inherits_parents))
parent.act_depend_of_me.append((son_id, notif_failure_criteria, dep_period,
inherits_parents))
# TODO: Is it necessary? We already have this info in act_depend_* attributes
son.parent_dependencies.add(parent_id)
parent.child_dependencies.add(son_id)
|
[
"def",
"add_act_dependency",
"(",
"self",
",",
"son_id",
",",
"parent_id",
",",
"notif_failure_criteria",
",",
"dep_period",
",",
"inherits_parents",
")",
":",
"if",
"son_id",
"in",
"self",
":",
"son",
"=",
"self",
"[",
"son_id",
"]",
"else",
":",
"msg",
"=",
"\"Dependency son (%s) unknown, configuration error\"",
"%",
"son_id",
"self",
".",
"add_error",
"(",
"msg",
")",
"parent",
"=",
"self",
"[",
"parent_id",
"]",
"son",
".",
"act_depend_of",
".",
"append",
"(",
"(",
"parent_id",
",",
"notif_failure_criteria",
",",
"dep_period",
",",
"inherits_parents",
")",
")",
"parent",
".",
"act_depend_of_me",
".",
"append",
"(",
"(",
"son_id",
",",
"notif_failure_criteria",
",",
"dep_period",
",",
"inherits_parents",
")",
")",
"# TODO: Is it necessary? We already have this info in act_depend_* attributes",
"son",
".",
"parent_dependencies",
".",
"add",
"(",
"parent_id",
")",
"parent",
".",
"child_dependencies",
".",
"add",
"(",
"son_id",
")"
] |
Add a logical dependency for actions between two hosts or services.
:param son_id: uuid of son host
:type son_id: str
:param parent_id: uuid of parent host
:type parent_id: str
:param notif_failure_criteria: notification failure criteria,
notification for a dependent host may vary
:type notif_failure_criteria: list
:param dep_period: dependency period. Timeperiod for dependency may vary
:type dep_period: str | None
:param inherits_parents: if this dep will inherit from parents (timeperiod, status)
:type inherits_parents: bool
:return:
|
[
"Add",
"a",
"logical",
"dependency",
"for",
"actions",
"between",
"two",
"hosts",
"or",
"services",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L3405-L3435
|
train
|
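add_act_dependency above records each dependency twice, as a 4-tuple on the son (act_depend_of) and on the parent (act_depend_of_me), and mirrors it in the parent/child dependency sets. A small sketch of that bookkeeping with plain dicts standing in for hosts; the item names are invented:

def add_act_dependency(items, son_id, parent_id, criteria, period, inherits):
    # Record the dependency on both ends, as in the record above.
    son, parent = items[son_id], items[parent_id]
    son["act_depend_of"].append((parent_id, criteria, period, inherits))
    parent["act_depend_of_me"].append((son_id, criteria, period, inherits))
    son["parent_dependencies"].add(parent_id)
    parent["child_dependencies"].add(son_id)

def new_item():
    return {"act_depend_of": [], "act_depend_of_me": [],
            "parent_dependencies": set(), "child_dependencies": set()}

items = {"router": new_item(), "web": new_item()}
add_act_dependency(items, "web", "router", ["d", "u"], "24x7", True)
print(items["web"]["act_depend_of"])
# [('router', ['d', 'u'], '24x7', True)]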
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItems.del_act_dependency
|
def del_act_dependency(self, son_id, parent_id): # pragma: no cover, not yet tested
"""Remove act_dependency between two hosts or services.
TODO: do we really intend to remove dynamically ?
:param son_id: uuid of son host/service
:type son_id: str
:param parent_id: uuid of parent host/service
:type parent_id: str
:return: None
"""
son = self[son_id]
parent = self[parent_id]
to_del = []
# First we remove in my list
for (host, status, timeperiod, inherits_parent) in son.act_depend_of:
if host == parent_id:
to_del.append((host, status, timeperiod, inherits_parent))
for tup in to_del:
son.act_depend_of.remove(tup)
# And now in the father part
to_del = []
for (host, status, timeperiod, inherits_parent) in parent.act_depend_of_me:
if host == son_id:
to_del.append((host, status, timeperiod, inherits_parent))
for tup in to_del:
parent.act_depend_of_me.remove(tup)
# Remove in child/parents dependencies too
# Me in father list
parent.child_dependencies.remove(son_id)
# and father list in mine
son.parent_dependencies.remove(parent_id)
|
python
|
def del_act_dependency(self, son_id, parent_id): # pragma: no cover, not yet tested
"""Remove act_dependency between two hosts or services.
TODO: do we really intend to remove dynamically ?
:param son_id: uuid of son host/service
:type son_id: str
:param parent_id: uuid of parent host/service
:type parent_id: str
:return: None
"""
son = self[son_id]
parent = self[parent_id]
to_del = []
# First we remove in my list
for (host, status, timeperiod, inherits_parent) in son.act_depend_of:
if host == parent_id:
to_del.append((host, status, timeperiod, inherits_parent))
for tup in to_del:
son.act_depend_of.remove(tup)
# And now in the father part
to_del = []
for (host, status, timeperiod, inherits_parent) in parent.act_depend_of_me:
if host == son_id:
to_del.append((host, status, timeperiod, inherits_parent))
for tup in to_del:
parent.act_depend_of_me.remove(tup)
# Remove in child/parents dependencies too
# Me in father list
parent.child_dependencies.remove(son_id)
# and father list in mine
son.parent_dependencies.remove(parent_id)
|
[
"def",
"del_act_dependency",
"(",
"self",
",",
"son_id",
",",
"parent_id",
")",
":",
"# pragma: no cover, not yet tested",
"son",
"=",
"self",
"[",
"son_id",
"]",
"parent",
"=",
"self",
"[",
"parent_id",
"]",
"to_del",
"=",
"[",
"]",
"# First we remove in my list",
"for",
"(",
"host",
",",
"status",
",",
"timeperiod",
",",
"inherits_parent",
")",
"in",
"son",
".",
"act_depend_of",
":",
"if",
"host",
"==",
"parent_id",
":",
"to_del",
".",
"append",
"(",
"(",
"host",
",",
"status",
",",
"timeperiod",
",",
"inherits_parent",
")",
")",
"for",
"tup",
"in",
"to_del",
":",
"son",
".",
"act_depend_of",
".",
"remove",
"(",
"tup",
")",
"# And now in the father part",
"to_del",
"=",
"[",
"]",
"for",
"(",
"host",
",",
"status",
",",
"timeperiod",
",",
"inherits_parent",
")",
"in",
"parent",
".",
"act_depend_of_me",
":",
"if",
"host",
"==",
"son_id",
":",
"to_del",
".",
"append",
"(",
"(",
"host",
",",
"status",
",",
"timeperiod",
",",
"inherits_parent",
")",
")",
"for",
"tup",
"in",
"to_del",
":",
"parent",
".",
"act_depend_of_me",
".",
"remove",
"(",
"tup",
")",
"# Remove in child/parents dependencies too",
"# Me in father list",
"parent",
".",
"child_dependencies",
".",
"remove",
"(",
"son_id",
")",
"# and father list in mine",
"son",
".",
"parent_dependencies",
".",
"remove",
"(",
"parent_id",
")"
] |
Remove act_dependency between two hosts or services.
TODO: do we really intend to remove dynamically ?
:param son_id: uuid of son host/service
:type son_id: str
:param parent_id: uuid of parent host/service
:type parent_id: str
:return: None
|
[
"Remove",
"act_dependency",
"between",
"two",
"hosts",
"or",
"services",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L3437-L3470
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItems.add_chk_dependency
|
def add_chk_dependency(self, son_id, parent_id, notif_failure_criteria, dep_period,
inherits_parents):
"""
Add a logical dependency for checks between two hosts or services.
:param son_id: uuid of son host/service
:type son_id: str
:param parent_id: uuid of parent host/service
:type parent_id: str
:param notif_failure_criteria: notification failure criteria,
notification for a dependent host may vary
:type notif_failure_criteria: list
:param dep_period: dependency period. Timeperiod for dependency may vary
:type dep_period: str
:param inherits_parents: if this dep will inherit from parents (timeperiod, status)
:type inherits_parents: bool
:return:
"""
son = self[son_id]
parent = self[parent_id]
son.chk_depend_of.append((parent_id, notif_failure_criteria, 'logic_dep', dep_period,
inherits_parents))
parent.chk_depend_of_me.append((son_id, notif_failure_criteria, 'logic_dep', dep_period,
inherits_parents))
# TODO: Is it necessary? We already have this info in act_depend_* attributes
son.parent_dependencies.add(parent_id)
parent.child_dependencies.add(son_id)
|
python
|
def add_chk_dependency(self, son_id, parent_id, notif_failure_criteria, dep_period,
inherits_parents):
"""
Add a logical dependency for checks between two hosts or services.
:param son_id: uuid of son host/service
:type son_id: str
:param parent_id: uuid of parent host/service
:type parent_id: str
:param notif_failure_criteria: notification failure criteria,
notification for a dependent host may vary
:type notif_failure_criteria: list
:param dep_period: dependency period. Timeperiod for dependency may vary
:type dep_period: str
:param inherits_parents: if this dep will inherit from parents (timeperiod, status)
:type inherits_parents: bool
:return:
"""
son = self[son_id]
parent = self[parent_id]
son.chk_depend_of.append((parent_id, notif_failure_criteria, 'logic_dep', dep_period,
inherits_parents))
parent.chk_depend_of_me.append((son_id, notif_failure_criteria, 'logic_dep', dep_period,
inherits_parents))
# TODO: Is it necessary? We already have this info in act_depend_* attributes
son.parent_dependencies.add(parent_id)
parent.child_dependencies.add(son_id)
|
[
"def",
"add_chk_dependency",
"(",
"self",
",",
"son_id",
",",
"parent_id",
",",
"notif_failure_criteria",
",",
"dep_period",
",",
"inherits_parents",
")",
":",
"son",
"=",
"self",
"[",
"son_id",
"]",
"parent",
"=",
"self",
"[",
"parent_id",
"]",
"son",
".",
"chk_depend_of",
".",
"append",
"(",
"(",
"parent_id",
",",
"notif_failure_criteria",
",",
"'logic_dep'",
",",
"dep_period",
",",
"inherits_parents",
")",
")",
"parent",
".",
"chk_depend_of_me",
".",
"append",
"(",
"(",
"son_id",
",",
"notif_failure_criteria",
",",
"'logic_dep'",
",",
"dep_period",
",",
"inherits_parents",
")",
")",
"# TODO: Is it necessary? We already have this info in act_depend_* attributes",
"son",
".",
"parent_dependencies",
".",
"add",
"(",
"parent_id",
")",
"parent",
".",
"child_dependencies",
".",
"add",
"(",
"son_id",
")"
] |
Add a logical dependency for checks between two hosts or services.
:param son_id: uuid of son host/service
:type son_id: str
:param parent_id: uuid of parent host/service
:type parent_id: str
:param notif_failure_criteria: notification failure criteria,
notification for a dependent host may vary
:type notif_failure_criteria: list
:param dep_period: dependency period. Timeperiod for dependency may vary
:type dep_period: str
:param inherits_parents: if this dep will inherit from parents (timeperiod, status)
:type inherits_parents: bool
:return:
|
[
"Add",
"a",
"logical",
"dependency",
"for",
"checks",
"between",
"two",
"hosts",
"or",
"services",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L3472-L3499
|
train
|
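add_chk_dependency above is the check-dependency counterpart of add_act_dependency: the stored entries gain a fixed 'logic_dep' marker, so they are 5-tuples instead of the 4-tuples used for action dependencies. A minimal sketch of just that difference (helper names invented):

def act_dep_entry(parent_id, criteria, period, inherits):
    # action dependency entry: 4-tuple
    return (parent_id, criteria, period, inherits)

def chk_dep_entry(parent_id, criteria, period, inherits):
    # check dependency entry: same data plus a 'logic_dep' marker -> 5-tuple
    return (parent_id, criteria, 'logic_dep', period, inherits)

print(act_dep_entry("router", ["d", "u"], "24x7", True))
print(chk_dep_entry("router", ["d", "u"], "24x7", True))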
Alignak-monitoring/alignak
|
alignak/objects/schedulingitem.py
|
SchedulingItems.create_business_rules
|
def create_business_rules(self, hosts, services, hostgroups, servicegroups,
macromodulations, timeperiods):
"""
Loop on hosts or services and call SchedulingItem.create_business_rules
:param hosts: hosts to link to
:type hosts: alignak.objects.host.Hosts
:param services: services to link to
:type services: alignak.objects.service.Services
:param hostgroups: hostgroups to link to
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param servicegroups: servicegroups to link to
:type servicegroups: alignak.objects.servicegroup.Servicegroups
:param macromodulations: macromodulations to link to
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param timeperiods: timeperiods to link to
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
"""
for item in self:
item.create_business_rules(hosts, services, hostgroups,
servicegroups, macromodulations, timeperiods)
|
python
|
def create_business_rules(self, hosts, services, hostgroups, servicegroups,
macromodulations, timeperiods):
"""
Loop on hosts or services and call SchedulingItem.create_business_rules
:param hosts: hosts to link to
:type hosts: alignak.objects.host.Hosts
:param services: services to link to
:type services: alignak.objects.service.Services
:param hostgroups: hostgroups to link to
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param servicegroups: servicegroups to link to
:type servicegroups: alignak.objects.servicegroup.Servicegroups
:param macromodulations: macromodulations to link to
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param timeperiods: timeperiods to link to
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
"""
for item in self:
item.create_business_rules(hosts, services, hostgroups,
servicegroups, macromodulations, timeperiods)
|
[
"def",
"create_business_rules",
"(",
"self",
",",
"hosts",
",",
"services",
",",
"hostgroups",
",",
"servicegroups",
",",
"macromodulations",
",",
"timeperiods",
")",
":",
"for",
"item",
"in",
"self",
":",
"item",
".",
"create_business_rules",
"(",
"hosts",
",",
"services",
",",
"hostgroups",
",",
"servicegroups",
",",
"macromodulations",
",",
"timeperiods",
")"
] |
Loop on hosts or services and call SchedulingItem.create_business_rules
:param hosts: hosts to link to
:type hosts: alignak.objects.host.Hosts
:param services: services to link to
:type services: alignak.objects.service.Services
:param hostgroups: hostgroups to link to
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param servicegroups: servicegroups to link to
:type servicegroups: alignak.objects.servicegroup.Servicegroups
:param macromodulations: macromodulations to link to
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param timeperiods: timeperiods to link to
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
|
[
"Loop",
"on",
"hosts",
"or",
"services",
"and",
"call",
"SchedulingItem",
".",
"create_business_rules"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L3501-L3522
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/servicegroup.py
|
Servicegroup.get_services_by_explosion
|
def get_services_by_explosion(self, servicegroups):
# pylint: disable=access-member-before-definition
"""
Get all services of this servicegroup and add it in members container
:param servicegroups: servicegroups object
:type servicegroups: alignak.objects.servicegroup.Servicegroups
:return: return empty string or list of members
:rtype: str or list
"""
# First we tag the hg so it will not be explode
# if a son of it already call it
self.already_exploded = True
# Now the recursive part
# rec_tag is set to False every HG we explode
# so if True here, it must be a loop in HG
# calls... not GOOD!
if self.rec_tag:
logger.error("[servicegroup::%s] got a loop in servicegroup definition",
self.get_name())
if hasattr(self, 'members'):
return self.members
return ''
# Ok, not a loop, we tag it and continue
self.rec_tag = True
sg_mbrs = self.get_servicegroup_members()
for sg_mbr in sg_mbrs:
servicegroup = servicegroups.find_by_name(sg_mbr.strip())
if servicegroup is not None:
value = servicegroup.get_services_by_explosion(servicegroups)
if value is not None:
self.add_members(value)
if hasattr(self, 'members'):
return self.members
return ''
|
python
|
def get_services_by_explosion(self, servicegroups):
# pylint: disable=access-member-before-definition
"""
Get all services of this servicegroup and add it in members container
:param servicegroups: servicegroups object
:type servicegroups: alignak.objects.servicegroup.Servicegroups
:return: return empty string or list of members
:rtype: str or list
"""
# First we tag the hg so it will not be explode
# if a son of it already call it
self.already_exploded = True
# Now the recursive part
# rec_tag is set to False every HG we explode
# so if True here, it must be a loop in HG
# calls... not GOOD!
if self.rec_tag:
logger.error("[servicegroup::%s] got a loop in servicegroup definition",
self.get_name())
if hasattr(self, 'members'):
return self.members
return ''
# Ok, not a loop, we tag it and continue
self.rec_tag = True
sg_mbrs = self.get_servicegroup_members()
for sg_mbr in sg_mbrs:
servicegroup = servicegroups.find_by_name(sg_mbr.strip())
if servicegroup is not None:
value = servicegroup.get_services_by_explosion(servicegroups)
if value is not None:
self.add_members(value)
if hasattr(self, 'members'):
return self.members
return ''
|
[
"def",
"get_services_by_explosion",
"(",
"self",
",",
"servicegroups",
")",
":",
"# pylint: disable=access-member-before-definition",
"# First we tag the hg so it will not be explode",
"# if a son of it already call it",
"self",
".",
"already_exploded",
"=",
"True",
"# Now the recursive part",
"# rec_tag is set to False every HG we explode",
"# so if True here, it must be a loop in HG",
"# calls... not GOOD!",
"if",
"self",
".",
"rec_tag",
":",
"logger",
".",
"error",
"(",
"\"[servicegroup::%s] got a loop in servicegroup definition\"",
",",
"self",
".",
"get_name",
"(",
")",
")",
"if",
"hasattr",
"(",
"self",
",",
"'members'",
")",
":",
"return",
"self",
".",
"members",
"return",
"''",
"# Ok, not a loop, we tag it and continue",
"self",
".",
"rec_tag",
"=",
"True",
"sg_mbrs",
"=",
"self",
".",
"get_servicegroup_members",
"(",
")",
"for",
"sg_mbr",
"in",
"sg_mbrs",
":",
"servicegroup",
"=",
"servicegroups",
".",
"find_by_name",
"(",
"sg_mbr",
".",
"strip",
"(",
")",
")",
"if",
"servicegroup",
"is",
"not",
"None",
":",
"value",
"=",
"servicegroup",
".",
"get_services_by_explosion",
"(",
"servicegroups",
")",
"if",
"value",
"is",
"not",
"None",
":",
"self",
".",
"add_members",
"(",
"value",
")",
"if",
"hasattr",
"(",
"self",
",",
"'members'",
")",
":",
"return",
"self",
".",
"members",
"return",
"''"
] |
Get all services of this servicegroup and add it in members container
:param servicegroups: servicegroups object
:type servicegroups: alignak.objects.servicegroup.Servicegroups
:return: return empty string or list of members
:rtype: str or list
|
[
"Get",
"all",
"services",
"of",
"this",
"servicegroup",
"and",
"add",
"it",
"in",
"members",
"container"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/servicegroup.py#L115-L154
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/servicegroup.py
|
Servicegroups.explode
|
def explode(self):
"""
Get services and put them in members container
:return: None
"""
# We do not want a same service group to be exploded again and again
# so we tag it
for tmp_sg in list(self.items.values()):
tmp_sg.already_exploded = False
for servicegroup in list(self.items.values()):
if servicegroup.already_exploded:
continue
# get_services_by_explosion is a recursive
# function, so we must tag hg so we do not loop
for tmp_sg in list(self.items.values()):
tmp_sg.rec_tag = False
servicegroup.get_services_by_explosion(self)
# We clean the tags
for tmp_sg in list(self.items.values()):
if hasattr(tmp_sg, 'rec_tag'):
del tmp_sg.rec_tag
del tmp_sg.already_exploded
|
python
|
def explode(self):
"""
Get services and put them in members container
:return: None
"""
# We do not want a same service group to be exploded again and again
# so we tag it
for tmp_sg in list(self.items.values()):
tmp_sg.already_exploded = False
for servicegroup in list(self.items.values()):
if servicegroup.already_exploded:
continue
# get_services_by_explosion is a recursive
# function, so we must tag hg so we do not loop
for tmp_sg in list(self.items.values()):
tmp_sg.rec_tag = False
servicegroup.get_services_by_explosion(self)
# We clean the tags
for tmp_sg in list(self.items.values()):
if hasattr(tmp_sg, 'rec_tag'):
del tmp_sg.rec_tag
del tmp_sg.already_exploded
|
[
"def",
"explode",
"(",
"self",
")",
":",
"# We do not want a same service group to be exploded again and again",
"# so we tag it",
"for",
"tmp_sg",
"in",
"list",
"(",
"self",
".",
"items",
".",
"values",
"(",
")",
")",
":",
"tmp_sg",
".",
"already_exploded",
"=",
"False",
"for",
"servicegroup",
"in",
"list",
"(",
"self",
".",
"items",
".",
"values",
"(",
")",
")",
":",
"if",
"servicegroup",
".",
"already_exploded",
":",
"continue",
"# get_services_by_explosion is a recursive",
"# function, so we must tag hg so we do not loop",
"for",
"tmp_sg",
"in",
"list",
"(",
"self",
".",
"items",
".",
"values",
"(",
")",
")",
":",
"tmp_sg",
".",
"rec_tag",
"=",
"False",
"servicegroup",
".",
"get_services_by_explosion",
"(",
"self",
")",
"# We clean the tags",
"for",
"tmp_sg",
"in",
"list",
"(",
"self",
".",
"items",
".",
"values",
"(",
")",
")",
":",
"if",
"hasattr",
"(",
"tmp_sg",
",",
"'rec_tag'",
")",
":",
"del",
"tmp_sg",
".",
"rec_tag",
"del",
"tmp_sg",
".",
"already_exploded"
] |
Get services and put them in members container
:return: None
|
[
"Get",
"services",
"and",
"put",
"them",
"in",
"members",
"container"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/servicegroup.py#L260-L285
|
train
|
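The explode / get_services_by_explosion pair above drives a recursion over group membership with two flags: already_exploded avoids expanding the same group twice and rec_tag detects loops in group definitions. A compact stand-alone sketch of that recursion-with-loop-detection pattern, with groups modelled as plain dicts; the group and member names are invented:

def expand(groups, name):
    # Recursively collect members, flagging definition loops as above.
    group = groups[name]
    if group.get("rec_tag"):
        print("loop detected in group definition: %s" % name)
        return group["members"]
    group["rec_tag"] = True
    for sub_name in group.get("group_members", []):
        group["members"].extend(expand(groups, sub_name))
    return group["members"]

def explode(groups):
    for name in groups:
        # reset the recursion tags before each top-level expansion
        for grp in groups.values():
            grp["rec_tag"] = False
        expand(groups, name)

groups = {
    "web": {"members": ["http", "https"], "group_members": ["db"]},
    "db": {"members": ["mysql"], "group_members": []},
}
explode(groups)
print(groups["web"]["members"])   # ['http', 'https', 'mysql']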
Alignak-monitoring/alignak
|
alignak/log.py
|
setup_logger
|
def setup_logger(logger_configuration_file, log_dir=None, process_name='', log_file=''):
# pylint: disable=too-many-branches
"""
Configure the provided logger
- get and update the content of the Json configuration file
- configure the logger with this file
If a log_dir and process_name are provided, the format and filename in the configuration file
are updated with the provided values if they contain the patterns %(logdir)s and %(daemon)s
If no log_dir and process_name are provided, this function will truncate the log file
defined in the configuration file.
If a log file name is provided, it will override the default defined log file name.
At first, this function checks if the logger is still existing and initialized to
update the handlers and formatters. This mainly happens during the unit tests.
:param logger_configuration_file: Python Json logger configuration file
:rtype logger_configuration_file: str
:param log_dir: default log directory to update the defined logging handlers
:rtype log_dir: str
:param process_name: process name to update the defined logging formatters
:rtype process_name: str
:param log_file: log file name to update the defined log file
:rtype log_file: str
:return: None
"""
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
for handler in logger_.handlers:
if not process_name:
break
# Logger is already configured?
if getattr(handler, '_name', None) == 'daemons':
# Update the declared formats and file names with the process name
# This is for unit tests purpose only: alignak_tests will be replaced
# with the provided process name
for hdlr in logger_.handlers:
# print("- handler : %s (%s)" % (hdlr, hdlr.formatter._fmt))
if 'alignak_tests' in hdlr.formatter._fmt:
formatter = logging.Formatter(hdlr.formatter._fmt.replace("alignak_tests",
process_name))
hdlr.setFormatter(formatter)
if getattr(hdlr, 'filename', None) and 'alignak_tests' in hdlr.filename:
hdlr.filename = hdlr.filename._fmt.replace("alignak_tests", process_name)
# print("- handler : %s (%s) -> %s" % (hdlr, hdlr.formatter._fmt,
# hdlr.filename))
# else:
# print("- handler : %s (%s)" % (hdlr, hdlr.formatter._fmt))
break
else:
if not logger_configuration_file or not os.path.exists(logger_configuration_file):
print("The logger configuration file does not exist: %s" % logger_configuration_file)
return
with open(logger_configuration_file, 'rt') as _file:
config = json.load(_file)
truncate = False
if not process_name and not log_dir:
truncate = True
if not process_name:
process_name = 'alignak_tests'
if not log_dir:
log_dir = '/tmp'
# Update the declared formats with the process name
for formatter in config['formatters']:
if 'format' not in config['formatters'][formatter]:
continue
config['formatters'][formatter]['format'] = \
config['formatters'][formatter]['format'].replace("%(daemon)s", process_name)
# Update the declared log file names with the log directory
for hdlr in config['handlers']:
if 'filename' not in config['handlers'][hdlr]:
continue
if log_file and hdlr == 'daemons':
config['handlers'][hdlr]['filename'] = log_file
else:
config['handlers'][hdlr]['filename'] = \
config['handlers'][hdlr]['filename'].replace("%(logdir)s", log_dir)
config['handlers'][hdlr]['filename'] = \
config['handlers'][hdlr]['filename'].replace("%(daemon)s", process_name)
if truncate and os.path.exists(config['handlers'][hdlr]['filename']):
with open(config['handlers'][hdlr]['filename'], "w") as file_log_file:
file_log_file.truncate()
# Configure the logger, any error will raise an exception
logger_dictConfig(config)
|
python
|
def setup_logger(logger_configuration_file, log_dir=None, process_name='', log_file=''):
# pylint: disable=too-many-branches
"""
Configure the provided logger
- get and update the content of the Json configuration file
- configure the logger with this file
If a log_dir and process_name are provided, the format and filename in the configuration file
are updated with the provided values if they contain the patterns %(logdir)s and %(daemon)s
If no log_dir and process_name are provided, this function will truncate the log file
defined in the configuration file.
If a log file name is provided, it will override the default defined log file name.
At first, this function checks if the logger is still existing and initialized to
update the handlers and formatters. This mainly happens during the unit tests.
:param logger_configuration_file: Python Json logger configuration file
:rtype logger_configuration_file: str
:param log_dir: default log directory to update the defined logging handlers
:rtype log_dir: str
:param process_name: process name to update the defined logging formatters
:rtype process_name: str
:param log_file: log file name to update the defined log file
:rtype log_file: str
:return: None
"""
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
for handler in logger_.handlers:
if not process_name:
break
# Logger is already configured?
if getattr(handler, '_name', None) == 'daemons':
# Update the declared formats and file names with the process name
# This is for unit tests purpose only: alignak_tests will be replaced
# with the provided process name
for hdlr in logger_.handlers:
# print("- handler : %s (%s)" % (hdlr, hdlr.formatter._fmt))
if 'alignak_tests' in hdlr.formatter._fmt:
formatter = logging.Formatter(hdlr.formatter._fmt.replace("alignak_tests",
process_name))
hdlr.setFormatter(formatter)
if getattr(hdlr, 'filename', None) and 'alignak_tests' in hdlr.filename:
hdlr.filename = hdlr.filename._fmt.replace("alignak_tests", process_name)
# print("- handler : %s (%s) -> %s" % (hdlr, hdlr.formatter._fmt,
# hdlr.filename))
# else:
# print("- handler : %s (%s)" % (hdlr, hdlr.formatter._fmt))
break
else:
if not logger_configuration_file or not os.path.exists(logger_configuration_file):
print("The logger configuration file does not exist: %s" % logger_configuration_file)
return
with open(logger_configuration_file, 'rt') as _file:
config = json.load(_file)
truncate = False
if not process_name and not log_dir:
truncate = True
if not process_name:
process_name = 'alignak_tests'
if not log_dir:
log_dir = '/tmp'
# Update the declared formats with the process name
for formatter in config['formatters']:
if 'format' not in config['formatters'][formatter]:
continue
config['formatters'][formatter]['format'] = \
config['formatters'][formatter]['format'].replace("%(daemon)s", process_name)
# Update the declared log file names with the log directory
for hdlr in config['handlers']:
if 'filename' not in config['handlers'][hdlr]:
continue
if log_file and hdlr == 'daemons':
config['handlers'][hdlr]['filename'] = log_file
else:
config['handlers'][hdlr]['filename'] = \
config['handlers'][hdlr]['filename'].replace("%(logdir)s", log_dir)
config['handlers'][hdlr]['filename'] = \
config['handlers'][hdlr]['filename'].replace("%(daemon)s", process_name)
if truncate and os.path.exists(config['handlers'][hdlr]['filename']):
with open(config['handlers'][hdlr]['filename'], "w") as file_log_file:
file_log_file.truncate()
# Configure the logger, any error will raise an exception
logger_dictConfig(config)
|
[
"def",
"setup_logger",
"(",
"logger_configuration_file",
",",
"log_dir",
"=",
"None",
",",
"process_name",
"=",
"''",
",",
"log_file",
"=",
"''",
")",
":",
"# pylint: disable=too-many-branches",
"logger_",
"=",
"logging",
".",
"getLogger",
"(",
"ALIGNAK_LOGGER_NAME",
")",
"for",
"handler",
"in",
"logger_",
".",
"handlers",
":",
"if",
"not",
"process_name",
":",
"break",
"# Logger is already configured?",
"if",
"getattr",
"(",
"handler",
",",
"'_name'",
",",
"None",
")",
"==",
"'daemons'",
":",
"# Update the declared formats and file names with the process name",
"# This is for unit tests purpose only: alignak_tests will be replaced",
"# with the provided process name",
"for",
"hdlr",
"in",
"logger_",
".",
"handlers",
":",
"# print(\"- handler : %s (%s)\" % (hdlr, hdlr.formatter._fmt))",
"if",
"'alignak_tests'",
"in",
"hdlr",
".",
"formatter",
".",
"_fmt",
":",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"hdlr",
".",
"formatter",
".",
"_fmt",
".",
"replace",
"(",
"\"alignak_tests\"",
",",
"process_name",
")",
")",
"hdlr",
".",
"setFormatter",
"(",
"formatter",
")",
"if",
"getattr",
"(",
"hdlr",
",",
"'filename'",
",",
"None",
")",
"and",
"'alignak_tests'",
"in",
"hdlr",
".",
"filename",
":",
"hdlr",
".",
"filename",
"=",
"hdlr",
".",
"filename",
".",
"_fmt",
".",
"replace",
"(",
"\"alignak_tests\"",
",",
"process_name",
")",
"# print(\"- handler : %s (%s) -> %s\" % (hdlr, hdlr.formatter._fmt,",
"# hdlr.filename))",
"# else:",
"# print(\"- handler : %s (%s)\" % (hdlr, hdlr.formatter._fmt))",
"break",
"else",
":",
"if",
"not",
"logger_configuration_file",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"logger_configuration_file",
")",
":",
"print",
"(",
"\"The logger configuration file does not exist: %s\"",
"%",
"logger_configuration_file",
")",
"return",
"with",
"open",
"(",
"logger_configuration_file",
",",
"'rt'",
")",
"as",
"_file",
":",
"config",
"=",
"json",
".",
"load",
"(",
"_file",
")",
"truncate",
"=",
"False",
"if",
"not",
"process_name",
"and",
"not",
"log_dir",
":",
"truncate",
"=",
"True",
"if",
"not",
"process_name",
":",
"process_name",
"=",
"'alignak_tests'",
"if",
"not",
"log_dir",
":",
"log_dir",
"=",
"'/tmp'",
"# Update the declared formats with the process name",
"for",
"formatter",
"in",
"config",
"[",
"'formatters'",
"]",
":",
"if",
"'format'",
"not",
"in",
"config",
"[",
"'formatters'",
"]",
"[",
"formatter",
"]",
":",
"continue",
"config",
"[",
"'formatters'",
"]",
"[",
"formatter",
"]",
"[",
"'format'",
"]",
"=",
"config",
"[",
"'formatters'",
"]",
"[",
"formatter",
"]",
"[",
"'format'",
"]",
".",
"replace",
"(",
"\"%(daemon)s\"",
",",
"process_name",
")",
"# Update the declared log file names with the log directory",
"for",
"hdlr",
"in",
"config",
"[",
"'handlers'",
"]",
":",
"if",
"'filename'",
"not",
"in",
"config",
"[",
"'handlers'",
"]",
"[",
"hdlr",
"]",
":",
"continue",
"if",
"log_file",
"and",
"hdlr",
"==",
"'daemons'",
":",
"config",
"[",
"'handlers'",
"]",
"[",
"hdlr",
"]",
"[",
"'filename'",
"]",
"=",
"log_file",
"else",
":",
"config",
"[",
"'handlers'",
"]",
"[",
"hdlr",
"]",
"[",
"'filename'",
"]",
"=",
"config",
"[",
"'handlers'",
"]",
"[",
"hdlr",
"]",
"[",
"'filename'",
"]",
".",
"replace",
"(",
"\"%(logdir)s\"",
",",
"log_dir",
")",
"config",
"[",
"'handlers'",
"]",
"[",
"hdlr",
"]",
"[",
"'filename'",
"]",
"=",
"config",
"[",
"'handlers'",
"]",
"[",
"hdlr",
"]",
"[",
"'filename'",
"]",
".",
"replace",
"(",
"\"%(daemon)s\"",
",",
"process_name",
")",
"if",
"truncate",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"config",
"[",
"'handlers'",
"]",
"[",
"hdlr",
"]",
"[",
"'filename'",
"]",
")",
":",
"with",
"open",
"(",
"config",
"[",
"'handlers'",
"]",
"[",
"hdlr",
"]",
"[",
"'filename'",
"]",
",",
"\"w\"",
")",
"as",
"file_log_file",
":",
"file_log_file",
".",
"truncate",
"(",
")",
"# Configure the logger, any error will raise an exception",
"logger_dictConfig",
"(",
"config",
")"
] |
Configure the provided logger
- get and update the content of the Json configuration file
- configure the logger with this file
If a log_dir and process_name are provided, the format and filename in the configuration file
are updated with the provided values if they contain the patterns %(logdir)s and %(daemon)s
If neither a log_dir nor a process_name is provided, this function will truncate the log file
defined in the configuration file.
If a log file name is provided, it will override the default defined log file name.
First, this function checks whether the logger already exists and is initialized, in order to
update its handlers and formatters. This mainly happens during the unit tests.
:param logger_configuration_file: Python Json logger configuration file
:rtype logger_configuration_file: str
:param log_dir: default log directory to update the defined logging handlers
:rtype log_dir: str
:param process_name: process name to update the defined logging formatters
:rtype process_name: str
:param log_file: log file name to update the defined log file
:rtype log_file: str
:return: None
|
[
"Configure",
"the",
"provided",
"logger",
"-",
"get",
"and",
"update",
"the",
"content",
"of",
"the",
"Json",
"configuration",
"file",
"-",
"configure",
"the",
"logger",
"with",
"this",
"file"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/log.py#L111-L198
|
train
|
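A minimal usage sketch for setup_logger, assuming a daemon configures its logging at startup; the JSON configuration path, log directory and process name below are invented for the example, while the module path alignak.log and the function signature come from the record above.

from alignak.log import setup_logger

# Hypothetical paths and daemon name -- adjust to the local installation.
setup_logger('/usr/local/etc/alignak/alignak-logger.json',
             log_dir='/var/log/alignak',
             process_name='alignak-scheduler-master')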
Alignak-monitoring/alignak
|
alignak/log.py
|
set_log_console
|
def set_log_console(log_level=logging.INFO):
"""Set the Alignak daemons logger have a console log handler.
This is only used for the arbiter verify mode to add a console log handler.
:param log_level: log level
:return: n/a
"""
# Change the logger and all its handlers log level
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
logger_.setLevel(log_level)
# Adding a console logger...
csh = ColorStreamHandler(sys.stdout)
csh.setFormatter(Formatter('[%(asctime)s] %(levelname)s: [%(name)s] %(message)s',
"%Y-%m-%d %H:%M:%S"))
logger_.addHandler(csh)
|
python
|
def set_log_console(log_level=logging.INFO):
"""Set the Alignak daemons logger have a console log handler.
This is only used for the arbiter verify mode to add a console log handler.
:param log_level: log level
:return: n/a
"""
# Change the logger and all its handlers log level
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
logger_.setLevel(log_level)
# Adding a console logger...
csh = ColorStreamHandler(sys.stdout)
csh.setFormatter(Formatter('[%(asctime)s] %(levelname)s: [%(name)s] %(message)s',
"%Y-%m-%d %H:%M:%S"))
logger_.addHandler(csh)
|
[
"def",
"set_log_console",
"(",
"log_level",
"=",
"logging",
".",
"INFO",
")",
":",
"# Change the logger and all its handlers log level",
"logger_",
"=",
"logging",
".",
"getLogger",
"(",
"ALIGNAK_LOGGER_NAME",
")",
"logger_",
".",
"setLevel",
"(",
"log_level",
")",
"# Adding a console logger...",
"csh",
"=",
"ColorStreamHandler",
"(",
"sys",
".",
"stdout",
")",
"csh",
".",
"setFormatter",
"(",
"Formatter",
"(",
"'[%(asctime)s] %(levelname)s: [%(name)s] %(message)s'",
",",
"\"%Y-%m-%d %H:%M:%S\"",
")",
")",
"logger_",
".",
"addHandler",
"(",
"csh",
")"
] |
Set the Alignak daemons logger to have a console log handler.
This is only used for the arbiter verify mode to add a console log handler.
:param log_level: log level
:return: n/a
|
[
"Set",
"the",
"Alignak",
"daemons",
"logger",
"have",
"a",
"console",
"log",
"handler",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/log.py#L201-L217
|
train
|
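A short sketch of how set_log_console could be used before an arbiter configuration check; the DEBUG level is an arbitrary choice, and ALIGNAK_LOGGER_NAME is assumed to be importable from alignak.log as it is used in the code above.

import logging
from alignak.log import ALIGNAK_LOGGER_NAME, set_log_console

# Add a colored console handler and confirm it is active.
set_log_console(logging.DEBUG)
logging.getLogger(ALIGNAK_LOGGER_NAME).info("Console logging is now active")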
Alignak-monitoring/alignak
|
alignak/log.py
|
set_log_level
|
def set_log_level(log_level=logging.INFO, handlers=None):
"""Set the Alignak logger log level. This is mainly used for the arbiter verify code to
set the log level at INFO level whatever the configured log level is set.
This is also used when changing the daemon log level thanks to the WS interface
If a list of handler names is provided, all the handlers whose name is in this list are
concerned; otherwise only the `daemons` handler log level is changed.
:param handlers: list of concerned handlers
:type: list
:param log_level: log level
:return: n/a
"""
# print("Setting log level: %s" % (log_level))
# Change the logger and all its handlers log level
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
logger_.setLevel(log_level)
if handlers is not None:
for handler in logger_.handlers:
if getattr(handler, '_name', None) in handlers:
handler.setLevel(log_level)
|
python
|
def set_log_level(log_level=logging.INFO, handlers=None):
"""Set the Alignak logger log level. This is mainly used for the arbiter verify code to
set the log level at INFO level whatever the configured log level is set.
This is also used when changing the daemon log level thanks to the WS interface
If a list of handler names is provided, all the handlers whose name is in this list are
concerned; otherwise only the `daemons` handler log level is changed.
:param handlers: list of concerned handlers
:type: list
:param log_level: log level
:return: n/a
"""
# print("Setting log level: %s" % (log_level))
# Change the logger and all its handlers log level
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
logger_.setLevel(log_level)
if handlers is not None:
for handler in logger_.handlers:
if getattr(handler, '_name', None) in handlers:
handler.setLevel(log_level)
|
[
"def",
"set_log_level",
"(",
"log_level",
"=",
"logging",
".",
"INFO",
",",
"handlers",
"=",
"None",
")",
":",
"# print(\"Setting log level: %s\" % (log_level))",
"# Change the logger and all its handlers log level",
"logger_",
"=",
"logging",
".",
"getLogger",
"(",
"ALIGNAK_LOGGER_NAME",
")",
"logger_",
".",
"setLevel",
"(",
"log_level",
")",
"if",
"handlers",
"is",
"not",
"None",
":",
"for",
"handler",
"in",
"logger_",
".",
"handlers",
":",
"if",
"getattr",
"(",
"handler",
",",
"'_name'",
",",
"None",
")",
"in",
"handlers",
":",
"handler",
".",
"setLevel",
"(",
"log_level",
")"
] |
Set the Alignak logger log level. This is mainly used for the arbiter verify code to
set the log level at INFO level whatever the configured log level is set.
This is also used when changing the daemon log level thanks to the WS interface
If a list of handler names is provided, all the handlers whose name is in this list are
concerned; otherwise only the `daemons` handler log level is changed.
:param handlers: list of concerned handlers
:type: list
:param log_level: log level
:return: n/a
|
[
"Set",
"the",
"Alignak",
"logger",
"log",
"level",
".",
"This",
"is",
"mainly",
"used",
"for",
"the",
"arbiter",
"verify",
"code",
"to",
"set",
"the",
"log",
"level",
"at",
"INFO",
"level",
"whatever",
"the",
"configured",
"log",
"level",
"is",
"set",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/log.py#L220-L242
|
train
|
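A brief sketch, assuming the file handler declared in the logger configuration is named 'daemons' as in the docstring above; it raises both the logger and that handler to DEBUG.

import logging
from alignak.log import set_log_level

# Change the global Alignak log level and the 'daemons' handler level.
set_log_level(logging.DEBUG, handlers=['daemons'])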
Alignak-monitoring/alignak
|
alignak/log.py
|
make_monitoring_log
|
def make_monitoring_log(level, message, timestamp=None, to_logger=False):
"""
Function used to build the monitoring log.
Emit a log message with the provided level to the monitoring log logger.
Build a Brok typed as monitoring_log with the provided message
When to_logger is True, the information is sent to the python logger, else a monitoring_log
Brok is returned. The Brok is managed by the daemons to build an Event that will be logged
by the Arbiter when it collects all the events.
TODO: replace with dedicated brok for each event to log - really useful?
:param level: log level as defined in logging
:type level: str
:param message: message to send to the monitoring log logger
:type message: str
:param to_logger: when set, send to the logger, else raise a brok
:type to_logger: bool
:param timestamp: if set, force the log event timestamp
:return: a monitoring_log Brok
:rtype: alignak.brok.Brok
"""
level = level.lower()
if level not in ['debug', 'info', 'warning', 'error', 'critical']:
return False
if to_logger:
logging.getLogger(ALIGNAK_LOGGER_NAME).debug("Monitoring log: %s / %s", level, message)
# Emit to our monitoring log logger
message = message.replace('\r', '\\r')
message = message.replace('\n', '\\n')
logger_ = logging.getLogger(MONITORING_LOGGER_NAME)
logging_function = getattr(logger_, level)
try:
message = message.decode('utf8', 'ignore')
except UnicodeEncodeError:
pass
except AttributeError:
# Python 3 raises an exception!
pass
if timestamp:
st = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
logging_function(message, extra={'my_date': st})
else:
logging_function(message)
return True
# ... and returns a brok
return Brok({'type': 'monitoring_log', 'data': {'level': level, 'message': message}})
|
python
|
def make_monitoring_log(level, message, timestamp=None, to_logger=False):
"""
Function used to build the monitoring log.
Emit a log message with the provided level to the monitoring log logger.
Build a Brok typed as monitoring_log with the provided message
When to_logger is True, the information is sent to the python logger, else a monitoring_log
Brok is returned. The Brok is managed by the daemons to build an Event that will be logged
by the Arbiter when it collects all the events.
TODO: replace with dedicated brok for each event to log - really useful?
:param level: log level as defined in logging
:type level: str
:param message: message to send to the monitoring log logger
:type message: str
:param to_logger: when set, send to the logger, else raise a brok
:type to_logger: bool
:param timestamp: if set, force the log event timestamp
:return: a monitoring_log Brok
:rtype: alignak.brok.Brok
"""
level = level.lower()
if level not in ['debug', 'info', 'warning', 'error', 'critical']:
return False
if to_logger:
logging.getLogger(ALIGNAK_LOGGER_NAME).debug("Monitoring log: %s / %s", level, message)
# Emit to our monitoring log logger
message = message.replace('\r', '\\r')
message = message.replace('\n', '\\n')
logger_ = logging.getLogger(MONITORING_LOGGER_NAME)
logging_function = getattr(logger_, level)
try:
message = message.decode('utf8', 'ignore')
except UnicodeEncodeError:
pass
except AttributeError:
# Python 3 raises an exception!
pass
if timestamp:
st = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
logging_function(message, extra={'my_date': st})
else:
logging_function(message)
return True
# ... and returns a brok
return Brok({'type': 'monitoring_log', 'data': {'level': level, 'message': message}})
|
[
"def",
"make_monitoring_log",
"(",
"level",
",",
"message",
",",
"timestamp",
"=",
"None",
",",
"to_logger",
"=",
"False",
")",
":",
"level",
"=",
"level",
".",
"lower",
"(",
")",
"if",
"level",
"not",
"in",
"[",
"'debug'",
",",
"'info'",
",",
"'warning'",
",",
"'error'",
",",
"'critical'",
"]",
":",
"return",
"False",
"if",
"to_logger",
":",
"logging",
".",
"getLogger",
"(",
"ALIGNAK_LOGGER_NAME",
")",
".",
"debug",
"(",
"\"Monitoring log: %s / %s\"",
",",
"level",
",",
"message",
")",
"# Emit to our monitoring log logger",
"message",
"=",
"message",
".",
"replace",
"(",
"'\\r'",
",",
"'\\\\r'",
")",
"message",
"=",
"message",
".",
"replace",
"(",
"'\\n'",
",",
"'\\\\n'",
")",
"logger_",
"=",
"logging",
".",
"getLogger",
"(",
"MONITORING_LOGGER_NAME",
")",
"logging_function",
"=",
"getattr",
"(",
"logger_",
",",
"level",
")",
"try",
":",
"message",
"=",
"message",
".",
"decode",
"(",
"'utf8'",
",",
"'ignore'",
")",
"except",
"UnicodeEncodeError",
":",
"pass",
"except",
"AttributeError",
":",
"# Python 3 raises an exception!",
"pass",
"if",
"timestamp",
":",
"st",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"timestamp",
")",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
")",
"logging_function",
"(",
"message",
",",
"extra",
"=",
"{",
"'my_date'",
":",
"st",
"}",
")",
"else",
":",
"logging_function",
"(",
"message",
")",
"return",
"True",
"# ... and returns a brok",
"return",
"Brok",
"(",
"{",
"'type'",
":",
"'monitoring_log'",
",",
"'data'",
":",
"{",
"'level'",
":",
"level",
",",
"'message'",
":",
"message",
"}",
"}",
")"
] |
Function used to build the monitoring log.
Emit a log message with the provided level to the monitoring log logger.
Build a Brok typed as monitoring_log with the provided message
When to_logger is True, the information is sent to the python logger, else a monitoring_log
Brok is returned. The Brok is managed by the daemons to build an Event that will be logged
by the Arbiter when it collects all the events.
TODO: replace with dedicated brok for each event to log - really useful?
:param level: log level as defined in logging
:type level: str
:param message: message to send to the monitoring log logger
:type message: str
:param to_logger: when set, send to the logger, else raise a brok
:type to_logger: bool
:param timestamp: if set, force the log event timestamp
:return: a monitoring_log Brok
:rtype: alignak.brok.Brok
|
[
"Function",
"used",
"to",
"build",
"the",
"monitoring",
"log",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/log.py#L256-L308
|
train
|
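A hedged usage sketch for make_monitoring_log; the alert texts are invented and only the call forms come from the record. With to_logger=True the message goes straight to the Python logger, otherwise a monitoring_log Brok is returned for the daemons to forward.

from alignak.log import make_monitoring_log

# Send directly to the monitoring logger (message text is illustrative).
make_monitoring_log('info', 'HOST ALERT: srv01;DOWN;HARD;1;ping timeout', to_logger=True)

# Get a Brok back instead, so a daemon can hand the event to the arbiter.
brok = make_monitoring_log('warning', 'SERVICE ALERT: srv01;http;WARNING;SOFT;1;slow response')
print(brok)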
Alignak-monitoring/alignak
|
alignak/objects/contact.py
|
Contact.want_service_notification
|
def want_service_notification(self, notifways, timeperiods,
timestamp, state, n_type, business_impact, cmd=None):
"""Check if notification options match the state of the service
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("WARNING", "CRITICAL" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this service
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if contact wants notification, otherwise False
:rtype: bool
"""
if not self.service_notifications_enabled:
return False
# If we are in downtime, we do not want notification
for downtime_id in self.downtimes:
downtime = self.downtimes[downtime_id]
if downtime.is_in_effect:
self.in_scheduled_downtime = True
return False
self.in_scheduled_downtime = False
# Now the rest is for sub notificationways. If one is OK, we are ok
# We will filter in another phase
for notifway_id in self.notificationways:
notifway = notifways[notifway_id]
nw_b = notifway.want_service_notification(timeperiods, timestamp,
state, n_type, business_impact, cmd)
if nw_b:
return True
# Oh... no one is ok for it? so no, sorry
return False
|
python
|
def want_service_notification(self, notifways, timeperiods,
timestamp, state, n_type, business_impact, cmd=None):
"""Check if notification options match the state of the service
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("WARNING", "CRITICAL" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this service
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if contact wants notification, otherwise False
:rtype: bool
"""
if not self.service_notifications_enabled:
return False
# If we are in downtime, we do not want notification
for downtime_id in self.downtimes:
downtime = self.downtimes[downtime_id]
if downtime.is_in_effect:
self.in_scheduled_downtime = True
return False
self.in_scheduled_downtime = False
# Now the rest is for sub notificationways. If one is OK, we are ok
# We will filter in another phase
for notifway_id in self.notificationways:
notifway = notifways[notifway_id]
nw_b = notifway.want_service_notification(timeperiods, timestamp,
state, n_type, business_impact, cmd)
if nw_b:
return True
# Oh... no one is ok for it? so no, sorry
return False
|
[
"def",
"want_service_notification",
"(",
"self",
",",
"notifways",
",",
"timeperiods",
",",
"timestamp",
",",
"state",
",",
"n_type",
",",
"business_impact",
",",
"cmd",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"service_notifications_enabled",
":",
"return",
"False",
"# If we are in downtime, we do not want notification",
"for",
"downtime_id",
"in",
"self",
".",
"downtimes",
":",
"downtime",
"=",
"self",
".",
"downtimes",
"[",
"downtime_id",
"]",
"if",
"downtime",
".",
"is_in_effect",
":",
"self",
".",
"in_scheduled_downtime",
"=",
"True",
"return",
"False",
"self",
".",
"in_scheduled_downtime",
"=",
"False",
"# Now the rest is for sub notificationways. If one is OK, we are ok",
"# We will filter in another phase",
"for",
"notifway_id",
"in",
"self",
".",
"notificationways",
":",
"notifway",
"=",
"notifways",
"[",
"notifway_id",
"]",
"nw_b",
"=",
"notifway",
".",
"want_service_notification",
"(",
"timeperiods",
",",
"timestamp",
",",
"state",
",",
"n_type",
",",
"business_impact",
",",
"cmd",
")",
"if",
"nw_b",
":",
"return",
"True",
"# Oh... no one is ok for it? so no, sorry",
"return",
"False"
] |
Check if notification options match the state of the service
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("WARNING", "CRITICAL" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this service
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if contact wants notification, otherwise False
:rtype: bool
|
[
"Check",
"if",
"notification",
"options",
"match",
"the",
"state",
"of",
"the",
"service"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/contact.py#L243-L281
|
train
|
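A sketch of how a scheduler-side caller might consult this method; contact, notifways and timeperiods stand for objects already built by the daemon and are assumptions of the example, as is the helper name.

import time

def should_notify(contact, notifways, timeperiods):
    # 'contact' is an alignak.objects.contact.Contact; 'notifways' and
    # 'timeperiods' are the collections the scheduler linked it against.
    return contact.want_service_notification(
        notifways, timeperiods, int(time.time()),
        'CRITICAL', 'PROBLEM', business_impact=3)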
Alignak-monitoring/alignak
|
alignak/objects/contact.py
|
Contact.want_host_notification
|
def want_host_notification(self, notifways, timeperiods, timestamp, state, n_type,
business_impact, cmd=None):
"""Check if notification options match the state of the host
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("UP", "DOWN" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this host
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if contact wants notification, otherwise False
:rtype: bool
"""
if not self.host_notifications_enabled:
return False
# If we are in downtime, we do not want notification
for downtime in self.downtimes:
if downtime.is_in_effect:
self.in_scheduled_downtime = True
return False
self.in_scheduled_downtime = False
# Now it's all for sub notificationways. If one is OK, we are OK
# We will filter in another phase
for notifway_id in self.notificationways:
notifway = notifways[notifway_id]
nw_b = notifway.want_host_notification(timeperiods, timestamp,
state, n_type, business_impact, cmd)
if nw_b:
return True
# Oh, nobody..so NO :)
return False
|
python
|
def want_host_notification(self, notifways, timeperiods, timestamp, state, n_type,
business_impact, cmd=None):
"""Check if notification options match the state of the host
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("UP", "DOWN" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this host
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if contact wants notification, otherwise False
:rtype: bool
"""
if not self.host_notifications_enabled:
return False
# If we are in downtime, we do not want notification
for downtime in self.downtimes:
if downtime.is_in_effect:
self.in_scheduled_downtime = True
return False
self.in_scheduled_downtime = False
# Now it's all for sub notificationways. If one is OK, we are OK
# We will filter in another phase
for notifway_id in self.notificationways:
notifway = notifways[notifway_id]
nw_b = notifway.want_host_notification(timeperiods, timestamp,
state, n_type, business_impact, cmd)
if nw_b:
return True
# Oh, nobody..so NO :)
return False
|
[
"def",
"want_host_notification",
"(",
"self",
",",
"notifways",
",",
"timeperiods",
",",
"timestamp",
",",
"state",
",",
"n_type",
",",
"business_impact",
",",
"cmd",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"host_notifications_enabled",
":",
"return",
"False",
"# If we are in downtime, we do not want notification",
"for",
"downtime",
"in",
"self",
".",
"downtimes",
":",
"if",
"downtime",
".",
"is_in_effect",
":",
"self",
".",
"in_scheduled_downtime",
"=",
"True",
"return",
"False",
"self",
".",
"in_scheduled_downtime",
"=",
"False",
"# Now it's all for sub notificationways. If one is OK, we are OK",
"# We will filter in another phase",
"for",
"notifway_id",
"in",
"self",
".",
"notificationways",
":",
"notifway",
"=",
"notifways",
"[",
"notifway_id",
"]",
"nw_b",
"=",
"notifway",
".",
"want_host_notification",
"(",
"timeperiods",
",",
"timestamp",
",",
"state",
",",
"n_type",
",",
"business_impact",
",",
"cmd",
")",
"if",
"nw_b",
":",
"return",
"True",
"# Oh, nobody..so NO :)",
"return",
"False"
] |
Check if notification options match the state of the host
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("UP", "DOWN" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this host
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if contact wants notification, otherwise False
:rtype: bool
|
[
"Check",
"if",
"notification",
"options",
"match",
"the",
"state",
"of",
"the",
"host"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/contact.py#L283-L320
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/contact.py
|
Contacts.explode
|
def explode(self, contactgroups, notificationways):
"""Explode all contact for each contactsgroup
:param contactgroups: contactgroups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:param notificationways: notificationways to explode
:type notificationways: alignak.objects.notificationway.Notificationways
:return: None
"""
# The contactgroups property needs to be filled before we can use the information
self.apply_partial_inheritance('contactgroups')
# _special properties maybe came from a template, so
# import them before grok ourselves
for prop in Contact.special_properties:
if prop == 'contact_name':
continue
self.apply_partial_inheritance(prop)
# Register ourselves into the contactsgroups we are in
for contact in self:
if not (hasattr(contact, 'contact_name') and hasattr(contact, 'contactgroups')):
continue
for contactgroup in contact.contactgroups:
contactgroups.add_member(contact.contact_name, contactgroup.strip())
# Now create a notification way with the simple parameter of the
# contacts
for contact in self:
need_notificationway = False
params = {}
for param in Contact.simple_way_parameters:
if hasattr(contact, param):
need_notificationway = True
params[param] = getattr(contact, param)
elif contact.properties[param].has_default: # put a default text value
# Remove the value and put a default value
setattr(contact, param, contact.properties[param].default)
if need_notificationway:
cname = getattr(contact, 'contact_name', getattr(contact, 'alias', ''))
nw_name = cname + '_inner_nw'
notificationways.new_inner_member(nw_name, params)
if not hasattr(contact, 'notificationways'):
contact.notificationways = [nw_name]
else:
contact.notificationways = list(contact.notificationways)
contact.notificationways.append(nw_name)
|
python
|
def explode(self, contactgroups, notificationways):
"""Explode all contact for each contactsgroup
:param contactgroups: contactgroups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:param notificationways: notificationways to explode
:type notificationways: alignak.objects.notificationway.Notificationways
:return: None
"""
# The contactgroups property needs to be filled before we can use the information
self.apply_partial_inheritance('contactgroups')
# _special properties maybe came from a template, so
# import them before grok ourselves
for prop in Contact.special_properties:
if prop == 'contact_name':
continue
self.apply_partial_inheritance(prop)
# Register ourselves into the contactsgroups we are in
for contact in self:
if not (hasattr(contact, 'contact_name') and hasattr(contact, 'contactgroups')):
continue
for contactgroup in contact.contactgroups:
contactgroups.add_member(contact.contact_name, contactgroup.strip())
# Now create a notification way with the simple parameter of the
# contacts
for contact in self:
need_notificationway = False
params = {}
for param in Contact.simple_way_parameters:
if hasattr(contact, param):
need_notificationway = True
params[param] = getattr(contact, param)
elif contact.properties[param].has_default: # put a default text value
# Remove the value and put a default value
setattr(contact, param, contact.properties[param].default)
if need_notificationway:
cname = getattr(contact, 'contact_name', getattr(contact, 'alias', ''))
nw_name = cname + '_inner_nw'
notificationways.new_inner_member(nw_name, params)
if not hasattr(contact, 'notificationways'):
contact.notificationways = [nw_name]
else:
contact.notificationways = list(contact.notificationways)
contact.notificationways.append(nw_name)
|
[
"def",
"explode",
"(",
"self",
",",
"contactgroups",
",",
"notificationways",
")",
":",
"# Contactgroups property need to be fulfill for got the information",
"self",
".",
"apply_partial_inheritance",
"(",
"'contactgroups'",
")",
"# _special properties maybe came from a template, so",
"# import them before grok ourselves",
"for",
"prop",
"in",
"Contact",
".",
"special_properties",
":",
"if",
"prop",
"==",
"'contact_name'",
":",
"continue",
"self",
".",
"apply_partial_inheritance",
"(",
"prop",
")",
"# Register ourselves into the contactsgroups we are in",
"for",
"contact",
"in",
"self",
":",
"if",
"not",
"(",
"hasattr",
"(",
"contact",
",",
"'contact_name'",
")",
"and",
"hasattr",
"(",
"contact",
",",
"'contactgroups'",
")",
")",
":",
"continue",
"for",
"contactgroup",
"in",
"contact",
".",
"contactgroups",
":",
"contactgroups",
".",
"add_member",
"(",
"contact",
".",
"contact_name",
",",
"contactgroup",
".",
"strip",
"(",
")",
")",
"# Now create a notification way with the simple parameter of the",
"# contacts",
"for",
"contact",
"in",
"self",
":",
"need_notificationway",
"=",
"False",
"params",
"=",
"{",
"}",
"for",
"param",
"in",
"Contact",
".",
"simple_way_parameters",
":",
"if",
"hasattr",
"(",
"contact",
",",
"param",
")",
":",
"need_notificationway",
"=",
"True",
"params",
"[",
"param",
"]",
"=",
"getattr",
"(",
"contact",
",",
"param",
")",
"elif",
"contact",
".",
"properties",
"[",
"param",
"]",
".",
"has_default",
":",
"# put a default text value",
"# Remove the value and put a default value",
"setattr",
"(",
"contact",
",",
"param",
",",
"contact",
".",
"properties",
"[",
"param",
"]",
".",
"default",
")",
"if",
"need_notificationway",
":",
"cname",
"=",
"getattr",
"(",
"contact",
",",
"'contact_name'",
",",
"getattr",
"(",
"contact",
",",
"'alias'",
",",
"''",
")",
")",
"nw_name",
"=",
"cname",
"+",
"'_inner_nw'",
"notificationways",
".",
"new_inner_member",
"(",
"nw_name",
",",
"params",
")",
"if",
"not",
"hasattr",
"(",
"contact",
",",
"'notificationways'",
")",
":",
"contact",
".",
"notificationways",
"=",
"[",
"nw_name",
"]",
"else",
":",
"contact",
".",
"notificationways",
"=",
"list",
"(",
"contact",
".",
"notificationways",
")",
"contact",
".",
"notificationways",
".",
"append",
"(",
"nw_name",
")"
] |
Explode all contacts for each contactgroup
:param contactgroups: contactgroups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:param notificationways: notificationways to explode
:type notificationways: alignak.objects.notificationway.Notificationways
:return: None
|
[
"Explode",
"all",
"contact",
"for",
"each",
"contactsgroup"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/contact.py#L481-L528
|
train
|
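A toy sketch of the naming rule used above for the generated 'inner' notification ways; plain dictionaries stand in for the real Contact objects and the helper name is hypothetical.

def inner_nw_name(contact):
    # Fall back to the alias, then to an empty string, like the code above.
    cname = contact.get('contact_name', contact.get('alias', ''))
    return cname + '_inner_nw'

print(inner_nw_name({'contact_name': 'admin'}))  # -> admin_inner_nw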
Alignak-monitoring/alignak
|
alignak/modules/inner_retention.py
|
InnerRetention.hook_save_retention
|
def hook_save_retention(self, scheduler):
"""Save retention data to a Json formated file
:param scheduler: scheduler instance of alignak
:type scheduler: object
:return: None
"""
if not self.enabled:
logger.warning("Alignak retention module is not enabled."
"Saving objects state is not possible.")
return None
try:
start_time = time.time()
# Get retention data from the scheduler
data_to_save = scheduler.get_retention_data()
if not data_to_save:
logger.warning("Alignak retention data to save are not containing any information.")
return None
# Move services data to their respective hosts dictionary
# The Alignak scheduler does not merge the services into the host dictionary!
for host_name in data_to_save['hosts']:
data_to_save['hosts'][host_name]['services'] = {}
data_to_save['hosts'][host_name]['name'] = host_name
for host_name, service_description in data_to_save['services']:
data_to_save['hosts'][host_name]['services'][service_description] = \
data_to_save['services'][(host_name, service_description)]
try:
if not self.retention_file:
logger.info('Saving retention data to: %s', self.retention_dir)
for host_name in data_to_save['hosts']:
file_name = os.path.join(self.retention_dir,
self.retention_file,
"%s.json" % host_name)
with open(file_name, "w") as fd:
fd.write(json.dumps(data_to_save['hosts'][host_name],
indent=2, separators=(',', ': '),
sort_keys=True))
logger.debug('- saved: %s', file_name)
logger.info('Saved')
else:
logger.info('Saving retention data to: %s', self.retention_file)
with open(self.retention_file, "w") as fd:
fd.write(json.dumps(data_to_save['hosts'],
indent=2, separators=(',', ': '), sort_keys=True))
logger.info('Saved')
except Exception as exp: # pylint: disable=broad-except
# pragma: no cover, should never happen...
logger.warning("Error when saving retention data to %s", self.retention_file)
logger.exception(exp)
logger.info('%d hosts saved in retention', len(data_to_save['hosts']))
self.statsmgr.counter('retention-save.hosts', len(data_to_save['hosts']))
logger.info('%d services saved in retention', len(data_to_save['services']))
self.statsmgr.counter('retention-save.services', len(data_to_save['services']))
self.statsmgr.timer('retention-save.time', time.time() - start_time)
logger.info("Retention data saved in %s seconds", (time.time() - start_time))
except Exception as exp: # pylint: disable=broad-except
self.enabled = False
logger.warning("Retention save failed: %s", exp)
logger.exception(exp)
return False
return True
|
python
|
def hook_save_retention(self, scheduler):
"""Save retention data to a Json formated file
:param scheduler: scheduler instance of alignak
:type scheduler: object
:return: None
"""
if not self.enabled:
logger.warning("Alignak retention module is not enabled."
"Saving objects state is not possible.")
return None
try:
start_time = time.time()
# Get retention data from the scheduler
data_to_save = scheduler.get_retention_data()
if not data_to_save:
logger.warning("Alignak retention data to save are not containing any information.")
return None
# Move services data to their respective hosts dictionary
# The Alignak scheduler does not merge the services into the host dictionary!
for host_name in data_to_save['hosts']:
data_to_save['hosts'][host_name]['services'] = {}
data_to_save['hosts'][host_name]['name'] = host_name
for host_name, service_description in data_to_save['services']:
data_to_save['hosts'][host_name]['services'][service_description] = \
data_to_save['services'][(host_name, service_description)]
try:
if not self.retention_file:
logger.info('Saving retention data to: %s', self.retention_dir)
for host_name in data_to_save['hosts']:
file_name = os.path.join(self.retention_dir,
self.retention_file,
"%s.json" % host_name)
with open(file_name, "w") as fd:
fd.write(json.dumps(data_to_save['hosts'][host_name],
indent=2, separators=(',', ': '),
sort_keys=True))
logger.debug('- saved: %s', file_name)
logger.info('Saved')
else:
logger.info('Saving retention data to: %s', self.retention_file)
with open(self.retention_file, "w") as fd:
fd.write(json.dumps(data_to_save['hosts'],
indent=2, separators=(',', ': '), sort_keys=True))
logger.info('Saved')
except Exception as exp: # pylint: disable=broad-except
# pragma: no cover, should never happen...
logger.warning("Error when saving retention data to %s", self.retention_file)
logger.exception(exp)
logger.info('%d hosts saved in retention', len(data_to_save['hosts']))
self.statsmgr.counter('retention-save.hosts', len(data_to_save['hosts']))
logger.info('%d services saved in retention', len(data_to_save['services']))
self.statsmgr.counter('retention-save.services', len(data_to_save['services']))
self.statsmgr.timer('retention-save.time', time.time() - start_time)
logger.info("Retention data saved in %s seconds", (time.time() - start_time))
except Exception as exp: # pylint: disable=broad-except
self.enabled = False
logger.warning("Retention save failed: %s", exp)
logger.exception(exp)
return False
return True
|
[
"def",
"hook_save_retention",
"(",
"self",
",",
"scheduler",
")",
":",
"if",
"not",
"self",
".",
"enabled",
":",
"logger",
".",
"warning",
"(",
"\"Alignak retention module is not enabled.\"",
"\"Saving objects state is not possible.\"",
")",
"return",
"None",
"try",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"# Get retention data from the scheduler",
"data_to_save",
"=",
"scheduler",
".",
"get_retention_data",
"(",
")",
"if",
"not",
"data_to_save",
":",
"logger",
".",
"warning",
"(",
"\"Alignak retention data to save are not containing any information.\"",
")",
"return",
"None",
"# Move services data to their respective hosts dictionary",
"# Alignak scheduler do not merge the services into the host dictionary!",
"for",
"host_name",
"in",
"data_to_save",
"[",
"'hosts'",
"]",
":",
"data_to_save",
"[",
"'hosts'",
"]",
"[",
"host_name",
"]",
"[",
"'services'",
"]",
"=",
"{",
"}",
"data_to_save",
"[",
"'hosts'",
"]",
"[",
"host_name",
"]",
"[",
"'name'",
"]",
"=",
"host_name",
"for",
"host_name",
",",
"service_description",
"in",
"data_to_save",
"[",
"'services'",
"]",
":",
"data_to_save",
"[",
"'hosts'",
"]",
"[",
"host_name",
"]",
"[",
"'services'",
"]",
"[",
"service_description",
"]",
"=",
"data_to_save",
"[",
"'services'",
"]",
"[",
"(",
"host_name",
",",
"service_description",
")",
"]",
"try",
":",
"if",
"not",
"self",
".",
"retention_file",
":",
"logger",
".",
"info",
"(",
"'Saving retention data to: %s'",
",",
"self",
".",
"retention_dir",
")",
"for",
"host_name",
"in",
"data_to_save",
"[",
"'hosts'",
"]",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"retention_dir",
",",
"self",
".",
"retention_file",
",",
"\"%s.json\"",
"%",
"host_name",
")",
"with",
"open",
"(",
"file_name",
",",
"\"w\"",
")",
"as",
"fd",
":",
"fd",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"data_to_save",
"[",
"'hosts'",
"]",
"[",
"host_name",
"]",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
",",
"sort_keys",
"=",
"True",
")",
")",
"logger",
".",
"debug",
"(",
"'- saved: %s'",
",",
"file_name",
")",
"logger",
".",
"info",
"(",
"'Saved'",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'Saving retention data to: %s'",
",",
"self",
".",
"retention_file",
")",
"with",
"open",
"(",
"self",
".",
"retention_file",
",",
"\"w\"",
")",
"as",
"fd",
":",
"fd",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"data_to_save",
"[",
"'hosts'",
"]",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
",",
"sort_keys",
"=",
"True",
")",
")",
"logger",
".",
"info",
"(",
"'Saved'",
")",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"# pragma: no cover, should never happen...",
"logger",
".",
"warning",
"(",
"\"Error when saving retention data to %s\"",
",",
"self",
".",
"retention_file",
")",
"logger",
".",
"exception",
"(",
"exp",
")",
"logger",
".",
"info",
"(",
"'%d hosts saved in retention'",
",",
"len",
"(",
"data_to_save",
"[",
"'hosts'",
"]",
")",
")",
"self",
".",
"statsmgr",
".",
"counter",
"(",
"'retention-save.hosts'",
",",
"len",
"(",
"data_to_save",
"[",
"'hosts'",
"]",
")",
")",
"logger",
".",
"info",
"(",
"'%d services saved in retention'",
",",
"len",
"(",
"data_to_save",
"[",
"'services'",
"]",
")",
")",
"self",
".",
"statsmgr",
".",
"counter",
"(",
"'retention-save.services'",
",",
"len",
"(",
"data_to_save",
"[",
"'services'",
"]",
")",
")",
"self",
".",
"statsmgr",
".",
"timer",
"(",
"'retention-save.time'",
",",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
"logger",
".",
"info",
"(",
"\"Retention data saved in %s seconds\"",
",",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
")",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"self",
".",
"enabled",
"=",
"False",
"logger",
".",
"warning",
"(",
"\"Retention save failed: %s\"",
",",
"exp",
")",
"logger",
".",
"exception",
"(",
"exp",
")",
"return",
"False",
"return",
"True"
] |
Save retention data to a Json formatted file
:param scheduler: scheduler instance of alignak
:type scheduler: object
:return: None
|
[
"Save",
"retention",
"data",
"to",
"a",
"Json",
"formated",
"file"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/modules/inner_retention.py#L259-L326
|
train
|
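A standalone sketch of the reshaping step performed before saving: retention data keyed by (host, service) tuples is folded into each host's dictionary and then dumped to JSON, as in the code above. The sample data is invented for illustration.

import json

data = {
    'hosts': {'srv01': {'state': 'UP'}},
    'services': {('srv01', 'http'): {'state': 'OK'}},
}
# Move each service under its host before serializing.
for host_name in data['hosts']:
    data['hosts'][host_name]['services'] = {}
    data['hosts'][host_name]['name'] = host_name
for host_name, service_description in data['services']:
    data['hosts'][host_name]['services'][service_description] = \
        data['services'][(host_name, service_description)]
print(json.dumps(data['hosts'], indent=2, separators=(',', ': '), sort_keys=True))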
Alignak-monitoring/alignak
|
alignak/objects/checkmodulation.py
|
CheckModulation.get_check_command
|
def get_check_command(self, timeperiods, t_to_go):
"""Get the check_command if we are in the check period modulation
:param t_to_go: time to check if we are in the timeperiod
:type t_to_go:
:return: A check command if we are in the check period, None otherwise
:rtype: alignak.objects.command.Command
"""
if not self.check_period or timeperiods[self.check_period].is_time_valid(t_to_go):
return self.check_command
return None
|
python
|
def get_check_command(self, timeperiods, t_to_go):
"""Get the check_command if we are in the check period modulation
:param t_to_go: time to check if we are in the timeperiod
:type t_to_go:
:return: A check command if we are in the check period, None otherwise
:rtype: alignak.objects.command.Command
"""
if not self.check_period or timeperiods[self.check_period].is_time_valid(t_to_go):
return self.check_command
return None
|
[
"def",
"get_check_command",
"(",
"self",
",",
"timeperiods",
",",
"t_to_go",
")",
":",
"if",
"not",
"self",
".",
"check_period",
"or",
"timeperiods",
"[",
"self",
".",
"check_period",
"]",
".",
"is_time_valid",
"(",
"t_to_go",
")",
":",
"return",
"self",
".",
"check_command",
"return",
"None"
] |
Get the check_command if we are in the check period modulation
:param t_to_go: time to check if we are in the timeperiod
:type t_to_go:
:return: A check command if we are in the check period, None otherwise
:rtype: alignak.objects.command.Command
|
[
"Get",
"the",
"check_command",
"if",
"we",
"are",
"in",
"the",
"check",
"period",
"modulation"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/checkmodulation.py#L113-L123
|
train
|
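An illustrative helper, assuming 'modulation' is a CheckModulation linked against the 'timeperiods' collection; when the modulation period does not apply, the caller keeps its regular command. The helper name is hypothetical.

import time

def effective_command(modulation, timeperiods, default_command):
    # Only get_check_command() comes from the record above.
    cmd = modulation.get_check_command(timeperiods, time.time())
    return cmd if cmd is not None else default_command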
Alignak-monitoring/alignak
|
alignak/objects/checkmodulation.py
|
CheckModulations.linkify
|
def linkify(self, timeperiods, commands):
"""Replace check_period by real Timeperiod object into each CheckModulation
Replace check_command by real Command object into each CheckModulation
:param timeperiods: timeperiods to link to
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param commands: commands to link to
:type commands: alignak.objects.command.Commands
:return: None
"""
self.linkify_with_timeperiods(timeperiods, 'check_period')
self.linkify_one_command_with_commands(commands, 'check_command')
|
python
|
def linkify(self, timeperiods, commands):
"""Replace check_period by real Timeperiod object into each CheckModulation
Replace check_command by real Command object into each CheckModulation
:param timeperiods: timeperiods to link to
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param commands: commands to link to
:type commands: alignak.objects.command.Commands
:return: None
"""
self.linkify_with_timeperiods(timeperiods, 'check_period')
self.linkify_one_command_with_commands(commands, 'check_command')
|
[
"def",
"linkify",
"(",
"self",
",",
"timeperiods",
",",
"commands",
")",
":",
"self",
".",
"linkify_with_timeperiods",
"(",
"timeperiods",
",",
"'check_period'",
")",
"self",
".",
"linkify_one_command_with_commands",
"(",
"commands",
",",
"'check_command'",
")"
] |
Replace check_period by real Timeperiod object into each CheckModulation
Replace check_command by real Command object into each CheckModulation
:param timeperiods: timeperiods to link to
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param commands: commands to link to
:type commands: alignak.objects.command.Commands
:return: None
|
[
"Replace",
"check_period",
"by",
"real",
"Timeperiod",
"object",
"into",
"each",
"CheckModulation",
"Replace",
"check_command",
"by",
"real",
"Command",
"object",
"into",
"each",
"CheckModulation"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/checkmodulation.py#L167-L178
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/checkmodulation.py
|
CheckModulations.new_inner_member
|
def new_inner_member(self, name=None, params=None):
"""Create a CheckModulation object and add it to items
:param name: CheckModulation name
:type name: str
:param params: parameters to init CheckModulation
:type params: dict
:return: None
TODO: Remove this default mutable argument. Usually result in unexpected behavior
"""
if name is None:
name = 'Generated_checkmodulation_%s' % uuid.uuid4()
if params is None:
params = {}
params['checkmodulation_name'] = name
checkmodulation = CheckModulation(params)
self.add_item(checkmodulation)
|
python
|
def new_inner_member(self, name=None, params=None):
"""Create a CheckModulation object and add it to items
:param name: CheckModulation name
:type name: str
:param params: parameters to init CheckModulation
:type params: dict
:return: None
TODO: Remove this default mutable argument. Usually result in unexpected behavior
"""
if name is None:
name = 'Generated_checkmodulation_%s' % uuid.uuid4()
if params is None:
params = {}
params['checkmodulation_name'] = name
checkmodulation = CheckModulation(params)
self.add_item(checkmodulation)
|
[
"def",
"new_inner_member",
"(",
"self",
",",
"name",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"'Generated_checkmodulation_%s'",
"%",
"uuid",
".",
"uuid4",
"(",
")",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"{",
"}",
"params",
"[",
"'checkmodulation_name'",
"]",
"=",
"name",
"checkmodulation",
"=",
"CheckModulation",
"(",
"params",
")",
"self",
".",
"add_item",
"(",
"checkmodulation",
")"
] |
Create a CheckModulation object and add it to items
:param name: CheckModulation name
:type name: str
:param params: parameters to init CheckModulation
:type params: dict
:return: None
TODO: Remove this default mutable argument. Usually result in unexpected behavior
|
[
"Create",
"a",
"CheckModulation",
"object",
"and",
"add",
"it",
"to",
"items"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/checkmodulation.py#L180-L198
|
train
|
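A brief sketch, assuming 'checkmodulations' is an already-built CheckModulations collection; the parameter values are invented, and when no name is given a Generated_checkmodulation_<uuid> name is produced as shown above.

# Hypothetical usage with illustrative command and period names.
checkmodulations.new_inner_member(
    name='night_checks',
    params={'check_command': 'check_dummy!0', 'check_period': 'night'})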
harmsm/PyCmdMessenger
|
PyCmdMessenger/arduino.py
|
ArduinoBoard.open
|
def open(self):
"""
Open the serial connection.
"""
if not self._is_connected:
print("Connecting to arduino on {}... ".format(self.device),end="")
self.comm = serial.Serial()
self.comm.port = self.device
self.comm.baudrate = self.baud_rate
self.comm.timeout = self.timeout
self.dtr = self.enable_dtr
self.comm.open()
time.sleep(self.settle_time)
self._is_connected = True
print("done.")
|
python
|
def open(self):
"""
Open the serial connection.
"""
if not self._is_connected:
print("Connecting to arduino on {}... ".format(self.device),end="")
self.comm = serial.Serial()
self.comm.port = self.device
self.comm.baudrate = self.baud_rate
self.comm.timeout = self.timeout
self.dtr = self.enable_dtr
self.comm.open()
time.sleep(self.settle_time)
self._is_connected = True
print("done.")
|
[
"def",
"open",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_is_connected",
":",
"print",
"(",
"\"Connecting to arduino on {}... \"",
".",
"format",
"(",
"self",
".",
"device",
")",
",",
"end",
"=",
"\"\"",
")",
"self",
".",
"comm",
"=",
"serial",
".",
"Serial",
"(",
")",
"self",
".",
"comm",
".",
"port",
"=",
"self",
".",
"device",
"self",
".",
"comm",
".",
"baudrate",
"=",
"self",
".",
"baud_rate",
"self",
".",
"comm",
".",
"timeout",
"=",
"self",
".",
"timeout",
"self",
".",
"dtr",
"=",
"self",
".",
"enable_dtr",
"self",
".",
"comm",
".",
"open",
"(",
")",
"time",
".",
"sleep",
"(",
"self",
".",
"settle_time",
")",
"self",
".",
"_is_connected",
"=",
"True",
"print",
"(",
"\"done.\"",
")"
] |
Open the serial connection.
|
[
"Open",
"the",
"serial",
"connection",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/arduino.py#L147-L166
|
train
|
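A minimal usage sketch for the connection life cycle; the device path, baud rate and constructor form are assumptions based on typical PyCmdMessenger usage, not taken from this record.

import PyCmdMessenger

# Hypothetical device path; the baud rate must match the Arduino sketch.
board = PyCmdMessenger.ArduinoBoard("/dev/ttyACM0", baud_rate=9600)
board.open()     # no-op if the board is already connected
# ... exchange commands through a CmdMessenger bound to this board ...
board.close()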
harmsm/PyCmdMessenger
|
PyCmdMessenger/arduino.py
|
ArduinoBoard.close
|
def close(self):
"""
Close serial connection.
"""
if self._is_connected:
self.comm.close()
self._is_connected = False
|
python
|
def close(self):
"""
Close serial connection.
"""
if self._is_connected:
self.comm.close()
self._is_connected = False
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_connected",
":",
"self",
".",
"comm",
".",
"close",
"(",
")",
"self",
".",
"_is_connected",
"=",
"False"
] |
Close serial connection.
|
[
"Close",
"serial",
"connection",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/arduino.py#L189-L196
|
train
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger.receive
|
def receive(self,arg_formats=None):
"""
Receive commands coming off the serial port.
arg_formats is an optional keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supersedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty character
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
|
python
|
def receive(self,arg_formats=None):
"""
Receive commands coming off the serial port.
arg_formats is an optional keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supersedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty character
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
|
[
"def",
"receive",
"(",
"self",
",",
"arg_formats",
"=",
"None",
")",
":",
"# Read serial input until a command separator or empty character is",
"# reached ",
"msg",
"=",
"[",
"[",
"]",
"]",
"raw_msg",
"=",
"[",
"]",
"escaped",
"=",
"False",
"command_sep_found",
"=",
"False",
"while",
"True",
":",
"tmp",
"=",
"self",
".",
"board",
".",
"read",
"(",
")",
"raw_msg",
".",
"append",
"(",
"tmp",
")",
"if",
"escaped",
":",
"# Either drop the escape character or, if this wasn't really",
"# an escape, keep previous escape character and new character",
"if",
"tmp",
"in",
"self",
".",
"_escaped_characters",
":",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"tmp",
")",
"escaped",
"=",
"False",
"else",
":",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"self",
".",
"_byte_escape_sep",
")",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"tmp",
")",
"escaped",
"=",
"False",
"else",
":",
"# look for escape character",
"if",
"tmp",
"==",
"self",
".",
"_byte_escape_sep",
":",
"escaped",
"=",
"True",
"# or field separator",
"elif",
"tmp",
"==",
"self",
".",
"_byte_field_sep",
":",
"msg",
".",
"append",
"(",
"[",
"]",
")",
"# or command separator",
"elif",
"tmp",
"==",
"self",
".",
"_byte_command_sep",
":",
"command_sep_found",
"=",
"True",
"break",
"# or any empty characater ",
"elif",
"tmp",
"==",
"b''",
":",
"break",
"# okay, must be something",
"else",
":",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"tmp",
")",
"# No message received given timeouts",
"if",
"len",
"(",
"msg",
")",
"==",
"1",
"and",
"len",
"(",
"msg",
"[",
"0",
"]",
")",
"==",
"0",
":",
"return",
"None",
"# Make sure the message terminated properly",
"if",
"not",
"command_sep_found",
":",
"# empty message (likely from line endings being included) ",
"joined_raw",
"=",
"b''",
".",
"join",
"(",
"raw_msg",
")",
"if",
"joined_raw",
".",
"strip",
"(",
")",
"==",
"b''",
":",
"return",
"None",
"err",
"=",
"\"Incomplete message ({})\"",
".",
"format",
"(",
"joined_raw",
".",
"decode",
"(",
")",
")",
"raise",
"EOFError",
"(",
"err",
")",
"# Turn message into fields",
"fields",
"=",
"[",
"b''",
".",
"join",
"(",
"m",
")",
"for",
"m",
"in",
"msg",
"]",
"# Get the command name.",
"cmd",
"=",
"fields",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"decode",
"(",
")",
"try",
":",
"cmd_name",
"=",
"self",
".",
"_int_to_cmd_name",
"[",
"int",
"(",
"cmd",
")",
"]",
"except",
"(",
"ValueError",
",",
"IndexError",
")",
":",
"if",
"self",
".",
"give_warnings",
":",
"cmd_name",
"=",
"\"unknown\"",
"w",
"=",
"\"Recieved unrecognized command ({}).\"",
".",
"format",
"(",
"cmd",
")",
"warnings",
".",
"warn",
"(",
"w",
",",
"Warning",
")",
"# Figure out what formats to use for each argument. ",
"arg_format_list",
"=",
"[",
"]",
"if",
"arg_formats",
"!=",
"None",
":",
"# The user specified formats",
"arg_format_list",
"=",
"list",
"(",
"arg_formats",
")",
"else",
":",
"try",
":",
"# See if class was initialized with a format for arguments to this",
"# command",
"arg_format_list",
"=",
"self",
".",
"_cmd_name_to_format",
"[",
"cmd_name",
"]",
"except",
"KeyError",
":",
"# if not, guess for all arguments",
"arg_format_list",
"=",
"[",
"\"g\"",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"fields",
"[",
"1",
":",
"]",
")",
")",
"]",
"# Deal with \"*\" format ",
"arg_format_list",
"=",
"self",
".",
"_treat_star_format",
"(",
"arg_format_list",
",",
"fields",
"[",
"1",
":",
"]",
")",
"if",
"len",
"(",
"fields",
"[",
"1",
":",
"]",
")",
">",
"0",
":",
"if",
"len",
"(",
"arg_format_list",
")",
"!=",
"len",
"(",
"fields",
"[",
"1",
":",
"]",
")",
":",
"err",
"=",
"\"Number of argument formats must match the number of recieved arguments.\"",
"raise",
"ValueError",
"(",
"err",
")",
"received",
"=",
"[",
"]",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"fields",
"[",
"1",
":",
"]",
")",
":",
"received",
".",
"append",
"(",
"self",
".",
"_recv_methods",
"[",
"arg_format_list",
"[",
"i",
"]",
"]",
"(",
"f",
")",
")",
"# Record the time the message arrived",
"message_time",
"=",
"time",
".",
"time",
"(",
")",
"return",
"cmd_name",
",",
"received",
",",
"message_time"
] |
Receive commands coming off the serial port.
arg_formats is an optional keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supersedes
the formats specified on initialization.
|
[
"Recieve",
"commands",
"coming",
"off",
"the",
"serial",
"port",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L175-L289
|
train
|
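The receive() record above frames a byte stream using escape, field, and command separators. The sketch below reproduces that framing logic on an in-memory bytes object so it can be tried without a serial board; the separator bytes are illustrative assumptions, not necessarily the library's defaults, and the escape handling is simplified (the escaped byte is always kept literally).

# Minimal sketch of the separator/escape framing used by receive(), run
# against a bytes buffer instead of a serial port. Separator values are
# assumptions for illustration only.
ESCAPE, FIELD_SEP, CMD_SEP = b"/", b",", b";"

def split_frame(raw):
    fields, current, escaped = [], bytearray(), False
    for i in range(len(raw)):
        b = raw[i:i + 1]              # one-byte slice keeps the bytes type
        if escaped:
            current += b              # simplified: keep the escaped byte as-is
            escaped = False
        elif b == ESCAPE:
            escaped = True
        elif b == FIELD_SEP:
            fields.append(bytes(current))
            current = bytearray()
        elif b == CMD_SEP:
            fields.append(bytes(current))
            return fields             # command separator terminates the frame
        else:
            current += b
    raise EOFError("Incomplete message")   # no command separator seen

print(split_frame(b"4,21,hi/, there;"))    # [b'4', b'21', b'hi, there']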
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_char
|
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
|
python
|
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value)
|
[
"def",
"_send_char",
"(",
"self",
",",
"value",
")",
":",
"if",
"type",
"(",
"value",
")",
"!=",
"str",
"and",
"type",
"(",
"value",
")",
"!=",
"bytes",
":",
"err",
"=",
"\"char requires a string or bytes array of length 1\"",
"raise",
"ValueError",
"(",
"err",
")",
"if",
"len",
"(",
"value",
")",
"!=",
"1",
":",
"err",
"=",
"\"char must be a single character, not \\\"{}\\\"\"",
".",
"format",
"(",
"value",
")",
"raise",
"ValueError",
"(",
"err",
")",
"if",
"type",
"(",
"value",
")",
"!=",
"bytes",
":",
"value",
"=",
"value",
".",
"encode",
"(",
"\"ascii\"",
")",
"if",
"value",
"in",
"self",
".",
"_escaped_characters",
":",
"err",
"=",
"\"Cannot send a control character as a single char to arduino. Send as string instead.\"",
"raise",
"OverflowError",
"(",
"err",
")",
"return",
"struct",
".",
"pack",
"(",
"'c'",
",",
"value",
")"
] |
Convert a single char to a bytes object.
|
[
"Convert",
"a",
"single",
"char",
"to",
"a",
"bytes",
"object",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L319-L339
|
train
|
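As the _send_char record shows, a single character ultimately becomes struct.pack('c', ...) on one ASCII byte. A quick standard-library demonstration of that packing:

import struct

packed = struct.pack("c", "A".encode("ascii"))   # 'c' packs exactly one byte
print(packed)                                    # b'A'
print(struct.unpack("c", packed))                # (b'A',)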
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_byte
|
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
|
python
|
def _send_byte(self,value):
"""
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > 255 or value < 0:
err = "Value {} exceeds the size of the board's byte.".format(value)
raise OverflowError(err)
return struct.pack("B",value)
|
[
"def",
"_send_byte",
"(",
"self",
",",
"value",
")",
":",
"# Coerce to int. This will throw a ValueError if the value can't",
"# actually be converted.",
"if",
"type",
"(",
"value",
")",
"!=",
"int",
":",
"new_value",
"=",
"int",
"(",
"value",
")",
"if",
"self",
".",
"give_warnings",
":",
"w",
"=",
"\"Coercing {} into int ({})\"",
".",
"format",
"(",
"value",
",",
"new_value",
")",
"warnings",
".",
"warn",
"(",
"w",
",",
"Warning",
")",
"value",
"=",
"new_value",
"# Range check",
"if",
"value",
">",
"255",
"or",
"value",
"<",
"0",
":",
"err",
"=",
"\"Value {} exceeds the size of the board's byte.\"",
".",
"format",
"(",
"value",
")",
"raise",
"OverflowError",
"(",
"err",
")",
"return",
"struct",
".",
"pack",
"(",
"\"B\"",
",",
"value",
")"
] |
Convert a numerical value into an integer, then to a byte object. Check
bounds for byte.
|
[
"Convert",
"a",
"numerical",
"value",
"into",
"an",
"integer",
"then",
"to",
"a",
"byte",
"object",
".",
"Check",
"bounds",
"for",
"byte",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L341-L362
|
train
|
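The _send_byte record illustrates the pattern shared by all of the numeric senders: coerce to int (warning on coercion), range-check against the target type, then struct.pack. A compact stand-alone version of that pattern for an unsigned byte, using only the standard library:

import struct
import warnings

def pack_byte(value, give_warnings=True):
    # Coerce to int; int() raises ValueError if the value cannot be converted.
    if not isinstance(value, int):
        new_value = int(value)
        if give_warnings:
            warnings.warn("Coercing {} into int ({})".format(value, new_value), Warning)
        value = new_value
    # Range check for an unsigned 8-bit value.
    if value > 255 or value < 0:
        raise OverflowError("Value {} exceeds the size of a byte.".format(value))
    return struct.pack("B", value)

print(pack_byte(200))    # b'\xc8'
print(pack_byte(3.7))    # warns about the coercion, then packs 3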
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_int
|
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
|
python
|
def _send_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object Check
bounds for signed int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.int_max or value < self.board.int_min:
err = "Value {} exceeds the size of the board's int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.int_type,value)
|
[
"def",
"_send_int",
"(",
"self",
",",
"value",
")",
":",
"# Coerce to int. This will throw a ValueError if the value can't ",
"# actually be converted.",
"if",
"type",
"(",
"value",
")",
"!=",
"int",
":",
"new_value",
"=",
"int",
"(",
"value",
")",
"if",
"self",
".",
"give_warnings",
":",
"w",
"=",
"\"Coercing {} into int ({})\"",
".",
"format",
"(",
"value",
",",
"new_value",
")",
"warnings",
".",
"warn",
"(",
"w",
",",
"Warning",
")",
"value",
"=",
"new_value",
"# Range check",
"if",
"value",
">",
"self",
".",
"board",
".",
"int_max",
"or",
"value",
"<",
"self",
".",
"board",
".",
"int_min",
":",
"err",
"=",
"\"Value {} exceeds the size of the board's int.\"",
".",
"format",
"(",
"value",
")",
"raise",
"OverflowError",
"(",
"err",
")",
"return",
"struct",
".",
"pack",
"(",
"self",
".",
"board",
".",
"int_type",
",",
"value",
")"
] |
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed int.
|
[
"Convert",
"a",
"numerical",
"value",
"into",
"an",
"integer",
"then",
"to",
"a",
"bytes",
"object",
"Check",
"bounds",
"for",
"signed",
"int",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L364-L385
|
train
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_unsigned_int
|
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
|
python
|
def _send_unsigned_int(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_int_max or value < self.board.unsigned_int_min:
err = "Value {} exceeds the size of the board's unsigned int.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_int_type,value)
|
[
"def",
"_send_unsigned_int",
"(",
"self",
",",
"value",
")",
":",
"# Coerce to int. This will throw a ValueError if the value can't ",
"# actually be converted.",
"if",
"type",
"(",
"value",
")",
"!=",
"int",
":",
"new_value",
"=",
"int",
"(",
"value",
")",
"if",
"self",
".",
"give_warnings",
":",
"w",
"=",
"\"Coercing {} into int ({})\"",
".",
"format",
"(",
"value",
",",
"new_value",
")",
"warnings",
".",
"warn",
"(",
"w",
",",
"Warning",
")",
"value",
"=",
"new_value",
"# Range check",
"if",
"value",
">",
"self",
".",
"board",
".",
"unsigned_int_max",
"or",
"value",
"<",
"self",
".",
"board",
".",
"unsigned_int_min",
":",
"err",
"=",
"\"Value {} exceeds the size of the board's unsigned int.\"",
".",
"format",
"(",
"value",
")",
"raise",
"OverflowError",
"(",
"err",
")",
"return",
"struct",
".",
"pack",
"(",
"self",
".",
"board",
".",
"unsigned_int_type",
",",
"value",
")"
] |
Convert a numerical value into an integer, then to a bytes object. Check
bounds for unsigned int.
|
[
"Convert",
"a",
"numerical",
"value",
"into",
"an",
"integer",
"then",
"to",
"a",
"bytes",
"object",
".",
"Check",
"bounds",
"for",
"unsigned",
"int",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L387-L407
|
train
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_long
|
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
|
python
|
def _send_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.long_max or value < self.board.long_min:
err = "Value {} exceeds the size of the board's long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.long_type,value)
|
[
"def",
"_send_long",
"(",
"self",
",",
"value",
")",
":",
"# Coerce to int. This will throw a ValueError if the value can't ",
"# actually be converted.",
"if",
"type",
"(",
"value",
")",
"!=",
"int",
":",
"new_value",
"=",
"int",
"(",
"value",
")",
"if",
"self",
".",
"give_warnings",
":",
"w",
"=",
"\"Coercing {} into int ({})\"",
".",
"format",
"(",
"value",
",",
"new_value",
")",
"warnings",
".",
"warn",
"(",
"w",
",",
"Warning",
")",
"value",
"=",
"new_value",
"# Range check",
"if",
"value",
">",
"self",
".",
"board",
".",
"long_max",
"or",
"value",
"<",
"self",
".",
"board",
".",
"long_min",
":",
"err",
"=",
"\"Value {} exceeds the size of the board's long.\"",
".",
"format",
"(",
"value",
")",
"raise",
"OverflowError",
"(",
"err",
")",
"return",
"struct",
".",
"pack",
"(",
"self",
".",
"board",
".",
"long_type",
",",
"value",
")"
] |
Convert a numerical value into an integer, then to a bytes object. Check
bounds for signed long.
|
[
"Convert",
"a",
"numerical",
"value",
"into",
"an",
"integer",
"then",
"to",
"a",
"bytes",
"object",
".",
"Check",
"bounds",
"for",
"signed",
"long",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L409-L430
|
train
|
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_unsigned_long
|
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
|
python
|
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
|
[
"def",
"_send_unsigned_long",
"(",
"self",
",",
"value",
")",
":",
"# Coerce to int. This will throw a ValueError if the value can't ",
"# actually be converted.",
"if",
"type",
"(",
"value",
")",
"!=",
"int",
":",
"new_value",
"=",
"int",
"(",
"value",
")",
"if",
"self",
".",
"give_warnings",
":",
"w",
"=",
"\"Coercing {} into int ({})\"",
".",
"format",
"(",
"value",
",",
"new_value",
")",
"warnings",
".",
"warn",
"(",
"w",
",",
"Warning",
")",
"value",
"=",
"new_value",
"# Range check",
"if",
"value",
">",
"self",
".",
"board",
".",
"unsigned_long_max",
"or",
"value",
"<",
"self",
".",
"board",
".",
"unsigned_long_min",
":",
"err",
"=",
"\"Value {} exceeds the size of the board's unsigned long.\"",
".",
"format",
"(",
"value",
")",
"raise",
"OverflowError",
"(",
"err",
")",
"return",
"struct",
".",
"pack",
"(",
"self",
".",
"board",
".",
"unsigned_long_type",
",",
"value",
")"
] |
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
|
[
"Convert",
"a",
"numerical",
"value",
"into",
"an",
"integer",
"then",
"to",
"a",
"bytes",
"object",
".",
"Check",
"bounds",
"for",
"unsigned",
"long",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L432-L453
|
train
|
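The _send_int, _send_unsigned_int, _send_long and _send_unsigned_long records differ only in which bound attributes and struct format character they read from the board object (int_type, long_type and so on), none of which are shown here. The codes below are therefore ordinary struct format characters chosen for illustration, not the library's actual board attributes:

import struct

# Typical struct codes and their sizes; an 8-bit AVR style board would map
# "int" to 2 bytes and "long" to 4 bytes (assumption, not taken from the records).
for name, code in [("int16", "<h"), ("uint16", "<H"), ("int32", "<l"), ("uint32", "<L")]:
    print(name, struct.calcsize(code), "bytes, e.g.", struct.pack(code, 1000))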
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_string
|
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
|
python
|
def _send_string(self,value):
"""
Convert a string to a bytes object. If value is not a string, it is
be converted to one with a standard string.format call.
"""
if type(value) != bytes:
value = "{}".format(value).encode("ascii")
return value
|
[
"def",
"_send_string",
"(",
"self",
",",
"value",
")",
":",
"if",
"type",
"(",
"value",
")",
"!=",
"bytes",
":",
"value",
"=",
"\"{}\"",
".",
"format",
"(",
"value",
")",
".",
"encode",
"(",
"\"ascii\"",
")",
"return",
"value"
] |
Convert a string to a bytes object. If value is not a string, it will
be converted to one with a standard string.format call.
|
[
"Convert",
"a",
"string",
"to",
"a",
"bytes",
"object",
".",
"If",
"value",
"is",
"not",
"a",
"string",
"it",
"is",
"be",
"converted",
"to",
"one",
"with",
"a",
"standard",
"string",
".",
"format",
"call",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L489-L498
|
train
|
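_send_string reduces to "format, then ASCII-encode"; anything that is not already bytes goes through str.format first:

value = 3.14159
if not isinstance(value, bytes):
    value = "{}".format(value).encode("ascii")
print(value)    # b'3.14159'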
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._send_bool
|
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
|
python
|
def _send_bool(self,value):
"""
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
"""
# Sanity check.
if type(value) != bool and value not in [0,1]:
err = "{} is not boolean.".format(value)
raise ValueError(err)
return struct.pack("?",value)
|
[
"def",
"_send_bool",
"(",
"self",
",",
"value",
")",
":",
"# Sanity check.",
"if",
"type",
"(",
"value",
")",
"!=",
"bool",
"and",
"value",
"not",
"in",
"[",
"0",
",",
"1",
"]",
":",
"err",
"=",
"\"{} is not boolean.\"",
".",
"format",
"(",
"value",
")",
"raise",
"ValueError",
"(",
"err",
")",
"return",
"struct",
".",
"pack",
"(",
"\"?\"",
",",
"value",
")"
] |
Convert a boolean value into a bytes object. Uses 0 and 1 as output.
|
[
"Convert",
"a",
"boolean",
"value",
"into",
"a",
"bytes",
"object",
".",
"Uses",
"0",
"and",
"1",
"as",
"output",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L500-L510
|
train
|
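The boolean sender packs with the "?" struct code, which always produces a single byte holding 0 or 1:

import struct

print(struct.pack("?", True))       # b'\x01'
print(struct.pack("?", False))      # b'\x00'
print(struct.unpack("?", b"\x01"))  # (True,)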
harmsm/PyCmdMessenger
|
PyCmdMessenger/PyCmdMessenger.py
|
CmdMessenger._recv_guess
|
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
python
|
def _recv_guess(self,value):
"""
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
"""
if self.give_warnings:
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.".format(value)
warnings.warn(w,Warning)
tmp_value = value.decode()
try:
float(tmp_value)
if len(tmp_value.split(".")) == 1:
# integer
return int(tmp_value)
else:
# float
return float(tmp_value)
except ValueError:
pass
# Return as string
return self._recv_string(value)
|
[
"def",
"_recv_guess",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"give_warnings",
":",
"w",
"=",
"\"Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data.\"",
".",
"format",
"(",
"value",
")",
"warnings",
".",
"warn",
"(",
"w",
",",
"Warning",
")",
"tmp_value",
"=",
"value",
".",
"decode",
"(",
")",
"try",
":",
"float",
"(",
"tmp_value",
")",
"if",
"len",
"(",
"tmp_value",
".",
"split",
"(",
"\".\"",
")",
")",
"==",
"1",
":",
"# integer",
"return",
"int",
"(",
"tmp_value",
")",
"else",
":",
"# float",
"return",
"float",
"(",
"tmp_value",
")",
"except",
"ValueError",
":",
"pass",
"# Return as string",
"return",
"self",
".",
"_recv_string",
"(",
"value",
")"
] |
Take the binary spew and try to make it into a float or integer. If
that can't be done, return a string.
Note: this is generally a bad idea, as values can be seriously mangled
by going from float -> string -> float. You'll generally be better off
using a format specifier and binary argument passing.
|
[
"Take",
"the",
"binary",
"spew",
"and",
"try",
"to",
"make",
"it",
"into",
"a",
"float",
"or",
"integer",
".",
"If",
"that",
"can",
"t",
"be",
"done",
"return",
"a",
"string",
"."
] |
215d6f9402262662a14a2996f532934339639a5b
|
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L610-L640
|
train
|
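_recv_guess decodes the raw bytes and chooses between int, float, and string based on whether float() succeeds and whether the text contains a decimal point. The same decision tree, stripped of the class plumbing and the warning:

def guess(value):
    text = value.decode()
    try:
        float(text)                    # raises ValueError for non-numeric text
    except ValueError:
        return text                    # fall back to a plain string
    return float(text) if "." in text else int(text)

print(guess(b"42"), guess(b"3.5"), guess(b"hello"))   # 42 3.5 hello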
severb/graypy
|
graypy/handler.py
|
BaseGELFHandler._add_full_message
|
def _add_full_message(gelf_dict, record):
"""Add the ``full_message`` field to the ``gelf_dict`` if any
traceback information exists within the logging record
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:param record: :class:`logging.LogRecord` to extract a full
logging message from to insert into the given ``gelf_dict``.
:type record: logging.LogRecord
"""
# if a traceback exists add it to the log as the full_message field
full_message = None
# format exception information if present
if record.exc_info:
full_message = '\n'.join(
traceback.format_exception(*record.exc_info))
# use pre-formatted exception information in cases where the primary
# exception information was removed, eg. for LogRecord serialization
if record.exc_text:
full_message = record.exc_text
if full_message:
gelf_dict["full_message"] = full_message
|
python
|
def _add_full_message(gelf_dict, record):
"""Add the ``full_message`` field to the ``gelf_dict`` if any
traceback information exists within the logging record
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:param record: :class:`logging.LogRecord` to extract a full
logging message from to insert into the given ``gelf_dict``.
:type record: logging.LogRecord
"""
# if a traceback exists add it to the log as the full_message field
full_message = None
# format exception information if present
if record.exc_info:
full_message = '\n'.join(
traceback.format_exception(*record.exc_info))
# use pre-formatted exception information in cases where the primary
# exception information was removed, eg. for LogRecord serialization
if record.exc_text:
full_message = record.exc_text
if full_message:
gelf_dict["full_message"] = full_message
|
[
"def",
"_add_full_message",
"(",
"gelf_dict",
",",
"record",
")",
":",
"# if a traceback exists add it to the log as the full_message field",
"full_message",
"=",
"None",
"# format exception information if present",
"if",
"record",
".",
"exc_info",
":",
"full_message",
"=",
"'\\n'",
".",
"join",
"(",
"traceback",
".",
"format_exception",
"(",
"*",
"record",
".",
"exc_info",
")",
")",
"# use pre-formatted exception information in cases where the primary",
"# exception information was removed, eg. for LogRecord serialization",
"if",
"record",
".",
"exc_text",
":",
"full_message",
"=",
"record",
".",
"exc_text",
"if",
"full_message",
":",
"gelf_dict",
"[",
"\"full_message\"",
"]",
"=",
"full_message"
] |
Add the ``full_message`` field to the ``gelf_dict`` if any
traceback information exists within the logging record
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:param record: :class:`logging.LogRecord` to extract a full
logging message from to insert into the given ``gelf_dict``.
:type record: logging.LogRecord
|
[
"Add",
"the",
"full_message",
"field",
"to",
"the",
"gelf_dict",
"if",
"any",
"traceback",
"information",
"exists",
"within",
"the",
"logging",
"record"
] |
32018c41a792e71a8de9f9e14f770d1bc60c2313
|
https://github.com/severb/graypy/blob/32018c41a792e71a8de9f9e14f770d1bc60c2313/graypy/handler.py#L194-L216
|
train
|
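The full_message field is assembled with traceback.format_exception over record.exc_info. Outside of a logging handler, the same call looks like this:

import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    exc_info = sys.exc_info()        # the same (type, value, traceback) triple a LogRecord carries
    full_message = "\n".join(traceback.format_exception(*exc_info))

print(full_message.splitlines()[0])  # Traceback (most recent call last):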
severb/graypy
|
graypy/handler.py
|
BaseGELFHandler._resolve_host
|
def _resolve_host(fqdn, localname):
"""Resolve the ``host`` GELF field
:param fqdn: Boolean indicating whether to use :meth:`socket.getfqdn`
to obtain the ``host`` GELF field.
:type fqdn: bool
:param localname: Use specified hostname as the ``host`` GELF field.
:type localname: str or None
:return: String value representing the ``host`` GELF field.
:rtype: str
"""
if fqdn:
return socket.getfqdn()
elif localname is not None:
return localname
return socket.gethostname()
|
python
|
def _resolve_host(fqdn, localname):
"""Resolve the ``host`` GELF field
:param fqdn: Boolean indicating whether to use :meth:`socket.getfqdn`
to obtain the ``host`` GELF field.
:type fqdn: bool
:param localname: Use specified hostname as the ``host`` GELF field.
:type localname: str or None
:return: String value representing the ``host`` GELF field.
:rtype: str
"""
if fqdn:
return socket.getfqdn()
elif localname is not None:
return localname
return socket.gethostname()
|
[
"def",
"_resolve_host",
"(",
"fqdn",
",",
"localname",
")",
":",
"if",
"fqdn",
":",
"return",
"socket",
".",
"getfqdn",
"(",
")",
"elif",
"localname",
"is",
"not",
"None",
":",
"return",
"localname",
"return",
"socket",
".",
"gethostname",
"(",
")"
] |
Resolve the ``host`` GELF field
:param fqdn: Boolean indicating whether to use :meth:`socket.getfqdn`
to obtain the ``host`` GELF field.
:type fqdn: bool
:param localname: Use specified hostname as the ``host`` GELF field.
:type localname: str or None
:return: String value representing the ``host`` GELF field.
:rtype: str
|
[
"Resolve",
"the",
"host",
"GELF",
"field"
] |
32018c41a792e71a8de9f9e14f770d1bc60c2313
|
https://github.com/severb/graypy/blob/32018c41a792e71a8de9f9e14f770d1bc60c2313/graypy/handler.py#L219-L236
|
train
|
severb/graypy
|
graypy/handler.py
|
BaseGELFHandler._add_debugging_fields
|
def _add_debugging_fields(gelf_dict, record):
"""Add debugging fields to the given ``gelf_dict``
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:param record: :class:`logging.LogRecord` to extract debugging
fields from to insert into the given ``gelf_dict``.
:type record: logging.LogRecord
"""
gelf_dict.update({
'file': record.pathname,
'line': record.lineno,
'_function': record.funcName,
'_pid': record.process,
'_thread_name': record.threadName,
})
# record.processName was added in Python 2.6.2
pn = getattr(record, 'processName', None)
if pn is not None:
gelf_dict['_process_name'] = pn
|
python
|
def _add_debugging_fields(gelf_dict, record):
"""Add debugging fields to the given ``gelf_dict``
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:param record: :class:`logging.LogRecord` to extract debugging
fields from to insert into the given ``gelf_dict``.
:type record: logging.LogRecord
"""
gelf_dict.update({
'file': record.pathname,
'line': record.lineno,
'_function': record.funcName,
'_pid': record.process,
'_thread_name': record.threadName,
})
# record.processName was added in Python 2.6.2
pn = getattr(record, 'processName', None)
if pn is not None:
gelf_dict['_process_name'] = pn
|
[
"def",
"_add_debugging_fields",
"(",
"gelf_dict",
",",
"record",
")",
":",
"gelf_dict",
".",
"update",
"(",
"{",
"'file'",
":",
"record",
".",
"pathname",
",",
"'line'",
":",
"record",
".",
"lineno",
",",
"'_function'",
":",
"record",
".",
"funcName",
",",
"'_pid'",
":",
"record",
".",
"process",
",",
"'_thread_name'",
":",
"record",
".",
"threadName",
",",
"}",
")",
"# record.processName was added in Python 2.6.2",
"pn",
"=",
"getattr",
"(",
"record",
",",
"'processName'",
",",
"None",
")",
"if",
"pn",
"is",
"not",
"None",
":",
"gelf_dict",
"[",
"'_process_name'",
"]",
"=",
"pn"
] |
Add debugging fields to the given ``gelf_dict``
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:param record: :class:`logging.LogRecord` to extract debugging
fields from to insert into the given ``gelf_dict``.
:type record: logging.LogRecord
|
[
"Add",
"debugging",
"fields",
"to",
"the",
"given",
"gelf_dict"
] |
32018c41a792e71a8de9f9e14f770d1bc60c2313
|
https://github.com/severb/graypy/blob/32018c41a792e71a8de9f9e14f770d1bc60c2313/graypy/handler.py#L239-L259
|
train
|
severb/graypy
|
graypy/handler.py
|
BaseGELFHandler._add_extra_fields
|
def _add_extra_fields(gelf_dict, record):
"""Add extra fields to the given ``gelf_dict``
However, this does not add additional fields in to ``message_dict``
that are either duplicated from standard :class:`logging.LogRecord`
attributes, duplicated from the python logging module source
(e.g. ``exc_text``), or violate GLEF format (i.e. ``id``).
.. seealso::
The list of standard :class:`logging.LogRecord` attributes can be
found at:
http://docs.python.org/library/logging.html#logrecord-attributes
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:param record: :class:`logging.LogRecord` to extract extra fields
from to insert into the given ``gelf_dict``.
:type record: logging.LogRecord
"""
# skip_list is used to filter additional fields in a log message.
skip_list = (
'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
'msecs', 'message', 'msg', 'name', 'pathname', 'process',
'processName', 'relativeCreated', 'thread', 'threadName')
for key, value in record.__dict__.items():
if key not in skip_list and not key.startswith('_'):
gelf_dict['_%s' % key] = value
|
python
|
def _add_extra_fields(gelf_dict, record):
"""Add extra fields to the given ``gelf_dict``
However, this does not add additional fields in to ``message_dict``
that are either duplicated from standard :class:`logging.LogRecord`
attributes, duplicated from the python logging module source
(e.g. ``exc_text``), or violate GLEF format (i.e. ``id``).
.. seealso::
The list of standard :class:`logging.LogRecord` attributes can be
found at:
http://docs.python.org/library/logging.html#logrecord-attributes
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:param record: :class:`logging.LogRecord` to extract extra fields
from to insert into the given ``gelf_dict``.
:type record: logging.LogRecord
"""
# skip_list is used to filter additional fields in a log message.
skip_list = (
'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
'msecs', 'message', 'msg', 'name', 'pathname', 'process',
'processName', 'relativeCreated', 'thread', 'threadName')
for key, value in record.__dict__.items():
if key not in skip_list and not key.startswith('_'):
gelf_dict['_%s' % key] = value
|
[
"def",
"_add_extra_fields",
"(",
"gelf_dict",
",",
"record",
")",
":",
"# skip_list is used to filter additional fields in a log message.",
"skip_list",
"=",
"(",
"'args'",
",",
"'asctime'",
",",
"'created'",
",",
"'exc_info'",
",",
"'exc_text'",
",",
"'filename'",
",",
"'funcName'",
",",
"'id'",
",",
"'levelname'",
",",
"'levelno'",
",",
"'lineno'",
",",
"'module'",
",",
"'msecs'",
",",
"'message'",
",",
"'msg'",
",",
"'name'",
",",
"'pathname'",
",",
"'process'",
",",
"'processName'",
",",
"'relativeCreated'",
",",
"'thread'",
",",
"'threadName'",
")",
"for",
"key",
",",
"value",
"in",
"record",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"skip_list",
"and",
"not",
"key",
".",
"startswith",
"(",
"'_'",
")",
":",
"gelf_dict",
"[",
"'_%s'",
"%",
"key",
"]",
"=",
"value"
] |
Add extra fields to the given ``gelf_dict``
However, this does not add additional fields into ``message_dict``
that are either duplicated from standard :class:`logging.LogRecord`
attributes, duplicated from the python logging module source
(e.g. ``exc_text``), or violate GELF format (i.e. ``id``).
.. seealso::
The list of standard :class:`logging.LogRecord` attributes can be
found at:
http://docs.python.org/library/logging.html#logrecord-attributes
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:param record: :class:`logging.LogRecord` to extract extra fields
from to insert into the given ``gelf_dict``.
:type record: logging.LogRecord
|
[
"Add",
"extra",
"fields",
"to",
"the",
"given",
"gelf_dict"
] |
32018c41a792e71a8de9f9e14f770d1bc60c2313
|
https://github.com/severb/graypy/blob/32018c41a792e71a8de9f9e14f770d1bc60c2313/graypy/handler.py#L262-L294
|
train
|
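_add_extra_fields copies every LogRecord attribute that is not in skip_list and does not start with an underscore, prefixing it with "_" as GELF requires for additional fields. A reduced illustration against a real LogRecord (the skip set below is abbreviated for brevity):

import logging

record = logging.LogRecord("demo", logging.INFO, __file__, 1, "msg", None, None)
record.request_id = "abc-123"   # the kind of attribute logger.info(..., extra={...}) would add

skip = {"args", "msg", "name", "levelno", "levelname", "pathname", "lineno"}  # abbreviated skip list
extra = {"_%s" % key: value for key, value in record.__dict__.items()
         if key not in skip and not key.startswith("_")}
print(extra["_request_id"])     # abc-123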
severb/graypy
|
graypy/handler.py
|
BaseGELFHandler._pack_gelf_dict
|
def _pack_gelf_dict(gelf_dict):
"""Convert a given ``gelf_dict`` to a JSON-encoded string, thus,
creating an uncompressed GELF log ready for consumption by Graylog.
Since we cannot be 100% sure of what is contained in the ``gelf_dict``
we have to do some sanitation.
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:return: A prepped JSON-encoded GELF log as a bytes string
encoded in UTF-8.
:rtype: bytes
"""
gelf_dict = BaseGELFHandler._sanitize_to_unicode(gelf_dict)
packed = json.dumps(
gelf_dict,
separators=',:',
default=BaseGELFHandler._object_to_json
)
return packed.encode('utf-8')
|
python
|
def _pack_gelf_dict(gelf_dict):
"""Convert a given ``gelf_dict`` to a JSON-encoded string, thus,
creating an uncompressed GELF log ready for consumption by Graylog.
Since we cannot be 100% sure of what is contained in the ``gelf_dict``
we have to do some sanitation.
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:return: A prepped JSON-encoded GELF log as a bytes string
encoded in UTF-8.
:rtype: bytes
"""
gelf_dict = BaseGELFHandler._sanitize_to_unicode(gelf_dict)
packed = json.dumps(
gelf_dict,
separators=',:',
default=BaseGELFHandler._object_to_json
)
return packed.encode('utf-8')
|
[
"def",
"_pack_gelf_dict",
"(",
"gelf_dict",
")",
":",
"gelf_dict",
"=",
"BaseGELFHandler",
".",
"_sanitize_to_unicode",
"(",
"gelf_dict",
")",
"packed",
"=",
"json",
".",
"dumps",
"(",
"gelf_dict",
",",
"separators",
"=",
"',:'",
",",
"default",
"=",
"BaseGELFHandler",
".",
"_object_to_json",
")",
"return",
"packed",
".",
"encode",
"(",
"'utf-8'",
")"
] |
Convert a given ``gelf_dict`` to a JSON-encoded string, thus,
creating an uncompressed GELF log ready for consumption by Graylog.
Since we cannot be 100% sure of what is contained in the ``gelf_dict``
we have to do some sanitation.
:param gelf_dict: dictionary representation of a GELF log.
:type gelf_dict: dict
:return: A prepped JSON-encoded GELF log as a bytes string
encoded in UTF-8.
:rtype: bytes
|
[
"Convert",
"a",
"given",
"gelf_dict",
"to",
"a",
"JSON",
"-",
"encoded",
"string",
"thus",
"creating",
"an",
"uncompressed",
"GELF",
"log",
"ready",
"for",
"consumption",
"by",
"Graylog",
"."
] |
32018c41a792e71a8de9f9e14f770d1bc60c2313
|
https://github.com/severb/graypy/blob/32018c41a792e71a8de9f9e14f770d1bc60c2313/graypy/handler.py#L297-L317
|
train
|
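The packing step is json.dumps with compact separators plus a default hook for objects JSON cannot serialize natively; the records above delegate that hook to _object_to_json, which ISO-formats datetimes and repr()s everything else. A self-contained equivalent:

import datetime
import json

def to_json_value(obj):
    # datetimes become ISO strings; anything else falls back to repr()
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    return repr(obj)

gelf_dict = {"short_message": "hi", "_when": datetime.datetime(2020, 1, 2, 3, 4, 5)}
packed = json.dumps(gelf_dict, separators=(",", ":"), default=to_json_value).encode("utf-8")
print(packed)   # b'{"short_message":"hi","_when":"2020-01-02T03:04:05"}'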
severb/graypy
|
graypy/handler.py
|
BaseGELFHandler._sanitize_to_unicode
|
def _sanitize_to_unicode(obj):
"""Convert all strings records of the object to unicode
:param obj: object to sanitize to unicode.
:type obj: object
:return: Unicode string representation of the given object.
:rtype: str
"""
if isinstance(obj, dict):
return dict((BaseGELFHandler._sanitize_to_unicode(k), BaseGELFHandler._sanitize_to_unicode(v)) for k, v in obj.items())
if isinstance(obj, (list, tuple)):
return obj.__class__([BaseGELFHandler._sanitize_to_unicode(i) for i in obj])
if isinstance(obj, data):
obj = obj.decode('utf-8', errors='replace')
return obj
|
python
|
def _sanitize_to_unicode(obj):
"""Convert all strings records of the object to unicode
:param obj: object to sanitize to unicode.
:type obj: object
:return: Unicode string representation of the given object.
:rtype: str
"""
if isinstance(obj, dict):
return dict((BaseGELFHandler._sanitize_to_unicode(k), BaseGELFHandler._sanitize_to_unicode(v)) for k, v in obj.items())
if isinstance(obj, (list, tuple)):
return obj.__class__([BaseGELFHandler._sanitize_to_unicode(i) for i in obj])
if isinstance(obj, data):
obj = obj.decode('utf-8', errors='replace')
return obj
|
[
"def",
"_sanitize_to_unicode",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"return",
"dict",
"(",
"(",
"BaseGELFHandler",
".",
"_sanitize_to_unicode",
"(",
"k",
")",
",",
"BaseGELFHandler",
".",
"_sanitize_to_unicode",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"obj",
".",
"items",
"(",
")",
")",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"obj",
".",
"__class__",
"(",
"[",
"BaseGELFHandler",
".",
"_sanitize_to_unicode",
"(",
"i",
")",
"for",
"i",
"in",
"obj",
"]",
")",
"if",
"isinstance",
"(",
"obj",
",",
"data",
")",
":",
"obj",
"=",
"obj",
".",
"decode",
"(",
"'utf-8'",
",",
"errors",
"=",
"'replace'",
")",
"return",
"obj"
] |
Convert all strings records of the object to unicode
:param obj: object to sanitize to unicode.
:type obj: object
:return: Unicode string representation of the given object.
:rtype: str
|
[
"Convert",
"all",
"strings",
"records",
"of",
"the",
"object",
"to",
"unicode"
] |
32018c41a792e71a8de9f9e14f770d1bc60c2313
|
https://github.com/severb/graypy/blob/32018c41a792e71a8de9f9e14f770d1bc60c2313/graypy/handler.py#L320-L335
|
train
|
severb/graypy
|
graypy/handler.py
|
BaseGELFHandler._object_to_json
|
def _object_to_json(obj):
"""Convert objects that cannot be natively serialized into JSON
into their string representation
For datetime based objects convert them into their ISO formatted
string as specified by :meth:`datetime.datetime.isoformat`.
:param obj: object to convert into a JSON via getting its string
representation.
:type obj: object
:return: String value representing the given object ready to be
encoded into a JSON.
:rtype: str
"""
if isinstance(obj, datetime.datetime):
return obj.isoformat()
return repr(obj)
|
python
|
def _object_to_json(obj):
"""Convert objects that cannot be natively serialized into JSON
into their string representation
For datetime based objects convert them into their ISO formatted
string as specified by :meth:`datetime.datetime.isoformat`.
:param obj: object to convert into a JSON via getting its string
representation.
:type obj: object
:return: String value representing the given object ready to be
encoded into a JSON.
:rtype: str
"""
if isinstance(obj, datetime.datetime):
return obj.isoformat()
return repr(obj)
|
[
"def",
"_object_to_json",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"datetime",
".",
"datetime",
")",
":",
"return",
"obj",
".",
"isoformat",
"(",
")",
"return",
"repr",
"(",
"obj",
")"
] |
Convert objects that cannot be natively serialized into JSON
into their string representation
For datetime based objects convert them into their ISO formatted
string as specified by :meth:`datetime.datetime.isoformat`.
:param obj: object to convert into a JSON via getting its string
representation.
:type obj: object
:return: String value representing the given object ready to be
encoded into a JSON.
:rtype: str
|
[
"Convert",
"objects",
"that",
"cannot",
"be",
"natively",
"serialized",
"into",
"JSON",
"into",
"their",
"string",
"representation"
] |
32018c41a792e71a8de9f9e14f770d1bc60c2313
|
https://github.com/severb/graypy/blob/32018c41a792e71a8de9f9e14f770d1bc60c2313/graypy/handler.py#L338-L355
|
train
|
severb/graypy
|
graypy/handler.py
|
GELFTLSHandler.makeSocket
|
def makeSocket(self, timeout=1):
"""Override SocketHandler.makeSocket, to allow creating wrapped
TLS sockets"""
plain_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(plain_socket, 'settimeout'):
plain_socket.settimeout(timeout)
wrapped_socket = ssl.wrap_socket(
plain_socket,
ca_certs=self.ca_certs,
cert_reqs=self.reqs,
keyfile=self.keyfile,
certfile=self.certfile
)
wrapped_socket.connect((self.host, self.port))
return wrapped_socket
|
python
|
def makeSocket(self, timeout=1):
"""Override SocketHandler.makeSocket, to allow creating wrapped
TLS sockets"""
plain_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(plain_socket, 'settimeout'):
plain_socket.settimeout(timeout)
wrapped_socket = ssl.wrap_socket(
plain_socket,
ca_certs=self.ca_certs,
cert_reqs=self.reqs,
keyfile=self.keyfile,
certfile=self.certfile
)
wrapped_socket.connect((self.host, self.port))
return wrapped_socket
|
[
"def",
"makeSocket",
"(",
"self",
",",
"timeout",
"=",
"1",
")",
":",
"plain_socket",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"if",
"hasattr",
"(",
"plain_socket",
",",
"'settimeout'",
")",
":",
"plain_socket",
".",
"settimeout",
"(",
"timeout",
")",
"wrapped_socket",
"=",
"ssl",
".",
"wrap_socket",
"(",
"plain_socket",
",",
"ca_certs",
"=",
"self",
".",
"ca_certs",
",",
"cert_reqs",
"=",
"self",
".",
"reqs",
",",
"keyfile",
"=",
"self",
".",
"keyfile",
",",
"certfile",
"=",
"self",
".",
"certfile",
")",
"wrapped_socket",
".",
"connect",
"(",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
")",
"return",
"wrapped_socket"
] |
Override SocketHandler.makeSocket, to allow creating wrapped
TLS sockets
|
[
"Override",
"SocketHandler",
".",
"makeSocket",
"to",
"allow",
"creating",
"wrapped",
"TLS",
"sockets"
] |
32018c41a792e71a8de9f9e14f770d1bc60c2313
|
https://github.com/severb/graypy/blob/32018c41a792e71a8de9f9e14f770d1bc60c2313/graypy/handler.py#L451-L468
|
train
|
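makeSocket wraps a plain TCP socket with ssl.wrap_socket before connecting. That function is deprecated in current Python, so the sketch below substitutes the ssl.SSLContext equivalent; it is an alternative formulation rather than the handler's own code, and the host, port, and certificate path are placeholders:

import socket
import ssl

def make_tls_socket(host, port, ca_certs=None, timeout=1):
    context = ssl.create_default_context(cafile=ca_certs)   # verifies the server certificate by default
    plain = socket.create_connection((host, port), timeout=timeout)
    return context.wrap_socket(plain, server_hostname=host)

# Placeholder endpoint, not a real Graylog server:
# tls_sock = make_tls_socket("graylog.example.com", 12201, ca_certs="/path/to/ca.pem")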
codeinthehole/purl
|
purl/url.py
|
to_unicode
|
def to_unicode(string):
"""
Ensure a passed string is unicode
"""
if isinstance(string, six.binary_type):
return string.decode('utf8')
if isinstance(string, six.text_type):
return string
if six.PY2:
return unicode(string)
return str(string)
|
python
|
def to_unicode(string):
"""
Ensure a passed string is unicode
"""
if isinstance(string, six.binary_type):
return string.decode('utf8')
if isinstance(string, six.text_type):
return string
if six.PY2:
return unicode(string)
return str(string)
|
[
"def",
"to_unicode",
"(",
"string",
")",
":",
"if",
"isinstance",
"(",
"string",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"string",
".",
"decode",
"(",
"'utf8'",
")",
"if",
"isinstance",
"(",
"string",
",",
"six",
".",
"text_type",
")",
":",
"return",
"string",
"if",
"six",
".",
"PY2",
":",
"return",
"unicode",
"(",
"string",
")",
"return",
"str",
"(",
"string",
")"
] |
Ensure a passed string is unicode
|
[
"Ensure",
"a",
"passed",
"string",
"is",
"unicode"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L22-L32
|
train
|
codeinthehole/purl
|
purl/url.py
|
to_utf8
|
def to_utf8(string):
"""
Encode a string as a UTF8 bytestring. This function could be passed a
bytestring or unicode string so must distinguish between the two.
"""
if isinstance(string, six.text_type):
return string.encode('utf8')
if isinstance(string, six.binary_type):
return string
return str(string)
|
python
|
def to_utf8(string):
"""
Encode a string as a UTF8 bytestring. This function could be passed a
bytestring or unicode string so must distinguish between the two.
"""
if isinstance(string, six.text_type):
return string.encode('utf8')
if isinstance(string, six.binary_type):
return string
return str(string)
|
[
"def",
"to_utf8",
"(",
"string",
")",
":",
"if",
"isinstance",
"(",
"string",
",",
"six",
".",
"text_type",
")",
":",
"return",
"string",
".",
"encode",
"(",
"'utf8'",
")",
"if",
"isinstance",
"(",
"string",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"string",
"return",
"str",
"(",
"string",
")"
] |
Encode a string as a UTF8 bytestring. This function could be passed a
bytestring or unicode string so must distinguish between the two.
|
[
"Encode",
"a",
"string",
"as",
"a",
"UTF8",
"bytestring",
".",
"This",
"function",
"could",
"be",
"passed",
"a",
"bytestring",
"or",
"unicode",
"string",
"so",
"must",
"distinguish",
"between",
"the",
"two",
"."
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L35-L44
|
train
|
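to_unicode and to_utf8 are the usual bytes/text bridges; on Python 3 (without the six shims) they reduce to decode/encode plus a str() fallback:

def to_text(value):
    # bytes -> str; anything else -> its string form
    return value.decode("utf8") if isinstance(value, bytes) else str(value)

def to_bytes(value):
    # str (or any other object) -> UTF-8 bytes
    return value if isinstance(value, bytes) else str(value).encode("utf8")

print(to_text(b"caf\xc3\xa9"), to_bytes("café"), to_bytes(42))   # café b'caf\xc3\xa9' b'42'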
codeinthehole/purl
|
purl/url.py
|
dict_to_unicode
|
def dict_to_unicode(raw_dict):
"""
Ensure all keys and values in a dict are unicode.
The passed dict is assumed to have lists for all values.
"""
decoded = {}
for key, value in raw_dict.items():
decoded[to_unicode(key)] = map(
to_unicode, value)
return decoded
|
python
|
def dict_to_unicode(raw_dict):
"""
Ensure all keys and values in a dict are unicode.
The passed dict is assumed to have lists for all values.
"""
decoded = {}
for key, value in raw_dict.items():
decoded[to_unicode(key)] = map(
to_unicode, value)
return decoded
|
[
"def",
"dict_to_unicode",
"(",
"raw_dict",
")",
":",
"decoded",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"raw_dict",
".",
"items",
"(",
")",
":",
"decoded",
"[",
"to_unicode",
"(",
"key",
")",
"]",
"=",
"map",
"(",
"to_unicode",
",",
"value",
")",
"return",
"decoded"
] |
Ensure all keys and values in a dict are unicode.
The passed dict is assumed to have lists for all values.
|
[
"Ensure",
"all",
"keys",
"and",
"values",
"in",
"a",
"dict",
"are",
"unicode",
"."
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L47-L57
|
train
|
codeinthehole/purl
|
purl/url.py
|
unicode_urlencode
|
def unicode_urlencode(query, doseq=True):
"""
Custom wrapper around urlencode to support unicode
Python urlencode doesn't handle unicode well so we need to convert to
bytestrings before using it:
http://stackoverflow.com/questions/6480723/urllib-urlencode-doesnt-like-unicode-values-how-about-this-workaround
"""
pairs = []
for key, value in query.items():
if isinstance(value, list):
value = list(map(to_utf8, value))
else:
value = to_utf8(value)
pairs.append((to_utf8(key), value))
encoded_query = dict(pairs)
xx = urlencode(encoded_query, doseq)
return xx
|
python
|
def unicode_urlencode(query, doseq=True):
"""
Custom wrapper around urlencode to support unicode
Python urlencode doesn't handle unicode well so we need to convert to
bytestrings before using it:
http://stackoverflow.com/questions/6480723/urllib-urlencode-doesnt-like-unicode-values-how-about-this-workaround
"""
pairs = []
for key, value in query.items():
if isinstance(value, list):
value = list(map(to_utf8, value))
else:
value = to_utf8(value)
pairs.append((to_utf8(key), value))
encoded_query = dict(pairs)
xx = urlencode(encoded_query, doseq)
return xx
|
[
"def",
"unicode_urlencode",
"(",
"query",
",",
"doseq",
"=",
"True",
")",
":",
"pairs",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"query",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"list",
"(",
"map",
"(",
"to_utf8",
",",
"value",
")",
")",
"else",
":",
"value",
"=",
"to_utf8",
"(",
"value",
")",
"pairs",
".",
"append",
"(",
"(",
"to_utf8",
"(",
"key",
")",
",",
"value",
")",
")",
"encoded_query",
"=",
"dict",
"(",
"pairs",
")",
"xx",
"=",
"urlencode",
"(",
"encoded_query",
",",
"doseq",
")",
"return",
"xx"
] |
Custom wrapper around urlencode to support unicode
Python urlencode doesn't handle unicode well so we need to convert to
bytestrings before using it:
http://stackoverflow.com/questions/6480723/urllib-urlencode-doesnt-like-unicode-values-how-about-this-workaround
|
[
"Custom",
"wrapper",
"around",
"urlencode",
"to",
"support",
"unicode"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L80-L97
|
train
|
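The wrapper exists because the Python 2 urlencode chokes on unicode, so keys and values are converted to UTF-8 bytestrings first; doseq=True makes list values expand into repeated parameters. On Python 3 the effect is easiest to see directly:

from urllib.parse import urlencode

query = {"q": "café", "tag": ["a", "b"]}
print(urlencode(query, doseq=True))
# q=caf%C3%A9&tag=a&tag=b   (doseq=True expands the list into repeated keys)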
codeinthehole/purl
|
purl/url.py
|
parse
|
def parse(url_str):
"""
Extract all parts from a URL string and return them as a dictionary
"""
url_str = to_unicode(url_str)
result = urlparse(url_str)
netloc_parts = result.netloc.rsplit('@', 1)
if len(netloc_parts) == 1:
username = password = None
host = netloc_parts[0]
else:
user_and_pass = netloc_parts[0].split(':')
if len(user_and_pass) == 2:
username, password = user_and_pass
elif len(user_and_pass) == 1:
username = user_and_pass[0]
password = None
host = netloc_parts[1]
if host and ':' in host:
host = host.split(':')[0]
return {'host': host,
'username': username,
'password': password,
'scheme': result.scheme,
'port': result.port,
'path': result.path,
'query': result.query,
'fragment': result.fragment}
|
python
|
def parse(url_str):
"""
Extract all parts from a URL string and return them as a dictionary
"""
url_str = to_unicode(url_str)
result = urlparse(url_str)
netloc_parts = result.netloc.rsplit('@', 1)
if len(netloc_parts) == 1:
username = password = None
host = netloc_parts[0]
else:
user_and_pass = netloc_parts[0].split(':')
if len(user_and_pass) == 2:
username, password = user_and_pass
elif len(user_and_pass) == 1:
username = user_and_pass[0]
password = None
host = netloc_parts[1]
if host and ':' in host:
host = host.split(':')[0]
return {'host': host,
'username': username,
'password': password,
'scheme': result.scheme,
'port': result.port,
'path': result.path,
'query': result.query,
'fragment': result.fragment}
|
[
"def",
"parse",
"(",
"url_str",
")",
":",
"url_str",
"=",
"to_unicode",
"(",
"url_str",
")",
"result",
"=",
"urlparse",
"(",
"url_str",
")",
"netloc_parts",
"=",
"result",
".",
"netloc",
".",
"rsplit",
"(",
"'@'",
",",
"1",
")",
"if",
"len",
"(",
"netloc_parts",
")",
"==",
"1",
":",
"username",
"=",
"password",
"=",
"None",
"host",
"=",
"netloc_parts",
"[",
"0",
"]",
"else",
":",
"user_and_pass",
"=",
"netloc_parts",
"[",
"0",
"]",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"user_and_pass",
")",
"==",
"2",
":",
"username",
",",
"password",
"=",
"user_and_pass",
"elif",
"len",
"(",
"user_and_pass",
")",
"==",
"1",
":",
"username",
"=",
"user_and_pass",
"[",
"0",
"]",
"password",
"=",
"None",
"host",
"=",
"netloc_parts",
"[",
"1",
"]",
"if",
"host",
"and",
"':'",
"in",
"host",
":",
"host",
"=",
"host",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"return",
"{",
"'host'",
":",
"host",
",",
"'username'",
":",
"username",
",",
"'password'",
":",
"password",
",",
"'scheme'",
":",
"result",
".",
"scheme",
",",
"'port'",
":",
"result",
".",
"port",
",",
"'path'",
":",
"result",
".",
"path",
",",
"'query'",
":",
"result",
".",
"query",
",",
"'fragment'",
":",
"result",
".",
"fragment",
"}"
] |
Extract all parts from a URL string and return them as a dictionary
|
[
"Extract",
"all",
"parts",
"from",
"a",
"URL",
"string",
"and",
"return",
"them",
"as",
"a",
"dictionary"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L100-L129
|
train
|
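parse() layers manual username/password/host splitting on top of urlparse. The standard library's result object already exposes most of those pieces as properties, which is useful to see next to the record above:

from urllib.parse import urlparse

result = urlparse("https://alice:secret@example.com:8443/a/b?x=1#frag")
print(result.scheme, result.hostname, result.port)   # https example.com 8443
print(result.username, result.password)              # alice secret
print(result.path, result.query, result.fragment)    # /a/b x=1 frag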
codeinthehole/purl
|
purl/url.py
|
URL.netloc
|
def netloc(self):
"""
Return the netloc
"""
url = self._tuple
if url.username and url.password:
netloc = '%s:%s@%s' % (url.username, url.password, url.host)
elif url.username and not url.password:
netloc = '%s@%s' % (url.username, url.host)
else:
netloc = url.host
if url.port:
netloc = '%s:%s' % (netloc, url.port)
return netloc
|
python
|
def netloc(self):
"""
Return the netloc
"""
url = self._tuple
if url.username and url.password:
netloc = '%s:%s@%s' % (url.username, url.password, url.host)
elif url.username and not url.password:
netloc = '%s@%s' % (url.username, url.host)
else:
netloc = url.host
if url.port:
netloc = '%s:%s' % (netloc, url.port)
return netloc
|
[
"def",
"netloc",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"_tuple",
"if",
"url",
".",
"username",
"and",
"url",
".",
"password",
":",
"netloc",
"=",
"'%s:%s@%s'",
"%",
"(",
"url",
".",
"username",
",",
"url",
".",
"password",
",",
"url",
".",
"host",
")",
"elif",
"url",
".",
"username",
"and",
"not",
"url",
".",
"password",
":",
"netloc",
"=",
"'%s@%s'",
"%",
"(",
"url",
".",
"username",
",",
"url",
".",
"host",
")",
"else",
":",
"netloc",
"=",
"url",
".",
"host",
"if",
"url",
".",
"port",
":",
"netloc",
"=",
"'%s:%s'",
"%",
"(",
"netloc",
",",
"url",
".",
"port",
")",
"return",
"netloc"
] |
Return the netloc
|
[
"Return",
"the",
"netloc"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L221-L234
|
train
|
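A small sketch of netloc() from the record above, assuming the URL constructor parses credentials and port out of the string the way the earlier parse() record suggests; the top-level import is an assumption.

from purl import URL  # top-level import assumed

u = URL('https://alice:secret@example.com:8080/docs')
# username, password, host and port are reassembled into the netloc string.
assert u.netloc() == 'alice:secret@example.com:8080'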
codeinthehole/purl
|
purl/url.py
|
URL.host
|
def host(self, value=None):
"""
Return the host
:param string value: new host string
"""
if value is not None:
return URL._mutate(self, host=value)
return self._tuple.host
|
python
|
def host(self, value=None):
"""
Return the host
:param string value: new host string
"""
if value is not None:
return URL._mutate(self, host=value)
return self._tuple.host
|
[
"def",
"host",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"host",
"=",
"value",
")",
"return",
"self",
".",
"_tuple",
".",
"host"
] |
Return the host
:param string value: new host string
|
[
"Return",
"the",
"host"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L236-L244
|
train
|
codeinthehole/purl
|
purl/url.py
|
URL.username
|
def username(self, value=None):
"""
Return or set the username
:param string value: the new username to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, username=value)
return unicode_unquote(self._tuple.username)
|
python
|
def username(self, value=None):
"""
Return or set the username
:param string value: the new username to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, username=value)
return unicode_unquote(self._tuple.username)
|
[
"def",
"username",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"username",
"=",
"value",
")",
"return",
"unicode_unquote",
"(",
"self",
".",
"_tuple",
".",
"username",
")"
] |
Return or set the username
:param string value: the new username to use
:returns: string or new :class:`URL` instance
|
[
"Return",
"or",
"set",
"the",
"username"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L248-L257
|
train
|
codeinthehole/purl
|
purl/url.py
|
URL.password
|
def password(self, value=None):
"""
Return or set the password
:param string value: the new password to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, password=value)
return unicode_unquote(self._tuple.password)
|
python
|
def password(self, value=None):
"""
Return or set the password
:param string value: the new password to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, password=value)
return unicode_unquote(self._tuple.password)
|
[
"def",
"password",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"password",
"=",
"value",
")",
"return",
"unicode_unquote",
"(",
"self",
".",
"_tuple",
".",
"password",
")"
] |
Return or set the password
:param string value: the new password to use
:returns: string or new :class:`URL` instance
|
[
"Return",
"or",
"set",
"the",
"password"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L259-L268
|
train
|
codeinthehole/purl
|
purl/url.py
|
URL.scheme
|
def scheme(self, value=None):
"""
Return or set the scheme.
:param string value: the new scheme to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, scheme=value)
return self._tuple.scheme
|
python
|
def scheme(self, value=None):
"""
Return or set the scheme.
:param string value: the new scheme to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, scheme=value)
return self._tuple.scheme
|
[
"def",
"scheme",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"scheme",
"=",
"value",
")",
"return",
"self",
".",
"_tuple",
".",
"scheme"
] |
Return or set the scheme.
:param string value: the new scheme to use
:returns: string or new :class:`URL` instance
|
[
"Return",
"or",
"set",
"the",
"scheme",
"."
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L295-L304
|
train
|
codeinthehole/purl
|
purl/url.py
|
URL.path
|
def path(self, value=None):
"""
Return or set the path
:param string value: the new path to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
if not value.startswith('/'):
value = '/' + value
encoded_value = unicode_quote(value)
return URL._mutate(self, path=encoded_value)
return self._tuple.path
|
python
|
def path(self, value=None):
"""
Return or set the path
:param string value: the new path to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
if not value.startswith('/'):
value = '/' + value
encoded_value = unicode_quote(value)
return URL._mutate(self, path=encoded_value)
return self._tuple.path
|
[
"def",
"path",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"if",
"not",
"value",
".",
"startswith",
"(",
"'/'",
")",
":",
"value",
"=",
"'/'",
"+",
"value",
"encoded_value",
"=",
"unicode_quote",
"(",
"value",
")",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"path",
"=",
"encoded_value",
")",
"return",
"self",
".",
"_tuple",
".",
"path"
] |
Return or set the path
:param string value: the new path to use
:returns: string or new :class:`URL` instance
|
[
"Return",
"or",
"set",
"the",
"path"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L306-L318
|
train
|
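A short sketch of the path() accessor above. purl URL objects are immutable, so setting a value returns a new instance, and a missing leading slash is added before the path is quoted; the constructor call is an assumption about the public URL class.

from purl import URL  # top-level import assumed

u = URL('https://example.com/foo')
assert u.path() == '/foo'          # read the current path
u2 = u.path('bar/baz')             # setter: slash is prepended, new URL returned
assert u2.path() == '/bar/baz'
assert u.path() == '/foo'          # the original instance is unchanged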
codeinthehole/purl
|
purl/url.py
|
URL.query
|
def query(self, value=None):
"""
Return or set the query string
:param string value: the new query string to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, query=value)
return self._tuple.query
|
python
|
def query(self, value=None):
"""
Return or set the query string
:param string value: the new query string to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, query=value)
return self._tuple.query
|
[
"def",
"query",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"query",
"=",
"value",
")",
"return",
"self",
".",
"_tuple",
".",
"query"
] |
Return or set the query string
:param string value: the new query string to use
:returns: string or new :class:`URL` instance
|
[
"Return",
"or",
"set",
"the",
"query",
"string"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L320-L329
|
train
|
codeinthehole/purl
|
purl/url.py
|
URL.port
|
def port(self, value=None):
"""
Return or set the port
:param string value: the new port to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, port=value)
return self._tuple.port
|
python
|
def port(self, value=None):
"""
Return or set the port
:param string value: the new port to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, port=value)
return self._tuple.port
|
[
"def",
"port",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"port",
"=",
"value",
")",
"return",
"self",
".",
"_tuple",
".",
"port"
] |
Return or set the port
:param string value: the new port to use
:returns: string or new :class:`URL` instance
|
[
"Return",
"or",
"set",
"the",
"port"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L331-L340
|
train
|
codeinthehole/purl
|
purl/url.py
|
URL.path_segment
|
def path_segment(self, index, value=None, default=None):
"""
Return the path segment at the given index
:param integer index:
:param string value: the new segment value
:param string default: the default value to return if no path segment exists with the given index
"""
if value is not None:
segments = list(self.path_segments())
segments[index] = unicode_quote_path_segment(value)
new_path = '/' + '/'.join(segments)
if self._tuple.path.endswith('/'):
new_path += '/'
return URL._mutate(self, path=new_path)
try:
return self.path_segments()[index]
except IndexError:
return default
|
python
|
def path_segment(self, index, value=None, default=None):
"""
Return the path segment at the given index
:param integer index:
:param string value: the new segment value
:param string default: the default value to return if no path segment exists with the given index
"""
if value is not None:
segments = list(self.path_segments())
segments[index] = unicode_quote_path_segment(value)
new_path = '/' + '/'.join(segments)
if self._tuple.path.endswith('/'):
new_path += '/'
return URL._mutate(self, path=new_path)
try:
return self.path_segments()[index]
except IndexError:
return default
|
[
"def",
"path_segment",
"(",
"self",
",",
"index",
",",
"value",
"=",
"None",
",",
"default",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"segments",
"=",
"list",
"(",
"self",
".",
"path_segments",
"(",
")",
")",
"segments",
"[",
"index",
"]",
"=",
"unicode_quote_path_segment",
"(",
"value",
")",
"new_path",
"=",
"'/'",
"+",
"'/'",
".",
"join",
"(",
"segments",
")",
"if",
"self",
".",
"_tuple",
".",
"path",
".",
"endswith",
"(",
"'/'",
")",
":",
"new_path",
"+=",
"'/'",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"path",
"=",
"new_path",
")",
"try",
":",
"return",
"self",
".",
"path_segments",
"(",
")",
"[",
"index",
"]",
"except",
"IndexError",
":",
"return",
"default"
] |
Return the path segment at the given index
:param integer index:
:param string value: the new segment value
:param string default: the default value to return if no path segment exists with the given index
|
[
"Return",
"the",
"path",
"segment",
"at",
"the",
"given",
"index"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L365-L383
|
train
|
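A sketch of path_segment() from the record above: read one segment by index, fall back to a default when the index is out of range, or replace a segment and get back a new URL.

from purl import URL  # top-level import assumed

u = URL('https://example.com/users/42/orders')
assert u.path_segment(1) == '42'
assert u.path_segment(5, default='missing') == 'missing'  # IndexError -> default
assert u.path_segment(1, value='43').path() == '/users/43/orders'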
codeinthehole/purl
|
purl/url.py
|
URL.path_segments
|
def path_segments(self, value=None):
"""
Return the path segments
:param list value: the new path segments to use
"""
if value is not None:
encoded_values = map(unicode_quote_path_segment, value)
new_path = '/' + '/'.join(encoded_values)
return URL._mutate(self, path=new_path)
parts = self._tuple.path.split('/')
segments = parts[1:]
if self._tuple.path.endswith('/'):
segments.pop()
segments = map(unicode_unquote, segments)
return tuple(segments)
|
python
|
def path_segments(self, value=None):
"""
Return the path segments
:param list value: the new path segments to use
"""
if value is not None:
encoded_values = map(unicode_quote_path_segment, value)
new_path = '/' + '/'.join(encoded_values)
return URL._mutate(self, path=new_path)
parts = self._tuple.path.split('/')
segments = parts[1:]
if self._tuple.path.endswith('/'):
segments.pop()
segments = map(unicode_unquote, segments)
return tuple(segments)
|
[
"def",
"path_segments",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"encoded_values",
"=",
"map",
"(",
"unicode_quote_path_segment",
",",
"value",
")",
"new_path",
"=",
"'/'",
"+",
"'/'",
".",
"join",
"(",
"encoded_values",
")",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"path",
"=",
"new_path",
")",
"parts",
"=",
"self",
".",
"_tuple",
".",
"path",
".",
"split",
"(",
"'/'",
")",
"segments",
"=",
"parts",
"[",
"1",
":",
"]",
"if",
"self",
".",
"_tuple",
".",
"path",
".",
"endswith",
"(",
"'/'",
")",
":",
"segments",
".",
"pop",
"(",
")",
"segments",
"=",
"map",
"(",
"unicode_unquote",
",",
"segments",
")",
"return",
"tuple",
"(",
"segments",
")"
] |
Return the path segments
:param list value: the new path segments to use
|
[
"Return",
"the",
"path",
"segments"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L385-L400
|
train
|
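A sketch of path_segments(): reading yields a tuple of decoded segments, and passing a list rebuilds the whole path on a new URL instance.

from purl import URL  # top-level import assumed

u = URL('https://example.com/a/b/c')
assert u.path_segments() == ('a', 'b', 'c')
u2 = u.path_segments(['x', 'y'])
assert u2.path() == '/x/y'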
codeinthehole/purl
|
purl/url.py
|
URL.add_path_segment
|
def add_path_segment(self, value):
"""
Add a new path segment to the end of the current string
:param string value: the new path segment to use
Example::
>>> u = URL('http://example.com/foo/')
>>> u.add_path_segment('bar').as_string()
'http://example.com/foo/bar'
"""
segments = self.path_segments() + (to_unicode(value),)
return self.path_segments(segments)
|
python
|
def add_path_segment(self, value):
"""
Add a new path segment to the end of the current string
:param string value: the new path segment to use
Example::
>>> u = URL('http://example.com/foo/')
>>> u.add_path_segment('bar').as_string()
'http://example.com/foo/bar'
"""
segments = self.path_segments() + (to_unicode(value),)
return self.path_segments(segments)
|
[
"def",
"add_path_segment",
"(",
"self",
",",
"value",
")",
":",
"segments",
"=",
"self",
".",
"path_segments",
"(",
")",
"+",
"(",
"to_unicode",
"(",
"value",
")",
",",
")",
"return",
"self",
".",
"path_segments",
"(",
"segments",
")"
] |
Add a new path segment to the end of the current string
:param string value: the new path segment to use
Example::
>>> u = URL('http://example.com/foo/')
>>> u.add_path_segment('bar').as_string()
'http://example.com/foo/bar'
|
[
"Add",
"a",
"new",
"path",
"segment",
"to",
"the",
"end",
"of",
"the",
"current",
"string"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L402-L415
|
train
|
codeinthehole/purl
|
purl/url.py
|
URL.query_param
|
def query_param(self, key, value=None, default=None, as_list=False):
"""
Return or set a query parameter for the given key
The value can be a list.
:param string key: key to look for
:param string default: value to return if ``key`` isn't found
:param boolean as_list: whether to return the values as a list
:param string value: the new query parameter to use
"""
parse_result = self.query_params()
if value is not None:
# Need to ensure all strings are unicode
if isinstance(value, (list, tuple)):
value = list(map(to_unicode, value))
else:
value = to_unicode(value)
parse_result[to_unicode(key)] = value
return URL._mutate(
self, query=unicode_urlencode(parse_result, doseq=True))
try:
result = parse_result[key]
except KeyError:
return default
if as_list:
return result
return result[0] if len(result) == 1 else result
|
python
|
def query_param(self, key, value=None, default=None, as_list=False):
"""
Return or set a query parameter for the given key
The value can be a list.
:param string key: key to look for
:param string default: value to return if ``key`` isn't found
:param boolean as_list: whether to return the values as a list
:param string value: the new query parameter to use
"""
parse_result = self.query_params()
if value is not None:
# Need to ensure all strings are unicode
if isinstance(value, (list, tuple)):
value = list(map(to_unicode, value))
else:
value = to_unicode(value)
parse_result[to_unicode(key)] = value
return URL._mutate(
self, query=unicode_urlencode(parse_result, doseq=True))
try:
result = parse_result[key]
except KeyError:
return default
if as_list:
return result
return result[0] if len(result) == 1 else result
|
[
"def",
"query_param",
"(",
"self",
",",
"key",
",",
"value",
"=",
"None",
",",
"default",
"=",
"None",
",",
"as_list",
"=",
"False",
")",
":",
"parse_result",
"=",
"self",
".",
"query_params",
"(",
")",
"if",
"value",
"is",
"not",
"None",
":",
"# Need to ensure all strings are unicode",
"if",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"value",
"=",
"list",
"(",
"map",
"(",
"to_unicode",
",",
"value",
")",
")",
"else",
":",
"value",
"=",
"to_unicode",
"(",
"value",
")",
"parse_result",
"[",
"to_unicode",
"(",
"key",
")",
"]",
"=",
"value",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"query",
"=",
"unicode_urlencode",
"(",
"parse_result",
",",
"doseq",
"=",
"True",
")",
")",
"try",
":",
"result",
"=",
"parse_result",
"[",
"key",
"]",
"except",
"KeyError",
":",
"return",
"default",
"if",
"as_list",
":",
"return",
"result",
"return",
"result",
"[",
"0",
"]",
"if",
"len",
"(",
"result",
")",
"==",
"1",
"else",
"result"
] |
Return or set a query parameter for the given key
The value can be a list.
:param string key: key to look for
:param string default: value to return if ``key`` isn't found
:param boolean as_list: whether to return the values as a list
:param string value: the new query parameter to use
|
[
"Return",
"or",
"set",
"a",
"query",
"parameter",
"for",
"the",
"given",
"key"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L437-L465
|
train
|
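A sketch of query_param() based on the code above: single values come back as plain strings, repeated keys as lists, and setting a value returns a new URL with the query string re-encoded.

from purl import URL  # top-level import assumed

u = URL('https://example.com/search?q=python&tag=web&tag=http')
assert u.query_param('q') == 'python'                      # single value
assert u.query_param('tag') == ['web', 'http']             # repeated key
assert u.query_param('missing', default='n/a') == 'n/a'
assert u.query_param('q', as_list=True) == ['python']
u2 = u.query_param('page', '2')                            # setter form
assert u2.query_param('page') == '2'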
codeinthehole/purl
|
purl/url.py
|
URL.append_query_param
|
def append_query_param(self, key, value):
"""
Append a query parameter
:param string key: The query param key
:param string value: The new value
"""
values = self.query_param(key, as_list=True, default=[])
values.append(value)
return self.query_param(key, values)
|
python
|
def append_query_param(self, key, value):
"""
Append a query parameter
:param string key: The query param key
:param string value: The new value
"""
values = self.query_param(key, as_list=True, default=[])
values.append(value)
return self.query_param(key, values)
|
[
"def",
"append_query_param",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"values",
"=",
"self",
".",
"query_param",
"(",
"key",
",",
"as_list",
"=",
"True",
",",
"default",
"=",
"[",
"]",
")",
"values",
".",
"append",
"(",
"value",
")",
"return",
"self",
".",
"query_param",
"(",
"key",
",",
"values",
")"
] |
Append a query parameter
:param string key: The query param key
:param string value: The new value
|
[
"Append",
"a",
"query",
"parameter"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L467-L476
|
train
|
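A sketch of append_query_param(): an existing key keeps its earlier values and gains the new one.

from purl import URL  # top-level import assumed

u = URL('https://example.com/?tag=web')
u2 = u.append_query_param('tag', 'http')
assert u2.query_param('tag') == ['web', 'http']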
codeinthehole/purl
|
purl/url.py
|
URL.query_params
|
def query_params(self, value=None):
"""
Return or set a dictionary of query params
:param dict value: new dictionary of values
"""
if value is not None:
return URL._mutate(self, query=unicode_urlencode(value, doseq=True))
query = '' if self._tuple.query is None else self._tuple.query
# In Python 2.6, urlparse needs a bytestring so we encode and then
# decode the result.
if not six.PY3:
result = parse_qs(to_utf8(query), True)
return dict_to_unicode(result)
return parse_qs(query, True)
|
python
|
def query_params(self, value=None):
"""
Return or set a dictionary of query params
:param dict value: new dictionary of values
"""
if value is not None:
return URL._mutate(self, query=unicode_urlencode(value, doseq=True))
query = '' if self._tuple.query is None else self._tuple.query
# In Python 2.6, urlparse needs a bytestring so we encode and then
# decode the result.
if not six.PY3:
result = parse_qs(to_utf8(query), True)
return dict_to_unicode(result)
return parse_qs(query, True)
|
[
"def",
"query_params",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"query",
"=",
"unicode_urlencode",
"(",
"value",
",",
"doseq",
"=",
"True",
")",
")",
"query",
"=",
"''",
"if",
"self",
".",
"_tuple",
".",
"query",
"is",
"None",
"else",
"self",
".",
"_tuple",
".",
"query",
"# In Python 2.6, urlparse needs a bytestring so we encode and then",
"# decode the result.",
"if",
"not",
"six",
".",
"PY3",
":",
"result",
"=",
"parse_qs",
"(",
"to_utf8",
"(",
"query",
")",
",",
"True",
")",
"return",
"dict_to_unicode",
"(",
"result",
")",
"return",
"parse_qs",
"(",
"query",
",",
"True",
")"
] |
Return or set a dictionary of query params
:param dict value: new dictionary of values
|
[
"Return",
"or",
"set",
"a",
"dictionary",
"of",
"query",
"params"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L478-L494
|
train
|
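A sketch of query_params(): the whole query string as a dict mapping keys to lists of values, or replaced wholesale by passing a new dict.

from purl import URL  # top-level import assumed

u = URL('https://example.com/?a=1&b=2&b=3')
assert u.query_params() == {'a': ['1'], 'b': ['2', '3']}
u2 = u.query_params({'c': '4'})
assert u2.query_params() == {'c': ['4']}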
codeinthehole/purl
|
purl/url.py
|
URL.remove_query_param
|
def remove_query_param(self, key, value=None):
"""
Remove a query param from a URL
Set the value parameter if removing from a list.
:param string key: The key to delete
:param string value: The value of the param to delete (if more than one)
"""
parse_result = self.query_params()
if value is not None:
index = parse_result[key].index(value)
del parse_result[key][index]
else:
del parse_result[key]
return URL._mutate(self, query=unicode_urlencode(parse_result, doseq=True))
|
python
|
def remove_query_param(self, key, value=None):
"""
Remove a query param from a URL
Set the value parameter if removing from a list.
:param string key: The key to delete
:param string value: The value of the param to delete (if more than one)
"""
parse_result = self.query_params()
if value is not None:
index = parse_result[key].index(value)
del parse_result[key][index]
else:
del parse_result[key]
return URL._mutate(self, query=unicode_urlencode(parse_result, doseq=True))
|
[
"def",
"remove_query_param",
"(",
"self",
",",
"key",
",",
"value",
"=",
"None",
")",
":",
"parse_result",
"=",
"self",
".",
"query_params",
"(",
")",
"if",
"value",
"is",
"not",
"None",
":",
"index",
"=",
"parse_result",
"[",
"key",
"]",
".",
"index",
"(",
"value",
")",
"del",
"parse_result",
"[",
"key",
"]",
"[",
"index",
"]",
"else",
":",
"del",
"parse_result",
"[",
"key",
"]",
"return",
"URL",
".",
"_mutate",
"(",
"self",
",",
"query",
"=",
"unicode_urlencode",
"(",
"parse_result",
",",
"doseq",
"=",
"True",
")",
")"
] |
Remove a query param from a URL
Set the value parameter if removing from a list.
:param string key: The key to delete
:param string value: The value of the param to delete (if more than one)
|
[
"Remove",
"a",
"query",
"param",
"from",
"a",
"URL"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L496-L511
|
train
|
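A sketch of remove_query_param(): drop a key entirely, or drop a single value from a repeated key by passing the value as well.

from purl import URL  # top-level import assumed

u = URL('https://example.com/?tag=web&tag=http&q=python')
assert u.remove_query_param('q').query_param('q') is None          # key removed
assert u.remove_query_param('tag', 'http').query_param('tag') == 'web'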
codeinthehole/purl
|
purl/template.py
|
expand
|
def expand(template, variables=None):
"""
Expand a URL template string using the passed variables
"""
if variables is None:
variables = {}
return patterns.sub(functools.partial(_replace, variables), template)
|
python
|
def expand(template, variables=None):
"""
Expand a URL template string using the passed variables
"""
if variables is None:
variables = {}
return patterns.sub(functools.partial(_replace, variables), template)
|
[
"def",
"expand",
"(",
"template",
",",
"variables",
"=",
"None",
")",
":",
"if",
"variables",
"is",
"None",
":",
"variables",
"=",
"{",
"}",
"return",
"patterns",
".",
"sub",
"(",
"functools",
".",
"partial",
"(",
"_replace",
",",
"variables",
")",
",",
"template",
")"
] |
Expand a URL template string using the passed variables
|
[
"Expand",
"a",
"URL",
"template",
"string",
"using",
"the",
"passed",
"variables"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/template.py#L31-L37
|
train
|
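A sketch of the template expand() helper: it fills an RFC 6570 style URI template from a dict of variables, and per the _replace() code shown further down, an expression whose variables are all missing expands to the empty string. The import path is assumed from the record's file path.

from purl.template import expand  # import path assumed

result = expand('https://example.com/users/{user_id}', {'user_id': '42'})
assert result == 'https://example.com/users/42'
# No matching variables: the whole expression drops out of the expansion.
assert expand('https://example.com/search{?q}') == 'https://example.com/search'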
codeinthehole/purl
|
purl/template.py
|
_format_pair_no_equals
|
def _format_pair_no_equals(explode, separator, escape, key, value):
"""
Format a key, value pair but don't include the equals sign
when there is no value
"""
if not value:
return key
return _format_pair(explode, separator, escape, key, value)
|
python
|
def _format_pair_no_equals(explode, separator, escape, key, value):
"""
Format a key, value pair but don't include the equals sign
when there is no value
"""
if not value:
return key
return _format_pair(explode, separator, escape, key, value)
|
[
"def",
"_format_pair_no_equals",
"(",
"explode",
",",
"separator",
",",
"escape",
",",
"key",
",",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"key",
"return",
"_format_pair",
"(",
"explode",
",",
"separator",
",",
"escape",
",",
"key",
",",
"value",
")"
] |
Format a key, value pair but don't include the equals sign
when there is no value
|
[
"Format",
"a",
"key",
"value",
"pair",
"but",
"don",
"t",
"include",
"the",
"equals",
"sign",
"when",
"there",
"is",
"no",
"value"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/template.py#L56-L63
|
train
|
codeinthehole/purl
|
purl/template.py
|
_format_pair_with_equals
|
def _format_pair_with_equals(explode, separator, escape, key, value):
"""
Format a key, value pair including the equals sign
when there is no value
"""
if not value:
return key + '='
return _format_pair(explode, separator, escape, key, value)
|
python
|
def _format_pair_with_equals(explode, separator, escape, key, value):
"""
Format a key, value pair including the equals sign
when there is no value
"""
if not value:
return key + '='
return _format_pair(explode, separator, escape, key, value)
|
[
"def",
"_format_pair_with_equals",
"(",
"explode",
",",
"separator",
",",
"escape",
",",
"key",
",",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"key",
"+",
"'='",
"return",
"_format_pair",
"(",
"explode",
",",
"separator",
",",
"escape",
",",
"key",
",",
"value",
")"
] |
Format a key, value pair including the equals sign
when there is no value
|
[
"Format",
"a",
"key",
"value",
"pair",
"including",
"the",
"equals",
"sign",
"when",
"there",
"is",
"no",
"value"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/template.py#L66-L73
|
train
|
codeinthehole/purl
|
purl/template.py
|
_replace
|
def _replace(variables, match):
"""
Return the appropriate replacement for `match` using the passed variables
"""
expression = match.group(1)
# Look-up chars and functions for the specified operator
(prefix_char, separator_char, split_fn, escape_fn,
format_fn) = operator_map.get(expression[0], defaults)
replacements = []
for key, modify_fn, explode in split_fn(expression):
if key in variables:
variable = modify_fn(variables[key])
replacement = format_fn(
explode, separator_char, escape_fn, key, variable)
replacements.append(replacement)
if not replacements:
return ''
return prefix_char + separator_char.join(replacements)
|
python
|
def _replace(variables, match):
"""
Return the appropriate replacement for `match` using the passed variables
"""
expression = match.group(1)
# Look-up chars and functions for the specified operator
(prefix_char, separator_char, split_fn, escape_fn,
format_fn) = operator_map.get(expression[0], defaults)
replacements = []
for key, modify_fn, explode in split_fn(expression):
if key in variables:
variable = modify_fn(variables[key])
replacement = format_fn(
explode, separator_char, escape_fn, key, variable)
replacements.append(replacement)
if not replacements:
return ''
return prefix_char + separator_char.join(replacements)
|
[
"def",
"_replace",
"(",
"variables",
",",
"match",
")",
":",
"expression",
"=",
"match",
".",
"group",
"(",
"1",
")",
"# Look-up chars and functions for the specified operator",
"(",
"prefix_char",
",",
"separator_char",
",",
"split_fn",
",",
"escape_fn",
",",
"format_fn",
")",
"=",
"operator_map",
".",
"get",
"(",
"expression",
"[",
"0",
"]",
",",
"defaults",
")",
"replacements",
"=",
"[",
"]",
"for",
"key",
",",
"modify_fn",
",",
"explode",
"in",
"split_fn",
"(",
"expression",
")",
":",
"if",
"key",
"in",
"variables",
":",
"variable",
"=",
"modify_fn",
"(",
"variables",
"[",
"key",
"]",
")",
"replacement",
"=",
"format_fn",
"(",
"explode",
",",
"separator_char",
",",
"escape_fn",
",",
"key",
",",
"variable",
")",
"replacements",
".",
"append",
"(",
"replacement",
")",
"if",
"not",
"replacements",
":",
"return",
"''",
"return",
"prefix_char",
"+",
"separator_char",
".",
"join",
"(",
"replacements",
")"
] |
Return the appropriate replacement for `match` using the passed variables
|
[
"Return",
"the",
"appropriate",
"replacement",
"for",
"match",
"using",
"the",
"passed",
"variables"
] |
e70ed132f1fdc17d00c78199cedb1e3adcb2bc55
|
https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/template.py#L195-L214
|
train
|
LucidtechAI/las-sdk-python
|
las/api_client.py
|
ApiClient.predict
|
def predict(self, document_path: str, model_name: str, consent_id: str = None) -> Prediction:
"""Run inference and create prediction on document.
This method takes care of creating and uploading a document specified by document_path.
as well as running inference using model specified by model_name to create prediction on the document.
>>> from las import ApiClient
>>> api_client = ApiClient(endpoint='<api endpoint>')
>>> api_client.predict(document_path='document.jpeg', model_name='invoice')
:param document_path: Path to document to run inference on
:type document_path: str
:param model_name: The name of the model to use for inference
:type model_name: str
:param consent_id: An identifier to mark the owner of the document handle
:type consent_id: str
:return: Prediction on document
:rtype: Prediction
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
"""
content_type = self._get_content_type(document_path)
consent_id = consent_id or str(uuid4())
document_id = self._upload_document(document_path, content_type, consent_id)
prediction_response = self.post_predictions(document_id, model_name)
return Prediction(document_id, consent_id, model_name, prediction_response)
|
python
|
def predict(self, document_path: str, model_name: str, consent_id: str = None) -> Prediction:
"""Run inference and create prediction on document.
This method takes care of creating and uploading a document specified by document_path.
as well as running inference using model specified by model_name to create prediction on the document.
>>> from las import ApiClient
>>> api_client = ApiClient(endpoint='<api endpoint>')
>>> api_client.predict(document_path='document.jpeg', model_name='invoice')
:param document_path: Path to document to run inference on
:type document_path: str
:param model_name: The name of the model to use for inference
:type model_name: str
:param consent_id: An identifier to mark the owner of the document handle
:type consent_id: str
:return: Prediction on document
:rtype: Prediction
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
"""
content_type = self._get_content_type(document_path)
consent_id = consent_id or str(uuid4())
document_id = self._upload_document(document_path, content_type, consent_id)
prediction_response = self.post_predictions(document_id, model_name)
return Prediction(document_id, consent_id, model_name, prediction_response)
|
[
"def",
"predict",
"(",
"self",
",",
"document_path",
":",
"str",
",",
"model_name",
":",
"str",
",",
"consent_id",
":",
"str",
"=",
"None",
")",
"->",
"Prediction",
":",
"content_type",
"=",
"self",
".",
"_get_content_type",
"(",
"document_path",
")",
"consent_id",
"=",
"consent_id",
"or",
"str",
"(",
"uuid4",
"(",
")",
")",
"document_id",
"=",
"self",
".",
"_upload_document",
"(",
"document_path",
",",
"content_type",
",",
"consent_id",
")",
"prediction_response",
"=",
"self",
".",
"post_predictions",
"(",
"document_id",
",",
"model_name",
")",
"return",
"Prediction",
"(",
"document_id",
",",
"consent_id",
",",
"model_name",
",",
"prediction_response",
")"
] |
Run inference and create prediction on document.
This method takes care of creating and uploading a document specified by document_path.
as well as running inference using model specified by model_name to create prediction on the document.
>>> from las import ApiClient
>>> api_client = ApiClient(endpoint='<api endpoint>')
>>> api_client.predict(document_path='document.jpeg', model_name='invoice')
:param document_path: Path to document to run inference on
:type document_path: str
:param model_name: The name of the model to use for inference
:type model_name: str
:param consent_id: An identifier to mark the owner of the document handle
:type consent_id: str
:return: Prediction on document
:rtype: Prediction
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
|
[
"Run",
"inference",
"and",
"create",
"prediction",
"on",
"document",
".",
"This",
"method",
"takes",
"care",
"of",
"creating",
"and",
"uploading",
"a",
"document",
"specified",
"by",
"document_path",
".",
"as",
"well",
"as",
"running",
"inference",
"using",
"model",
"specified",
"by",
"model_name",
"to",
"create",
"prediction",
"on",
"the",
"document",
"."
] |
5f39dee7983baff28a1deb93c12d36414d835d12
|
https://github.com/LucidtechAI/las-sdk-python/blob/5f39dee7983baff28a1deb93c12d36414d835d12/las/api_client.py#L30-L57
|
train
|
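A hypothetical end-to-end sketch of the predict() call documented above, reusing a caller-supplied consent_id so the same owner is attached to the uploaded document; the endpoint and file path are placeholders.

from uuid import uuid4
from las import ApiClient

api_client = ApiClient(endpoint='<api endpoint>')
consent_id = str(uuid4())
# Returns a Prediction built from the document id, consent id, model name and
# the raw prediction response, per the record above.
prediction = api_client.predict(document_path='document.jpeg',
                                model_name='invoice',
                                consent_id=consent_id)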
LucidtechAI/las-sdk-python
|
las/api_client.py
|
ApiClient.send_feedback
|
def send_feedback(self, document_id: str, feedback: List[Field]) -> dict:
"""Send feedback to the model.
This method takes care of sending feedback related to document specified by document_id.
Feedback consists of ground truth values for the document specified as a list of Field instances.
>>> from las import ApiClient
>>> api_client = ApiClient(endpoint='<api endpoint>')
>>> feedback = [Field(label='total_amount', value='120.00'), Field(label='purchase_date', value='2019-03-10')]
>>> api_client.send_feedback('<document id>', feedback)
:param document_id: The document id of the document that will receive the feedback
:type document_id: str
:param feedback: A list of :py:class:`~las.Field` representing the ground truth values for the document
:type feedback: List[Field]
:return: Feedback response
:rtype: dict
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
"""
return self.post_document_id(document_id, feedback)
|
python
|
def send_feedback(self, document_id: str, feedback: List[Field]) -> dict:
"""Send feedback to the model.
This method takes care of sending feedback related to document specified by document_id.
Feedback consists of ground truth values for the document specified as a list of Field instances.
>>> from las import ApiClient
>>> api_client = ApiClient(endpoint='<api endpoint>')
>>> feedback = [Field(label='total_amount', value='120.00'), Field(label='purchase_date', value='2019-03-10')]
>>> api_client.send_feedback('<document id>', feedback)
:param document_id: The document id of the document that will receive the feedback
:type document_id: str
:param feedback: A list of :py:class:`~las.Field` representing the ground truth values for the document
:type feedback: List[Field]
:return: Feedback response
:rtype: dict
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
"""
return self.post_document_id(document_id, feedback)
|
[
"def",
"send_feedback",
"(",
"self",
",",
"document_id",
":",
"str",
",",
"feedback",
":",
"List",
"[",
"Field",
"]",
")",
"->",
"dict",
":",
"return",
"self",
".",
"post_document_id",
"(",
"document_id",
",",
"feedback",
")"
] |
Send feedback to the model.
This method takes care of sending feedback related to document specified by document_id.
Feedback consists of ground truth values for the document specified as a list of Field instances.
>>> from las import ApiClient
>>> api_client = ApiClient(endpoint='<api endpoint>')
>>> feedback = [Field(label='total_amount', value='120.00'), Field(label='purchase_date', value='2019-03-10')]
>>> api_client.send_feedback('<document id>', feedback)
:param document_id: The document id of the document that will receive the feedback
:type document_id: str
:param feedback: A list of :py:class:`~las.Field` representing the ground truth values for the document
:type feedback: List[Field]
:return: Feedback response
:rtype: dict
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
|
[
"Send",
"feedback",
"to",
"the",
"model",
".",
"This",
"method",
"takes",
"care",
"of",
"sending",
"feedback",
"related",
"to",
"document",
"specified",
"by",
"document_id",
".",
"Feedback",
"consists",
"of",
"ground",
"truth",
"values",
"for",
"the",
"document",
"specified",
"as",
"a",
"list",
"of",
"Field",
"instances",
"."
] |
5f39dee7983baff28a1deb93c12d36414d835d12
|
https://github.com/LucidtechAI/las-sdk-python/blob/5f39dee7983baff28a1deb93c12d36414d835d12/las/api_client.py#L59-L81
|
train
|
LucidtechAI/las-sdk-python
|
las/_extrahdr.py
|
extra_what
|
def extra_what(file, h=None):
"""Code mostly copied from imghdr.what"""
tests = []
def test_pdf(h, f):
if b'PDF' in h[0:10]:
return 'pdf'
tests.append(test_pdf)
f = None
try:
if h is None:
if isinstance(file, (str, PathLike)):
f = open(file, 'rb')
h = f.read(32)
else:
location = file.tell()
h = file.read(32)
file.seek(location)
for tf in tests:
res = tf(h, f)
if res:
return res
finally:
if f:
f.close()
return None
|
python
|
def extra_what(file, h=None):
"""Code mostly copied from imghdr.what"""
tests = []
def test_pdf(h, f):
if b'PDF' in h[0:10]:
return 'pdf'
tests.append(test_pdf)
f = None
try:
if h is None:
if isinstance(file, (str, PathLike)):
f = open(file, 'rb')
h = f.read(32)
else:
location = file.tell()
h = file.read(32)
file.seek(location)
for tf in tests:
res = tf(h, f)
if res:
return res
finally:
if f:
f.close()
return None
|
[
"def",
"extra_what",
"(",
"file",
",",
"h",
"=",
"None",
")",
":",
"tests",
"=",
"[",
"]",
"def",
"test_pdf",
"(",
"h",
",",
"f",
")",
":",
"if",
"b'PDF'",
"in",
"h",
"[",
"0",
":",
"10",
"]",
":",
"return",
"'pdf'",
"tests",
".",
"append",
"(",
"test_pdf",
")",
"f",
"=",
"None",
"try",
":",
"if",
"h",
"is",
"None",
":",
"if",
"isinstance",
"(",
"file",
",",
"(",
"str",
",",
"PathLike",
")",
")",
":",
"f",
"=",
"open",
"(",
"file",
",",
"'rb'",
")",
"h",
"=",
"f",
".",
"read",
"(",
"32",
")",
"else",
":",
"location",
"=",
"file",
".",
"tell",
"(",
")",
"h",
"=",
"file",
".",
"read",
"(",
"32",
")",
"file",
".",
"seek",
"(",
"location",
")",
"for",
"tf",
"in",
"tests",
":",
"res",
"=",
"tf",
"(",
"h",
",",
"f",
")",
"if",
"res",
":",
"return",
"res",
"finally",
":",
"if",
"f",
":",
"f",
".",
"close",
"(",
")",
"return",
"None"
] |
Code mostly copied from imghdr.what
|
[
"Code",
"mostly",
"copied",
"from",
"imghdr",
".",
"what"
] |
5f39dee7983baff28a1deb93c12d36414d835d12
|
https://github.com/LucidtechAI/las-sdk-python/blob/5f39dee7983baff28a1deb93c12d36414d835d12/las/_extrahdr.py#L4-L31
|
train
|
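A small sketch of extra_what() from the record above: given an already-read header (the h argument) it reports 'pdf' when the PDF magic marker sits in the first bytes, and None otherwise. The import path is taken from the record's file path and may not be public API.

from las._extrahdr import extra_what  # internal module; path assumed from the record

assert extra_what(None, h=b'%PDF-1.7\n') == 'pdf'
assert extra_what(None, h=b'\x89PNG\r\n\x1a\n') is None  # not a PDF header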