repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
| partition
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|
robdmc/crontabs | crontabs/crontabs.py | Tab.starting_at | def starting_at(self, datetime_or_str):
"""
Set the starting time for the cron job. If not specified, the starting time will always
be the beginning of the interval that is current when the cron is started.
:param datetime_or_str: a datetime object or a string that dateutil.parser can understand
:return: self
"""
if isinstance(datetime_or_str, str):
self._starting_at = parse(datetime_or_str)
elif isinstance(datetime_or_str, datetime.datetime):
self._starting_at = datetime_or_str
else:
raise ValueError('.starting_at() method can only take strings or datetime objects')
return self | python | def starting_at(self, datetime_or_str):
"""
Set the starting time for the cron job. If not specified, the starting time will always
be the beginning of the interval that is current when the cron is started.
:param datetime_or_str: a datetime object or a string that dateutil.parser can understand
:return: self
"""
if isinstance(datetime_or_str, str):
self._starting_at = parse(datetime_or_str)
elif isinstance(datetime_or_str, datetime.datetime):
self._starting_at = datetime_or_str
else:
raise ValueError('.starting_at() method can only take strings or datetime objects')
return self | [
"def",
"starting_at",
"(",
"self",
",",
"datetime_or_str",
")",
":",
"if",
"isinstance",
"(",
"datetime_or_str",
",",
"str",
")",
":",
"self",
".",
"_starting_at",
"=",
"parse",
"(",
"datetime_or_str",
")",
"elif",
"isinstance",
"(",
"datetime_or_str",
",",
"datetime",
".",
"datetime",
")",
":",
"self",
".",
"_starting_at",
"=",
"datetime_or_str",
"else",
":",
"raise",
"ValueError",
"(",
"'.starting_at() method can only take strings or datetime objects'",
")",
"return",
"self"
]
| Set the starting time for the cron job. If not specified, the starting time will always
be the beginning of the interval that is current when the cron is started.
:param datetime_or_str: a datetime object or a string that dateutil.parser can understand
:return: self | [
"Set",
"the",
"starting",
"time",
"for",
"the",
"cron",
"job",
".",
"If",
"not",
"specified",
"the",
"starting",
"time",
"will",
"always",
"be",
"the",
"beginning",
"of",
"the",
"interval",
"that",
"is",
"current",
"when",
"the",
"cron",
"is",
"started",
"."
]
| 3a347f9309eb1b4c7c222363ede338a158f5072c | https://github.com/robdmc/crontabs/blob/3a347f9309eb1b4c7c222363ede338a158f5072c/crontabs/crontabs.py#L61-L75 | train |
robdmc/crontabs | crontabs/crontabs.py | Tab.run | def run(self, func, *func_args, **func__kwargs):
"""
Specify the function to run at the scheduled times
:param func: a callable
:param func_args: the args to the callable
:param func__kwargs: the kwargs to the callable
:return:
"""
self._func = func
self._func_args = func_args
self._func_kwargs = func__kwargs
return self | python | def run(self, func, *func_args, **func__kwargs):
"""
Specify the function to run at the scheduled times
:param func: a callable
:param func_args: the args to the callable
:param func__kwargs: the kwargs to the callable
:return:
"""
self._func = func
self._func_args = func_args
self._func_kwargs = func__kwargs
return self | [
"def",
"run",
"(",
"self",
",",
"func",
",",
"*",
"func_args",
",",
"*",
"*",
"func__kwargs",
")",
":",
"self",
".",
"_func",
"=",
"func",
"self",
".",
"_func_args",
"=",
"func_args",
"self",
".",
"_func_kwargs",
"=",
"func__kwargs",
"return",
"self"
]
| Specify the function to run at the scheduled times
:param func: a callable
:param func_args: the args to the callable
:param func__kwargs: the kwargs to the callable
:return: | [
"Specify",
"the",
"function",
"to",
"run",
"at",
"the",
"scheduled",
"times"
]
| 3a347f9309eb1b4c7c222363ede338a158f5072c | https://github.com/robdmc/crontabs/blob/3a347f9309eb1b4c7c222363ede338a158f5072c/crontabs/crontabs.py#L93-L105 | train |
robdmc/crontabs | crontabs/crontabs.py | Tab._get_target | def _get_target(self):
"""
returns a callable with no arguments designed
to be the target of a Subprocess
"""
if None in [self._func, self._func_kwargs, self._func_kwargs, self._every_kwargs]:
raise ValueError('You must call the .every() and .run() methods on every tab.')
return self._loop | python | def _get_target(self):
"""
returns a callable with no arguments designed
to be the target of a Subprocess
"""
if None in [self._func, self._func_kwargs, self._func_kwargs, self._every_kwargs]:
raise ValueError('You must call the .every() and .run() methods on every tab.')
return self._loop | [
"def",
"_get_target",
"(",
"self",
")",
":",
"if",
"None",
"in",
"[",
"self",
".",
"_func",
",",
"self",
".",
"_func_kwargs",
",",
"self",
".",
"_func_kwargs",
",",
"self",
".",
"_every_kwargs",
"]",
":",
"raise",
"ValueError",
"(",
"'You must call the .every() and .run() methods on every tab.'",
")",
"return",
"self",
".",
"_loop"
]
| returns a callable with no arguments designed
to be the target of a Subprocess | [
"returns",
"a",
"callable",
"with",
"no",
"arguments",
"designed",
"to",
"be",
"the",
"target",
"of",
"a",
"Subprocess"
]
| 3a347f9309eb1b4c7c222363ede338a158f5072c | https://github.com/robdmc/crontabs/blob/3a347f9309eb1b4c7c222363ede338a158f5072c/crontabs/crontabs.py#L197-L204 | train |
robdmc/crontabs | crontabs/processes.py | wrapped_target | def wrapped_target(target, q_stdout, q_stderr, q_error, robust, name, *args, **kwargs): # pragma: no cover
"""
Wraps a target with queues replacing stdout and stderr
"""
import sys
sys.stdout = IOQueue(q_stdout)
sys.stderr = IOQueue(q_stderr)
try:
target(*args, **kwargs)
except:
if not robust:
s = 'Error in tab\n' + traceback.format_exc()
logger = daiquiri.getLogger(name)
logger.error(s)
else:
raise
if not robust:
q_error.put(name)
raise | python | def wrapped_target(target, q_stdout, q_stderr, q_error, robust, name, *args, **kwargs): # pragma: no cover
"""
Wraps a target with queues replacing stdout and stderr
"""
import sys
sys.stdout = IOQueue(q_stdout)
sys.stderr = IOQueue(q_stderr)
try:
target(*args, **kwargs)
except:
if not robust:
s = 'Error in tab\n' + traceback.format_exc()
logger = daiquiri.getLogger(name)
logger.error(s)
else:
raise
if not robust:
q_error.put(name)
raise | [
"def",
"wrapped_target",
"(",
"target",
",",
"q_stdout",
",",
"q_stderr",
",",
"q_error",
",",
"robust",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"import",
"sys",
"sys",
".",
"stdout",
"=",
"IOQueue",
"(",
"q_stdout",
")",
"sys",
".",
"stderr",
"=",
"IOQueue",
"(",
"q_stderr",
")",
"try",
":",
"target",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
":",
"if",
"not",
"robust",
":",
"s",
"=",
"'Error in tab\\n'",
"+",
"traceback",
".",
"format_exc",
"(",
")",
"logger",
"=",
"daiquiri",
".",
"getLogger",
"(",
"name",
")",
"logger",
".",
"error",
"(",
"s",
")",
"else",
":",
"raise",
"if",
"not",
"robust",
":",
"q_error",
".",
"put",
"(",
"name",
")",
"raise"
]
| Wraps a target with queues replacing stdout and stderr | [
"Wraps",
"a",
"target",
"with",
"queues",
"replacing",
"stdout",
"and",
"stderr"
]
| 3a347f9309eb1b4c7c222363ede338a158f5072c | https://github.com/robdmc/crontabs/blob/3a347f9309eb1b4c7c222363ede338a158f5072c/crontabs/processes.py#L85-L107 | train |
robdmc/crontabs | crontabs/processes.py | ProcessMonitor.loop | def loop(self, max_seconds=None):
"""
Main loop for the process. This will run continuously until maxiter
"""
loop_started = datetime.datetime.now()
self._is_running = True
while self._is_running:
self.process_error_queue(self.q_error)
if max_seconds is not None:
if (datetime.datetime.now() - loop_started).total_seconds() > max_seconds:
break
for subprocess in self._subprocesses:
if not subprocess.is_alive():
subprocess.start()
self.process_io_queue(self.q_stdout, sys.stdout)
self.process_io_queue(self.q_stderr, sys.stderr) | python | def loop(self, max_seconds=None):
"""
Main loop for the process. This will run continuously until maxiter
"""
loop_started = datetime.datetime.now()
self._is_running = True
while self._is_running:
self.process_error_queue(self.q_error)
if max_seconds is not None:
if (datetime.datetime.now() - loop_started).total_seconds() > max_seconds:
break
for subprocess in self._subprocesses:
if not subprocess.is_alive():
subprocess.start()
self.process_io_queue(self.q_stdout, sys.stdout)
self.process_io_queue(self.q_stderr, sys.stderr) | [
"def",
"loop",
"(",
"self",
",",
"max_seconds",
"=",
"None",
")",
":",
"loop_started",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"self",
".",
"_is_running",
"=",
"True",
"while",
"self",
".",
"_is_running",
":",
"self",
".",
"process_error_queue",
"(",
"self",
".",
"q_error",
")",
"if",
"max_seconds",
"is",
"not",
"None",
":",
"if",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"loop_started",
")",
".",
"total_seconds",
"(",
")",
">",
"max_seconds",
":",
"break",
"for",
"subprocess",
"in",
"self",
".",
"_subprocesses",
":",
"if",
"not",
"subprocess",
".",
"is_alive",
"(",
")",
":",
"subprocess",
".",
"start",
"(",
")",
"self",
".",
"process_io_queue",
"(",
"self",
".",
"q_stdout",
",",
"sys",
".",
"stdout",
")",
"self",
".",
"process_io_queue",
"(",
"self",
".",
"q_stderr",
",",
"sys",
".",
"stderr",
")"
]
| Main loop for the process. This will run continuously until maxiter | [
"Main",
"loop",
"for",
"the",
"process",
".",
"This",
"will",
"run",
"continuously",
"until",
"maxiter"
]
| 3a347f9309eb1b4c7c222363ede338a158f5072c | https://github.com/robdmc/crontabs/blob/3a347f9309eb1b4c7c222363ede338a158f5072c/crontabs/processes.py#L156-L174 | train |
gusutabopb/aioinflux | aioinflux/serialization/common.py | escape | def escape(string, escape_pattern):
"""Assistant function for string escaping"""
try:
return string.translate(escape_pattern)
except AttributeError:
warnings.warn("Non-string-like data passed. "
"Attempting to convert to 'str'.")
return str(string).translate(tag_escape) | python | def escape(string, escape_pattern):
"""Assistant function for string escaping"""
try:
return string.translate(escape_pattern)
except AttributeError:
warnings.warn("Non-string-like data passed. "
"Attempting to convert to 'str'.")
return str(string).translate(tag_escape) | [
"def",
"escape",
"(",
"string",
",",
"escape_pattern",
")",
":",
"try",
":",
"return",
"string",
".",
"translate",
"(",
"escape_pattern",
")",
"except",
"AttributeError",
":",
"warnings",
".",
"warn",
"(",
"\"Non-string-like data passed. \"",
"\"Attempting to convert to 'str'.\"",
")",
"return",
"str",
"(",
"string",
")",
".",
"translate",
"(",
"tag_escape",
")"
]
| Assistant function for string escaping | [
"Assistant",
"function",
"for",
"string",
"escaping"
]
| 2e4b7b3e13604e7618c686d89a0673f0bc70b24e | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/common.py#L13-L20 | train |
gusutabopb/aioinflux | aioinflux/serialization/usertype.py | _make_serializer | def _make_serializer(meas, schema, rm_none, extra_tags, placeholder): # noqa: C901
"""Factory of line protocol parsers"""
_validate_schema(schema, placeholder)
tags = []
fields = []
ts = None
meas = meas
for k, t in schema.items():
if t is MEASUREMENT:
meas = f"{{i.{k}}}"
elif t is TIMEINT:
ts = f"{{i.{k}}}"
elif t is TIMESTR:
if pd:
ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
else:
ts = f"{{dt_to_int(str_to_dt(i.{k}))}}"
elif t is TIMEDT:
if pd:
ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
else:
ts = f"{{dt_to_int(i.{k})}}"
elif t is TAG:
tags.append(f"{k}={{str(i.{k}).translate(tag_escape)}}")
elif t is TAGENUM:
tags.append(f"{k}={{getattr(i.{k}, 'name', i.{k} or None)}}")
elif t in (FLOAT, BOOL):
fields.append(f"{k}={{i.{k}}}")
elif t is INT:
fields.append(f"{k}={{i.{k}}}i")
elif t is STR:
fields.append(f"{k}=\\\"{{str(i.{k}).translate(str_escape)}}\\\"")
elif t is ENUM:
fields.append(f"{k}=\\\"{{getattr(i.{k}, 'name', i.{k} or None)}}\\\"")
else:
raise SchemaError(f"Invalid attribute type {k!r}: {t!r}")
extra_tags = extra_tags or {}
for k, v in extra_tags.items():
tags.append(f"{k}={v}")
if placeholder:
fields.insert(0, f"_=true")
sep = ',' if tags else ''
ts = f' {ts}' if ts else ''
fmt = f"{meas}{sep}{','.join(tags)} {','.join(fields)}{ts}"
if rm_none:
# Has substantial runtime impact. Best avoided if performance is critical.
# First field can't be removed.
pat = r',\w+="?None"?i?'
f = eval('lambda i: re.sub(r\'{}\', "", f"{}").encode()'.format(pat, fmt))
else:
f = eval('lambda i: f"{}".encode()'.format(fmt))
f.__doc__ = "Returns InfluxDB line protocol representation of user-defined class"
f._args = dict(meas=meas, schema=schema, rm_none=rm_none,
extra_tags=extra_tags, placeholder=placeholder)
return f | python | def _make_serializer(meas, schema, rm_none, extra_tags, placeholder): # noqa: C901
"""Factory of line protocol parsers"""
_validate_schema(schema, placeholder)
tags = []
fields = []
ts = None
meas = meas
for k, t in schema.items():
if t is MEASUREMENT:
meas = f"{{i.{k}}}"
elif t is TIMEINT:
ts = f"{{i.{k}}}"
elif t is TIMESTR:
if pd:
ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
else:
ts = f"{{dt_to_int(str_to_dt(i.{k}))}}"
elif t is TIMEDT:
if pd:
ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
else:
ts = f"{{dt_to_int(i.{k})}}"
elif t is TAG:
tags.append(f"{k}={{str(i.{k}).translate(tag_escape)}}")
elif t is TAGENUM:
tags.append(f"{k}={{getattr(i.{k}, 'name', i.{k} or None)}}")
elif t in (FLOAT, BOOL):
fields.append(f"{k}={{i.{k}}}")
elif t is INT:
fields.append(f"{k}={{i.{k}}}i")
elif t is STR:
fields.append(f"{k}=\\\"{{str(i.{k}).translate(str_escape)}}\\\"")
elif t is ENUM:
fields.append(f"{k}=\\\"{{getattr(i.{k}, 'name', i.{k} or None)}}\\\"")
else:
raise SchemaError(f"Invalid attribute type {k!r}: {t!r}")
extra_tags = extra_tags or {}
for k, v in extra_tags.items():
tags.append(f"{k}={v}")
if placeholder:
fields.insert(0, f"_=true")
sep = ',' if tags else ''
ts = f' {ts}' if ts else ''
fmt = f"{meas}{sep}{','.join(tags)} {','.join(fields)}{ts}"
if rm_none:
# Has substantial runtime impact. Best avoided if performance is critical.
# First field can't be removed.
pat = r',\w+="?None"?i?'
f = eval('lambda i: re.sub(r\'{}\', "", f"{}").encode()'.format(pat, fmt))
else:
f = eval('lambda i: f"{}".encode()'.format(fmt))
f.__doc__ = "Returns InfluxDB line protocol representation of user-defined class"
f._args = dict(meas=meas, schema=schema, rm_none=rm_none,
extra_tags=extra_tags, placeholder=placeholder)
return f | [
"def",
"_make_serializer",
"(",
"meas",
",",
"schema",
",",
"rm_none",
",",
"extra_tags",
",",
"placeholder",
")",
":",
"# noqa: C901",
"_validate_schema",
"(",
"schema",
",",
"placeholder",
")",
"tags",
"=",
"[",
"]",
"fields",
"=",
"[",
"]",
"ts",
"=",
"None",
"meas",
"=",
"meas",
"for",
"k",
",",
"t",
"in",
"schema",
".",
"items",
"(",
")",
":",
"if",
"t",
"is",
"MEASUREMENT",
":",
"meas",
"=",
"f\"{{i.{k}}}\"",
"elif",
"t",
"is",
"TIMEINT",
":",
"ts",
"=",
"f\"{{i.{k}}}\"",
"elif",
"t",
"is",
"TIMESTR",
":",
"if",
"pd",
":",
"ts",
"=",
"f\"{{pd.Timestamp(i.{k} or 0).value}}\"",
"else",
":",
"ts",
"=",
"f\"{{dt_to_int(str_to_dt(i.{k}))}}\"",
"elif",
"t",
"is",
"TIMEDT",
":",
"if",
"pd",
":",
"ts",
"=",
"f\"{{pd.Timestamp(i.{k} or 0).value}}\"",
"else",
":",
"ts",
"=",
"f\"{{dt_to_int(i.{k})}}\"",
"elif",
"t",
"is",
"TAG",
":",
"tags",
".",
"append",
"(",
"f\"{k}={{str(i.{k}).translate(tag_escape)}}\"",
")",
"elif",
"t",
"is",
"TAGENUM",
":",
"tags",
".",
"append",
"(",
"f\"{k}={{getattr(i.{k}, 'name', i.{k} or None)}}\"",
")",
"elif",
"t",
"in",
"(",
"FLOAT",
",",
"BOOL",
")",
":",
"fields",
".",
"append",
"(",
"f\"{k}={{i.{k}}}\"",
")",
"elif",
"t",
"is",
"INT",
":",
"fields",
".",
"append",
"(",
"f\"{k}={{i.{k}}}i\"",
")",
"elif",
"t",
"is",
"STR",
":",
"fields",
".",
"append",
"(",
"f\"{k}=\\\\\\\"{{str(i.{k}).translate(str_escape)}}\\\\\\\"\"",
")",
"elif",
"t",
"is",
"ENUM",
":",
"fields",
".",
"append",
"(",
"f\"{k}=\\\\\\\"{{getattr(i.{k}, 'name', i.{k} or None)}}\\\\\\\"\"",
")",
"else",
":",
"raise",
"SchemaError",
"(",
"f\"Invalid attribute type {k!r}: {t!r}\"",
")",
"extra_tags",
"=",
"extra_tags",
"or",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"extra_tags",
".",
"items",
"(",
")",
":",
"tags",
".",
"append",
"(",
"f\"{k}={v}\"",
")",
"if",
"placeholder",
":",
"fields",
".",
"insert",
"(",
"0",
",",
"f\"_=true\"",
")",
"sep",
"=",
"','",
"if",
"tags",
"else",
"''",
"ts",
"=",
"f' {ts}'",
"if",
"ts",
"else",
"''",
"fmt",
"=",
"f\"{meas}{sep}{','.join(tags)} {','.join(fields)}{ts}\"",
"if",
"rm_none",
":",
"# Has substantial runtime impact. Best avoided if performance is critical.",
"# First field can't be removed.",
"pat",
"=",
"r',\\w+=\"?None\"?i?'",
"f",
"=",
"eval",
"(",
"'lambda i: re.sub(r\\'{}\\', \"\", f\"{}\").encode()'",
".",
"format",
"(",
"pat",
",",
"fmt",
")",
")",
"else",
":",
"f",
"=",
"eval",
"(",
"'lambda i: f\"{}\".encode()'",
".",
"format",
"(",
"fmt",
")",
")",
"f",
".",
"__doc__",
"=",
"\"Returns InfluxDB line protocol representation of user-defined class\"",
"f",
".",
"_args",
"=",
"dict",
"(",
"meas",
"=",
"meas",
",",
"schema",
"=",
"schema",
",",
"rm_none",
"=",
"rm_none",
",",
"extra_tags",
"=",
"extra_tags",
",",
"placeholder",
"=",
"placeholder",
")",
"return",
"f"
]
| Factory of line protocol parsers | [
"Factory",
"of",
"line",
"protocol",
"parsers"
]
| 2e4b7b3e13604e7618c686d89a0673f0bc70b24e | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/usertype.py#L67-L122 | train |
gusutabopb/aioinflux | aioinflux/serialization/usertype.py | lineprotocol | def lineprotocol(
cls=None,
*,
schema: Optional[Mapping[str, type]] = None,
rm_none: bool = False,
extra_tags: Optional[Mapping[str, str]] = None,
placeholder: bool = False
):
"""Adds ``to_lineprotocol`` method to arbitrary user-defined classes
:param cls: Class to monkey-patch
:param schema: Schema dictionary (attr/type pairs).
:param rm_none: Whether apply a regex to remove ``None`` values.
If ``False``, passing ``None`` values to boolean, integer or float or time fields
will result in write errors. Setting to ``True`` is "safer" but impacts performance.
:param extra_tags: Hard coded tags to be added to every point generated.
:param placeholder: If no field attributes are present, add a placeholder attribute (``_``)
which is always equal to ``True``. This is a workaround for creating field-less points
(which is not supported natively by InfluxDB)
"""
def _lineprotocol(cls):
_schema = schema or getattr(cls, '__annotations__', {})
f = _make_serializer(cls.__name__, _schema, rm_none, extra_tags, placeholder)
cls.to_lineprotocol = f
return cls
return _lineprotocol(cls) if cls else _lineprotocol | python | def lineprotocol(
cls=None,
*,
schema: Optional[Mapping[str, type]] = None,
rm_none: bool = False,
extra_tags: Optional[Mapping[str, str]] = None,
placeholder: bool = False
):
"""Adds ``to_lineprotocol`` method to arbitrary user-defined classes
:param cls: Class to monkey-patch
:param schema: Schema dictionary (attr/type pairs).
:param rm_none: Whether apply a regex to remove ``None`` values.
If ``False``, passing ``None`` values to boolean, integer or float or time fields
will result in write errors. Setting to ``True`` is "safer" but impacts performance.
:param extra_tags: Hard coded tags to be added to every point generated.
:param placeholder: If no field attributes are present, add a placeholder attribute (``_``)
which is always equal to ``True``. This is a workaround for creating field-less points
(which is not supported natively by InfluxDB)
"""
def _lineprotocol(cls):
_schema = schema or getattr(cls, '__annotations__', {})
f = _make_serializer(cls.__name__, _schema, rm_none, extra_tags, placeholder)
cls.to_lineprotocol = f
return cls
return _lineprotocol(cls) if cls else _lineprotocol | [
"def",
"lineprotocol",
"(",
"cls",
"=",
"None",
",",
"*",
",",
"schema",
":",
"Optional",
"[",
"Mapping",
"[",
"str",
",",
"type",
"]",
"]",
"=",
"None",
",",
"rm_none",
":",
"bool",
"=",
"False",
",",
"extra_tags",
":",
"Optional",
"[",
"Mapping",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"None",
",",
"placeholder",
":",
"bool",
"=",
"False",
")",
":",
"def",
"_lineprotocol",
"(",
"cls",
")",
":",
"_schema",
"=",
"schema",
"or",
"getattr",
"(",
"cls",
",",
"'__annotations__'",
",",
"{",
"}",
")",
"f",
"=",
"_make_serializer",
"(",
"cls",
".",
"__name__",
",",
"_schema",
",",
"rm_none",
",",
"extra_tags",
",",
"placeholder",
")",
"cls",
".",
"to_lineprotocol",
"=",
"f",
"return",
"cls",
"return",
"_lineprotocol",
"(",
"cls",
")",
"if",
"cls",
"else",
"_lineprotocol"
]
| Adds ``to_lineprotocol`` method to arbitrary user-defined classes
:param cls: Class to monkey-patch
:param schema: Schema dictionary (attr/type pairs).
:param rm_none: Whether apply a regex to remove ``None`` values.
If ``False``, passing ``None`` values to boolean, integer or float or time fields
will result in write errors. Setting to ``True`` is "safer" but impacts performance.
:param extra_tags: Hard coded tags to be added to every point generated.
:param placeholder: If no field attributes are present, add a placeholder attribute (``_``)
which is always equal to ``True``. This is a workaround for creating field-less points
(which is not supported natively by InfluxDB) | [
"Adds",
"to_lineprotocol",
"method",
"to",
"arbitrary",
"user",
"-",
"defined",
"classes"
]
| 2e4b7b3e13604e7618c686d89a0673f0bc70b24e | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/usertype.py#L125-L152 | train |
gusutabopb/aioinflux | aioinflux/serialization/mapping.py | _serialize_fields | def _serialize_fields(point):
"""Field values can be floats, integers, strings, or Booleans."""
output = []
for k, v in point['fields'].items():
k = escape(k, key_escape)
if isinstance(v, bool):
output.append(f'{k}={v}')
elif isinstance(v, int):
output.append(f'{k}={v}i')
elif isinstance(v, str):
output.append(f'{k}="{v.translate(str_escape)}"')
elif v is None:
# Empty values
continue
else:
# Floats
output.append(f'{k}={v}')
return ','.join(output) | python | def _serialize_fields(point):
"""Field values can be floats, integers, strings, or Booleans."""
output = []
for k, v in point['fields'].items():
k = escape(k, key_escape)
if isinstance(v, bool):
output.append(f'{k}={v}')
elif isinstance(v, int):
output.append(f'{k}={v}i')
elif isinstance(v, str):
output.append(f'{k}="{v.translate(str_escape)}"')
elif v is None:
# Empty values
continue
else:
# Floats
output.append(f'{k}={v}')
return ','.join(output) | [
"def",
"_serialize_fields",
"(",
"point",
")",
":",
"output",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"point",
"[",
"'fields'",
"]",
".",
"items",
"(",
")",
":",
"k",
"=",
"escape",
"(",
"k",
",",
"key_escape",
")",
"if",
"isinstance",
"(",
"v",
",",
"bool",
")",
":",
"output",
".",
"append",
"(",
"f'{k}={v}'",
")",
"elif",
"isinstance",
"(",
"v",
",",
"int",
")",
":",
"output",
".",
"append",
"(",
"f'{k}={v}i'",
")",
"elif",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"output",
".",
"append",
"(",
"f'{k}=\"{v.translate(str_escape)}\"'",
")",
"elif",
"v",
"is",
"None",
":",
"# Empty values",
"continue",
"else",
":",
"# Floats",
"output",
".",
"append",
"(",
"f'{k}={v}'",
")",
"return",
"','",
".",
"join",
"(",
"output",
")"
]
| Field values can be floats, integers, strings, or Booleans. | [
"Field",
"values",
"can",
"be",
"floats",
"integers",
"strings",
"or",
"Booleans",
"."
]
| 2e4b7b3e13604e7618c686d89a0673f0bc70b24e | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/mapping.py#L57-L74 | train |
gusutabopb/aioinflux | aioinflux/serialization/__init__.py | serialize | def serialize(data, measurement=None, tag_columns=None, **extra_tags):
"""Converts input data into line protocol format"""
if isinstance(data, bytes):
return data
elif isinstance(data, str):
return data.encode('utf-8')
elif hasattr(data, 'to_lineprotocol'):
return data.to_lineprotocol()
elif pd is not None and isinstance(data, pd.DataFrame):
return dataframe.serialize(data, measurement, tag_columns, **extra_tags)
elif isinstance(data, dict):
return mapping.serialize(data, measurement, **extra_tags)
elif hasattr(data, '__iter__'):
return b'\n'.join([serialize(i, measurement, tag_columns, **extra_tags) for i in data])
else:
raise ValueError('Invalid input', data) | python | def serialize(data, measurement=None, tag_columns=None, **extra_tags):
"""Converts input data into line protocol format"""
if isinstance(data, bytes):
return data
elif isinstance(data, str):
return data.encode('utf-8')
elif hasattr(data, 'to_lineprotocol'):
return data.to_lineprotocol()
elif pd is not None and isinstance(data, pd.DataFrame):
return dataframe.serialize(data, measurement, tag_columns, **extra_tags)
elif isinstance(data, dict):
return mapping.serialize(data, measurement, **extra_tags)
elif hasattr(data, '__iter__'):
return b'\n'.join([serialize(i, measurement, tag_columns, **extra_tags) for i in data])
else:
raise ValueError('Invalid input', data) | [
"def",
"serialize",
"(",
"data",
",",
"measurement",
"=",
"None",
",",
"tag_columns",
"=",
"None",
",",
"*",
"*",
"extra_tags",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"bytes",
")",
":",
"return",
"data",
"elif",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"return",
"data",
".",
"encode",
"(",
"'utf-8'",
")",
"elif",
"hasattr",
"(",
"data",
",",
"'to_lineprotocol'",
")",
":",
"return",
"data",
".",
"to_lineprotocol",
"(",
")",
"elif",
"pd",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"return",
"dataframe",
".",
"serialize",
"(",
"data",
",",
"measurement",
",",
"tag_columns",
",",
"*",
"*",
"extra_tags",
")",
"elif",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"return",
"mapping",
".",
"serialize",
"(",
"data",
",",
"measurement",
",",
"*",
"*",
"extra_tags",
")",
"elif",
"hasattr",
"(",
"data",
",",
"'__iter__'",
")",
":",
"return",
"b'\\n'",
".",
"join",
"(",
"[",
"serialize",
"(",
"i",
",",
"measurement",
",",
"tag_columns",
",",
"*",
"*",
"extra_tags",
")",
"for",
"i",
"in",
"data",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid input'",
",",
"data",
")"
]
| Converts input data into line protocol format | [
"Converts",
"input",
"data",
"into",
"line",
"protocol",
"format"
]
| 2e4b7b3e13604e7618c686d89a0673f0bc70b24e | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/__init__.py#L9-L24 | train |
gusutabopb/aioinflux | aioinflux/iterutils.py | iterpoints | def iterpoints(resp: dict, parser: Optional[Callable] = None) -> Iterator[Any]:
"""Iterates a response JSON yielding data point by point.
Can be used with both regular and chunked responses.
By default, returns just a plain list of values representing each point,
without column names, or other metadata.
In case a specific format is needed, an optional ``parser`` argument can be passed.
``parser`` is a function/callable that takes data point values
and, optionally, a ``meta`` parameter containing which takes a
dictionary containing all or a subset of the following:
``{'columns', 'name', 'tags', 'statement_id'}``.
Sample parser functions:
.. code:: python
# Function optional meta argument
def parser(*x, meta):
return dict(zip(meta['columns'], x))
# Namedtuple (callable)
from collections import namedtuple
parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])
:param resp: Dictionary containing parsed JSON (output from InfluxDBClient.query)
:param parser: Optional parser function/callable
:return: Generator object
"""
for statement in resp['results']:
if 'series' not in statement:
continue
for series in statement['series']:
if parser is None:
return (x for x in series['values'])
elif 'meta' in inspect.signature(parser).parameters:
meta = {k: series[k] for k in series if k != 'values'}
meta['statement_id'] = statement['statement_id']
return (parser(*x, meta=meta) for x in series['values'])
else:
return (parser(*x) for x in series['values'])
return iter([]) | python | def iterpoints(resp: dict, parser: Optional[Callable] = None) -> Iterator[Any]:
"""Iterates a response JSON yielding data point by point.
Can be used with both regular and chunked responses.
By default, returns just a plain list of values representing each point,
without column names, or other metadata.
In case a specific format is needed, an optional ``parser`` argument can be passed.
``parser`` is a function/callable that takes data point values
and, optionally, a ``meta`` parameter containing which takes a
dictionary containing all or a subset of the following:
``{'columns', 'name', 'tags', 'statement_id'}``.
Sample parser functions:
.. code:: python
# Function optional meta argument
def parser(*x, meta):
return dict(zip(meta['columns'], x))
# Namedtuple (callable)
from collections import namedtuple
parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])
:param resp: Dictionary containing parsed JSON (output from InfluxDBClient.query)
:param parser: Optional parser function/callable
:return: Generator object
"""
for statement in resp['results']:
if 'series' not in statement:
continue
for series in statement['series']:
if parser is None:
return (x for x in series['values'])
elif 'meta' in inspect.signature(parser).parameters:
meta = {k: series[k] for k in series if k != 'values'}
meta['statement_id'] = statement['statement_id']
return (parser(*x, meta=meta) for x in series['values'])
else:
return (parser(*x) for x in series['values'])
return iter([]) | [
"def",
"iterpoints",
"(",
"resp",
":",
"dict",
",",
"parser",
":",
"Optional",
"[",
"Callable",
"]",
"=",
"None",
")",
"->",
"Iterator",
"[",
"Any",
"]",
":",
"for",
"statement",
"in",
"resp",
"[",
"'results'",
"]",
":",
"if",
"'series'",
"not",
"in",
"statement",
":",
"continue",
"for",
"series",
"in",
"statement",
"[",
"'series'",
"]",
":",
"if",
"parser",
"is",
"None",
":",
"return",
"(",
"x",
"for",
"x",
"in",
"series",
"[",
"'values'",
"]",
")",
"elif",
"'meta'",
"in",
"inspect",
".",
"signature",
"(",
"parser",
")",
".",
"parameters",
":",
"meta",
"=",
"{",
"k",
":",
"series",
"[",
"k",
"]",
"for",
"k",
"in",
"series",
"if",
"k",
"!=",
"'values'",
"}",
"meta",
"[",
"'statement_id'",
"]",
"=",
"statement",
"[",
"'statement_id'",
"]",
"return",
"(",
"parser",
"(",
"*",
"x",
",",
"meta",
"=",
"meta",
")",
"for",
"x",
"in",
"series",
"[",
"'values'",
"]",
")",
"else",
":",
"return",
"(",
"parser",
"(",
"*",
"x",
")",
"for",
"x",
"in",
"series",
"[",
"'values'",
"]",
")",
"return",
"iter",
"(",
"[",
"]",
")"
]
| Iterates a response JSON yielding data point by point.
Can be used with both regular and chunked responses.
By default, returns just a plain list of values representing each point,
without column names, or other metadata.
In case a specific format is needed, an optional ``parser`` argument can be passed.
``parser`` is a function/callable that takes data point values
and, optionally, a ``meta`` parameter containing which takes a
dictionary containing all or a subset of the following:
``{'columns', 'name', 'tags', 'statement_id'}``.
Sample parser functions:
.. code:: python
# Function optional meta argument
def parser(*x, meta):
return dict(zip(meta['columns'], x))
# Namedtuple (callable)
from collections import namedtuple
parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])
:param resp: Dictionary containing parsed JSON (output from InfluxDBClient.query)
:param parser: Optional parser function/callable
:return: Generator object | [
"Iterates",
"a",
"response",
"JSON",
"yielding",
"data",
"point",
"by",
"point",
"."
]
| 2e4b7b3e13604e7618c686d89a0673f0bc70b24e | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/iterutils.py#L6-L48 | train |
gusutabopb/aioinflux | aioinflux/serialization/dataframe.py | parse | def parse(resp) -> DataFrameType:
"""Makes a dictionary of DataFrames from a response object"""
statements = []
for statement in resp['results']:
series = {}
for s in statement.get('series', []):
series[_get_name(s)] = _drop_zero_index(_serializer(s))
statements.append(series)
if len(statements) == 1:
series: dict = statements[0]
if len(series) == 1:
return list(series.values())[0] # DataFrame
else:
return series # dict
return statements | python | def parse(resp) -> DataFrameType:
"""Makes a dictionary of DataFrames from a response object"""
statements = []
for statement in resp['results']:
series = {}
for s in statement.get('series', []):
series[_get_name(s)] = _drop_zero_index(_serializer(s))
statements.append(series)
if len(statements) == 1:
series: dict = statements[0]
if len(series) == 1:
return list(series.values())[0] # DataFrame
else:
return series # dict
return statements | [
"def",
"parse",
"(",
"resp",
")",
"->",
"DataFrameType",
":",
"statements",
"=",
"[",
"]",
"for",
"statement",
"in",
"resp",
"[",
"'results'",
"]",
":",
"series",
"=",
"{",
"}",
"for",
"s",
"in",
"statement",
".",
"get",
"(",
"'series'",
",",
"[",
"]",
")",
":",
"series",
"[",
"_get_name",
"(",
"s",
")",
"]",
"=",
"_drop_zero_index",
"(",
"_serializer",
"(",
"s",
")",
")",
"statements",
".",
"append",
"(",
"series",
")",
"if",
"len",
"(",
"statements",
")",
"==",
"1",
":",
"series",
":",
"dict",
"=",
"statements",
"[",
"0",
"]",
"if",
"len",
"(",
"series",
")",
"==",
"1",
":",
"return",
"list",
"(",
"series",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"# DataFrame",
"else",
":",
"return",
"series",
"# dict",
"return",
"statements"
]
| Makes a dictionary of DataFrames from a response object | [
"Makes",
"a",
"dictionary",
"of",
"DataFrames",
"from",
"a",
"response",
"object"
]
| 2e4b7b3e13604e7618c686d89a0673f0bc70b24e | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/dataframe.py#L44-L59 | train |
gusutabopb/aioinflux | aioinflux/serialization/dataframe.py | _itertuples | def _itertuples(df):
"""Custom implementation of ``DataFrame.itertuples`` that
returns plain tuples instead of namedtuples. About 50% faster.
"""
cols = [df.iloc[:, k] for k in range(len(df.columns))]
return zip(df.index, *cols) | python | def _itertuples(df):
"""Custom implementation of ``DataFrame.itertuples`` that
returns plain tuples instead of namedtuples. About 50% faster.
"""
cols = [df.iloc[:, k] for k in range(len(df.columns))]
return zip(df.index, *cols) | [
"def",
"_itertuples",
"(",
"df",
")",
":",
"cols",
"=",
"[",
"df",
".",
"iloc",
"[",
":",
",",
"k",
"]",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"df",
".",
"columns",
")",
")",
"]",
"return",
"zip",
"(",
"df",
".",
"index",
",",
"*",
"cols",
")"
]
| Custom implementation of ``DataFrame.itertuples`` that
returns plain tuples instead of namedtuples. About 50% faster. | [
"Custom",
"implementation",
"of",
"DataFrame",
".",
"itertuples",
"that",
"returns",
"plain",
"tuples",
"instead",
"of",
"namedtuples",
".",
"About",
"50%",
"faster",
"."
]
| 2e4b7b3e13604e7618c686d89a0673f0bc70b24e | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/dataframe.py#L65-L70 | train |
gusutabopb/aioinflux | aioinflux/serialization/dataframe.py | serialize | def serialize(df, measurement, tag_columns=None, **extra_tags) -> bytes:
"""Converts a Pandas DataFrame into line protocol format"""
# Pre-processing
if measurement is None:
raise ValueError("Missing 'measurement'")
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError('DataFrame index is not DatetimeIndex')
tag_columns = set(tag_columns or [])
isnull = df.isnull().any(axis=1)
# Make parser function
tags = []
fields = []
for k, v in extra_tags.items():
tags.append(f"{k}={escape(v, key_escape)}")
for i, (k, v) in enumerate(df.dtypes.items()):
k = k.translate(key_escape)
if k in tag_columns:
tags.append(f"{k}={{p[{i+1}]}}")
elif issubclass(v.type, np.integer):
fields.append(f"{k}={{p[{i+1}]}}i")
elif issubclass(v.type, (np.float, np.bool_)):
fields.append(f"{k}={{p[{i+1}]}}")
else:
# String escaping is skipped for performance reasons
# Strings containing double-quotes can cause strange write errors
# and should be sanitized by the user.
# e.g., df[k] = df[k].astype('str').str.translate(str_escape)
fields.append(f"{k}=\"{{p[{i+1}]}}\"")
fmt = (f'{measurement}', f'{"," if tags else ""}', ','.join(tags),
' ', ','.join(fields), ' {p[0].value}')
f = eval("lambda p: f'{}'".format(''.join(fmt)))
# Map/concat
if isnull.any():
lp = map(f, _itertuples(df[~isnull]))
rep = _replace(df)
lp_nan = (reduce(lambda a, b: re.sub(*b, a), rep, f(p))
for p in _itertuples(df[isnull]))
return '\n'.join(chain(lp, lp_nan)).encode('utf-8')
else:
return '\n'.join(map(f, _itertuples(df))).encode('utf-8') | python | def serialize(df, measurement, tag_columns=None, **extra_tags) -> bytes:
"""Converts a Pandas DataFrame into line protocol format"""
# Pre-processing
if measurement is None:
raise ValueError("Missing 'measurement'")
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError('DataFrame index is not DatetimeIndex')
tag_columns = set(tag_columns or [])
isnull = df.isnull().any(axis=1)
# Make parser function
tags = []
fields = []
for k, v in extra_tags.items():
tags.append(f"{k}={escape(v, key_escape)}")
for i, (k, v) in enumerate(df.dtypes.items()):
k = k.translate(key_escape)
if k in tag_columns:
tags.append(f"{k}={{p[{i+1}]}}")
elif issubclass(v.type, np.integer):
fields.append(f"{k}={{p[{i+1}]}}i")
elif issubclass(v.type, (np.float, np.bool_)):
fields.append(f"{k}={{p[{i+1}]}}")
else:
# String escaping is skipped for performance reasons
# Strings containing double-quotes can cause strange write errors
# and should be sanitized by the user.
# e.g., df[k] = df[k].astype('str').str.translate(str_escape)
fields.append(f"{k}=\"{{p[{i+1}]}}\"")
fmt = (f'{measurement}', f'{"," if tags else ""}', ','.join(tags),
' ', ','.join(fields), ' {p[0].value}')
f = eval("lambda p: f'{}'".format(''.join(fmt)))
# Map/concat
if isnull.any():
lp = map(f, _itertuples(df[~isnull]))
rep = _replace(df)
lp_nan = (reduce(lambda a, b: re.sub(*b, a), rep, f(p))
for p in _itertuples(df[isnull]))
return '\n'.join(chain(lp, lp_nan)).encode('utf-8')
else:
return '\n'.join(map(f, _itertuples(df))).encode('utf-8') | [
"def",
"serialize",
"(",
"df",
",",
"measurement",
",",
"tag_columns",
"=",
"None",
",",
"*",
"*",
"extra_tags",
")",
"->",
"bytes",
":",
"# Pre-processing",
"if",
"measurement",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Missing 'measurement'\"",
")",
"if",
"not",
"isinstance",
"(",
"df",
".",
"index",
",",
"pd",
".",
"DatetimeIndex",
")",
":",
"raise",
"ValueError",
"(",
"'DataFrame index is not DatetimeIndex'",
")",
"tag_columns",
"=",
"set",
"(",
"tag_columns",
"or",
"[",
"]",
")",
"isnull",
"=",
"df",
".",
"isnull",
"(",
")",
".",
"any",
"(",
"axis",
"=",
"1",
")",
"# Make parser function",
"tags",
"=",
"[",
"]",
"fields",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"extra_tags",
".",
"items",
"(",
")",
":",
"tags",
".",
"append",
"(",
"f\"{k}={escape(v, key_escape)}\"",
")",
"for",
"i",
",",
"(",
"k",
",",
"v",
")",
"in",
"enumerate",
"(",
"df",
".",
"dtypes",
".",
"items",
"(",
")",
")",
":",
"k",
"=",
"k",
".",
"translate",
"(",
"key_escape",
")",
"if",
"k",
"in",
"tag_columns",
":",
"tags",
".",
"append",
"(",
"f\"{k}={{p[{i+1}]}}\"",
")",
"elif",
"issubclass",
"(",
"v",
".",
"type",
",",
"np",
".",
"integer",
")",
":",
"fields",
".",
"append",
"(",
"f\"{k}={{p[{i+1}]}}i\"",
")",
"elif",
"issubclass",
"(",
"v",
".",
"type",
",",
"(",
"np",
".",
"float",
",",
"np",
".",
"bool_",
")",
")",
":",
"fields",
".",
"append",
"(",
"f\"{k}={{p[{i+1}]}}\"",
")",
"else",
":",
"# String escaping is skipped for performance reasons",
"# Strings containing double-quotes can cause strange write errors",
"# and should be sanitized by the user.",
"# e.g., df[k] = df[k].astype('str').str.translate(str_escape)",
"fields",
".",
"append",
"(",
"f\"{k}=\\\"{{p[{i+1}]}}\\\"\"",
")",
"fmt",
"=",
"(",
"f'{measurement}'",
",",
"f'{\",\" if tags else \"\"}'",
",",
"','",
".",
"join",
"(",
"tags",
")",
",",
"' '",
",",
"','",
".",
"join",
"(",
"fields",
")",
",",
"' {p[0].value}'",
")",
"f",
"=",
"eval",
"(",
"\"lambda p: f'{}'\"",
".",
"format",
"(",
"''",
".",
"join",
"(",
"fmt",
")",
")",
")",
"# Map/concat",
"if",
"isnull",
".",
"any",
"(",
")",
":",
"lp",
"=",
"map",
"(",
"f",
",",
"_itertuples",
"(",
"df",
"[",
"~",
"isnull",
"]",
")",
")",
"rep",
"=",
"_replace",
"(",
"df",
")",
"lp_nan",
"=",
"(",
"reduce",
"(",
"lambda",
"a",
",",
"b",
":",
"re",
".",
"sub",
"(",
"*",
"b",
",",
"a",
")",
",",
"rep",
",",
"f",
"(",
"p",
")",
")",
"for",
"p",
"in",
"_itertuples",
"(",
"df",
"[",
"isnull",
"]",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"chain",
"(",
"lp",
",",
"lp_nan",
")",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"else",
":",
"return",
"'\\n'",
".",
"join",
"(",
"map",
"(",
"f",
",",
"_itertuples",
"(",
"df",
")",
")",
")",
".",
"encode",
"(",
"'utf-8'",
")"
]
| Converts a Pandas DataFrame into line protocol format | [
"Converts",
"a",
"Pandas",
"DataFrame",
"into",
"line",
"protocol",
"format"
]
| 2e4b7b3e13604e7618c686d89a0673f0bc70b24e | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/dataframe.py#L86-L127 | train |
gusutabopb/aioinflux | aioinflux/client.py | runner | def runner(coro):
"""Function execution decorator."""
@wraps(coro)
def inner(self, *args, **kwargs):
if self.mode == 'async':
return coro(self, *args, **kwargs)
return self._loop.run_until_complete(coro(self, *args, **kwargs))
return inner | python | def runner(coro):
"""Function execution decorator."""
@wraps(coro)
def inner(self, *args, **kwargs):
if self.mode == 'async':
return coro(self, *args, **kwargs)
return self._loop.run_until_complete(coro(self, *args, **kwargs))
return inner | [
"def",
"runner",
"(",
"coro",
")",
":",
"@",
"wraps",
"(",
"coro",
")",
"def",
"inner",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'async'",
":",
"return",
"coro",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_loop",
".",
"run_until_complete",
"(",
"coro",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"inner"
]
| Function execution decorator. | [
"Function",
"execution",
"decorator",
"."
]
| 2e4b7b3e13604e7618c686d89a0673f0bc70b24e | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/client.py#L25-L34 | train |
gusutabopb/aioinflux | aioinflux/client.py | InfluxDBClient._check_error | def _check_error(response):
"""Checks for JSON error messages and raises Python exception"""
if 'error' in response:
raise InfluxDBError(response['error'])
elif 'results' in response:
for statement in response['results']:
if 'error' in statement:
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError(msg.format(d=statement)) | python | def _check_error(response):
"""Checks for JSON error messages and raises Python exception"""
if 'error' in response:
raise InfluxDBError(response['error'])
elif 'results' in response:
for statement in response['results']:
if 'error' in statement:
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError(msg.format(d=statement)) | [
"def",
"_check_error",
"(",
"response",
")",
":",
"if",
"'error'",
"in",
"response",
":",
"raise",
"InfluxDBError",
"(",
"response",
"[",
"'error'",
"]",
")",
"elif",
"'results'",
"in",
"response",
":",
"for",
"statement",
"in",
"response",
"[",
"'results'",
"]",
":",
"if",
"'error'",
"in",
"statement",
":",
"msg",
"=",
"'{d[error]} (statement {d[statement_id]})'",
"raise",
"InfluxDBError",
"(",
"msg",
".",
"format",
"(",
"d",
"=",
"statement",
")",
")"
]
| Checks for JSON error messages and raises Python exception | [
"Checks",
"for",
"JSON",
"error",
"messages",
"and",
"raises",
"Python",
"exception"
]
| 2e4b7b3e13604e7618c686d89a0673f0bc70b24e | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/client.py#L384-L392 | train |
remcohaszing/pywakeonlan | wakeonlan.py | create_magic_packet | def create_magic_packet(macaddress):
"""
Create a magic packet.
A magic packet is a packet that can be used with the for wake on lan
protocol to wake up a computer. The packet is constructed from the
mac address given as a parameter.
Args:
macaddress (str): the mac address that should be parsed into a
magic packet.
"""
if len(macaddress) == 12:
pass
elif len(macaddress) == 17:
sep = macaddress[2]
macaddress = macaddress.replace(sep, '')
else:
raise ValueError('Incorrect MAC address format')
# Pad the synchronization stream
data = b'FFFFFFFFFFFF' + (macaddress * 16).encode()
send_data = b''
# Split up the hex values in pack
for i in range(0, len(data), 2):
send_data += struct.pack(b'B', int(data[i: i + 2], 16))
return send_data | python | def create_magic_packet(macaddress):
"""
Create a magic packet.
A magic packet is a packet that can be used with the for wake on lan
protocol to wake up a computer. The packet is constructed from the
mac address given as a parameter.
Args:
macaddress (str): the mac address that should be parsed into a
magic packet.
"""
if len(macaddress) == 12:
pass
elif len(macaddress) == 17:
sep = macaddress[2]
macaddress = macaddress.replace(sep, '')
else:
raise ValueError('Incorrect MAC address format')
# Pad the synchronization stream
data = b'FFFFFFFFFFFF' + (macaddress * 16).encode()
send_data = b''
# Split up the hex values in pack
for i in range(0, len(data), 2):
send_data += struct.pack(b'B', int(data[i: i + 2], 16))
return send_data | [
"def",
"create_magic_packet",
"(",
"macaddress",
")",
":",
"if",
"len",
"(",
"macaddress",
")",
"==",
"12",
":",
"pass",
"elif",
"len",
"(",
"macaddress",
")",
"==",
"17",
":",
"sep",
"=",
"macaddress",
"[",
"2",
"]",
"macaddress",
"=",
"macaddress",
".",
"replace",
"(",
"sep",
",",
"''",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Incorrect MAC address format'",
")",
"# Pad the synchronization stream",
"data",
"=",
"b'FFFFFFFFFFFF'",
"+",
"(",
"macaddress",
"*",
"16",
")",
".",
"encode",
"(",
")",
"send_data",
"=",
"b''",
"# Split up the hex values in pack",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"data",
")",
",",
"2",
")",
":",
"send_data",
"+=",
"struct",
".",
"pack",
"(",
"b'B'",
",",
"int",
"(",
"data",
"[",
"i",
":",
"i",
"+",
"2",
"]",
",",
"16",
")",
")",
"return",
"send_data"
]
| Create a magic packet.
A magic packet is a packet that can be used with the for wake on lan
protocol to wake up a computer. The packet is constructed from the
mac address given as a parameter.
Args:
macaddress (str): the mac address that should be parsed into a
magic packet. | [
"Create",
"a",
"magic",
"packet",
"."
]
| d30b66172c483c4baadb426f493c3de30fecc19b | https://github.com/remcohaszing/pywakeonlan/blob/d30b66172c483c4baadb426f493c3de30fecc19b/wakeonlan.py#L19-L47 | train |
remcohaszing/pywakeonlan | wakeonlan.py | send_magic_packet | def send_magic_packet(*macs, **kwargs):
"""
Wake up computers having any of the given mac addresses.
Wake on lan must be enabled on the host device.
Args:
macs (str): One or more macaddresses of machines to wake.
Keyword Args:
ip_address (str): the ip address of the host to send the magic packet
to (default "255.255.255.255")
port (int): the port of the host to send the magic packet to
(default 9)
"""
packets = []
ip = kwargs.pop('ip_address', BROADCAST_IP)
port = kwargs.pop('port', DEFAULT_PORT)
for k in kwargs:
raise TypeError('send_magic_packet() got an unexpected keyword '
'argument {!r}'.format(k))
for mac in macs:
packet = create_magic_packet(mac)
packets.append(packet)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.connect((ip, port))
for packet in packets:
sock.send(packet)
sock.close() | python | def send_magic_packet(*macs, **kwargs):
"""
Wake up computers having any of the given mac addresses.
Wake on lan must be enabled on the host device.
Args:
macs (str): One or more macaddresses of machines to wake.
Keyword Args:
ip_address (str): the ip address of the host to send the magic packet
to (default "255.255.255.255")
port (int): the port of the host to send the magic packet to
(default 9)
"""
packets = []
ip = kwargs.pop('ip_address', BROADCAST_IP)
port = kwargs.pop('port', DEFAULT_PORT)
for k in kwargs:
raise TypeError('send_magic_packet() got an unexpected keyword '
'argument {!r}'.format(k))
for mac in macs:
packet = create_magic_packet(mac)
packets.append(packet)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.connect((ip, port))
for packet in packets:
sock.send(packet)
sock.close() | [
"def",
"send_magic_packet",
"(",
"*",
"macs",
",",
"*",
"*",
"kwargs",
")",
":",
"packets",
"=",
"[",
"]",
"ip",
"=",
"kwargs",
".",
"pop",
"(",
"'ip_address'",
",",
"BROADCAST_IP",
")",
"port",
"=",
"kwargs",
".",
"pop",
"(",
"'port'",
",",
"DEFAULT_PORT",
")",
"for",
"k",
"in",
"kwargs",
":",
"raise",
"TypeError",
"(",
"'send_magic_packet() got an unexpected keyword '",
"'argument {!r}'",
".",
"format",
"(",
"k",
")",
")",
"for",
"mac",
"in",
"macs",
":",
"packet",
"=",
"create_magic_packet",
"(",
"mac",
")",
"packets",
".",
"append",
"(",
"packet",
")",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"sock",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_BROADCAST",
",",
"1",
")",
"sock",
".",
"connect",
"(",
"(",
"ip",
",",
"port",
")",
")",
"for",
"packet",
"in",
"packets",
":",
"sock",
".",
"send",
"(",
"packet",
")",
"sock",
".",
"close",
"(",
")"
]
| Wake up computers having any of the given mac addresses.
Wake on lan must be enabled on the host device.
Args:
macs (str): One or more macaddresses of machines to wake.
Keyword Args:
ip_address (str): the ip address of the host to send the magic packet
to (default "255.255.255.255")
port (int): the port of the host to send the magic packet to
(default 9) | [
"Wake",
"up",
"computers",
"having",
"any",
"of",
"the",
"given",
"mac",
"addresses",
"."
]
| d30b66172c483c4baadb426f493c3de30fecc19b | https://github.com/remcohaszing/pywakeonlan/blob/d30b66172c483c4baadb426f493c3de30fecc19b/wakeonlan.py#L50-L82 | train |
remcohaszing/pywakeonlan | wakeonlan.py | main | def main(argv=None):
"""
Run wake on lan as a CLI application.
"""
parser = argparse.ArgumentParser(
description='Wake one or more computers using the wake on lan'
' protocol.')
parser.add_argument(
'macs',
metavar='mac address',
nargs='+',
help='The mac addresses or of the computers you are trying to wake.')
parser.add_argument(
'-i',
metavar='ip',
default=BROADCAST_IP,
help='The ip address of the host to send the magic packet to.'
' (default {})'.format(BROADCAST_IP))
parser.add_argument(
'-p',
metavar='port',
type=int,
default=DEFAULT_PORT,
help='The port of the host to send the magic packet to (default 9)')
args = parser.parse_args(argv)
send_magic_packet(*args.macs, ip_address=args.i, port=args.p) | python | def main(argv=None):
"""
Run wake on lan as a CLI application.
"""
parser = argparse.ArgumentParser(
description='Wake one or more computers using the wake on lan'
' protocol.')
parser.add_argument(
'macs',
metavar='mac address',
nargs='+',
help='The mac addresses or of the computers you are trying to wake.')
parser.add_argument(
'-i',
metavar='ip',
default=BROADCAST_IP,
help='The ip address of the host to send the magic packet to.'
' (default {})'.format(BROADCAST_IP))
parser.add_argument(
'-p',
metavar='port',
type=int,
default=DEFAULT_PORT,
help='The port of the host to send the magic packet to (default 9)')
args = parser.parse_args(argv)
send_magic_packet(*args.macs, ip_address=args.i, port=args.p) | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Wake one or more computers using the wake on lan'",
"' protocol.'",
")",
"parser",
".",
"add_argument",
"(",
"'macs'",
",",
"metavar",
"=",
"'mac address'",
",",
"nargs",
"=",
"'+'",
",",
"help",
"=",
"'The mac addresses or of the computers you are trying to wake.'",
")",
"parser",
".",
"add_argument",
"(",
"'-i'",
",",
"metavar",
"=",
"'ip'",
",",
"default",
"=",
"BROADCAST_IP",
",",
"help",
"=",
"'The ip address of the host to send the magic packet to.'",
"' (default {})'",
".",
"format",
"(",
"BROADCAST_IP",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"metavar",
"=",
"'port'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"DEFAULT_PORT",
",",
"help",
"=",
"'The port of the host to send the magic packet to (default 9)'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"send_magic_packet",
"(",
"*",
"args",
".",
"macs",
",",
"ip_address",
"=",
"args",
".",
"i",
",",
"port",
"=",
"args",
".",
"p",
")"
]
| Run wake on lan as a CLI application. | [
"Run",
"wake",
"on",
"lan",
"as",
"a",
"CLI",
"application",
"."
]
| d30b66172c483c4baadb426f493c3de30fecc19b | https://github.com/remcohaszing/pywakeonlan/blob/d30b66172c483c4baadb426f493c3de30fecc19b/wakeonlan.py#L85-L111 | train |
liminspace/django-mjml | mjml/templatetags/mjml.py | mjml | def mjml(parser, token):
"""
Compile MJML template after render django template.
Usage:
{% mjml %}
.. MJML template code ..
{% endmjml %}
"""
nodelist = parser.parse(('endmjml',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) != 1:
raise template.TemplateSyntaxError("'%r' tag doesn't receive any arguments." % tokens[0])
return MJMLRenderNode(nodelist) | python | def mjml(parser, token):
"""
Compile MJML template after render django template.
Usage:
{% mjml %}
.. MJML template code ..
{% endmjml %}
"""
nodelist = parser.parse(('endmjml',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) != 1:
raise template.TemplateSyntaxError("'%r' tag doesn't receive any arguments." % tokens[0])
return MJMLRenderNode(nodelist) | [
"def",
"mjml",
"(",
"parser",
",",
"token",
")",
":",
"nodelist",
"=",
"parser",
".",
"parse",
"(",
"(",
"'endmjml'",
",",
")",
")",
"parser",
".",
"delete_first_token",
"(",
")",
"tokens",
"=",
"token",
".",
"split_contents",
"(",
")",
"if",
"len",
"(",
"tokens",
")",
"!=",
"1",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"'%r' tag doesn't receive any arguments.\"",
"%",
"tokens",
"[",
"0",
"]",
")",
"return",
"MJMLRenderNode",
"(",
"nodelist",
")"
]
| Compile MJML template after render django template.
Usage:
{% mjml %}
.. MJML template code ..
{% endmjml %} | [
"Compile",
"MJML",
"template",
"after",
"render",
"django",
"template",
"."
]
| 6f3e5959ccd35d1b2bcebc6f892a9400294736fb | https://github.com/liminspace/django-mjml/blob/6f3e5959ccd35d1b2bcebc6f892a9400294736fb/mjml/templatetags/mjml.py#L18-L32 | train |
moonso/vcf_parser | vcf_parser/header_parser.py | HeaderParser.parse_header_line | def parse_header_line(self, line):
"""docstring for parse_header_line"""
self.header = line[1:].rstrip().split('\t')
if len(self.header) < 9:
self.header = line[1:].rstrip().split()
self.individuals = self.header[9:] | python | def parse_header_line(self, line):
"""docstring for parse_header_line"""
self.header = line[1:].rstrip().split('\t')
if len(self.header) < 9:
self.header = line[1:].rstrip().split()
self.individuals = self.header[9:] | [
"def",
"parse_header_line",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"header",
"=",
"line",
"[",
"1",
":",
"]",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"if",
"len",
"(",
"self",
".",
"header",
")",
"<",
"9",
":",
"self",
".",
"header",
"=",
"line",
"[",
"1",
":",
"]",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
")",
"self",
".",
"individuals",
"=",
"self",
".",
"header",
"[",
"9",
":",
"]"
]
| docstring for parse_header_line | [
"docstring",
"for",
"parse_header_line"
]
| 8e2b6724e31995e0d43af501f25974310c6b843b | https://github.com/moonso/vcf_parser/blob/8e2b6724e31995e0d43af501f25974310c6b843b/vcf_parser/header_parser.py#L178-L183 | train |
moonso/vcf_parser | vcf_parser/header_parser.py | HeaderParser.print_header | def print_header(self):
"""Returns a list with the header lines if proper format"""
lines_to_print = []
lines_to_print.append('##fileformat='+self.fileformat)
if self.filedate:
lines_to_print.append('##fileformat='+self.fileformat)
for filt in self.filter_dict:
lines_to_print.append(self.filter_dict[filt])
for form in self.format_dict:
lines_to_print.append(self.format_dict[form])
for info in self.info_dict:
lines_to_print.append(self.info_dict[info])
for contig in self.contig_dict:
lines_to_print.append(self.contig_dict[contig])
for alt in self.alt_dict:
lines_to_print.append(self.alt_dict[alt])
for other in self.other_dict:
lines_to_print.append(self.other_dict[other])
lines_to_print.append('#'+ '\t'.join(self.header))
return lines_to_print | python | def print_header(self):
"""Returns a list with the header lines if proper format"""
lines_to_print = []
lines_to_print.append('##fileformat='+self.fileformat)
if self.filedate:
lines_to_print.append('##fileformat='+self.fileformat)
for filt in self.filter_dict:
lines_to_print.append(self.filter_dict[filt])
for form in self.format_dict:
lines_to_print.append(self.format_dict[form])
for info in self.info_dict:
lines_to_print.append(self.info_dict[info])
for contig in self.contig_dict:
lines_to_print.append(self.contig_dict[contig])
for alt in self.alt_dict:
lines_to_print.append(self.alt_dict[alt])
for other in self.other_dict:
lines_to_print.append(self.other_dict[other])
lines_to_print.append('#'+ '\t'.join(self.header))
return lines_to_print | [
"def",
"print_header",
"(",
"self",
")",
":",
"lines_to_print",
"=",
"[",
"]",
"lines_to_print",
".",
"append",
"(",
"'##fileformat='",
"+",
"self",
".",
"fileformat",
")",
"if",
"self",
".",
"filedate",
":",
"lines_to_print",
".",
"append",
"(",
"'##fileformat='",
"+",
"self",
".",
"fileformat",
")",
"for",
"filt",
"in",
"self",
".",
"filter_dict",
":",
"lines_to_print",
".",
"append",
"(",
"self",
".",
"filter_dict",
"[",
"filt",
"]",
")",
"for",
"form",
"in",
"self",
".",
"format_dict",
":",
"lines_to_print",
".",
"append",
"(",
"self",
".",
"format_dict",
"[",
"form",
"]",
")",
"for",
"info",
"in",
"self",
".",
"info_dict",
":",
"lines_to_print",
".",
"append",
"(",
"self",
".",
"info_dict",
"[",
"info",
"]",
")",
"for",
"contig",
"in",
"self",
".",
"contig_dict",
":",
"lines_to_print",
".",
"append",
"(",
"self",
".",
"contig_dict",
"[",
"contig",
"]",
")",
"for",
"alt",
"in",
"self",
".",
"alt_dict",
":",
"lines_to_print",
".",
"append",
"(",
"self",
".",
"alt_dict",
"[",
"alt",
"]",
")",
"for",
"other",
"in",
"self",
".",
"other_dict",
":",
"lines_to_print",
".",
"append",
"(",
"self",
".",
"other_dict",
"[",
"other",
"]",
")",
"lines_to_print",
".",
"append",
"(",
"'#'",
"+",
"'\\t'",
".",
"join",
"(",
"self",
".",
"header",
")",
")",
"return",
"lines_to_print"
]
| Returns a list with the header lines if proper format | [
"Returns",
"a",
"list",
"with",
"the",
"header",
"lines",
"if",
"proper",
"format"
]
| 8e2b6724e31995e0d43af501f25974310c6b843b | https://github.com/moonso/vcf_parser/blob/8e2b6724e31995e0d43af501f25974310c6b843b/vcf_parser/header_parser.py#L185-L205 | train |
moonso/vcf_parser | vcf_parser/parser.py | VCFParser.add_variant | def add_variant(self, chrom, pos, rs_id, ref, alt, qual, filt, info, form=None, genotypes=[]):
"""
Add a variant to the parser.
This function is for building a vcf. It takes the relevant parameters
and make a vcf variant in the proper format.
"""
variant_info = [chrom, pos, rs_id, ref, alt, qual, filt, info]
if form:
variant_info.append(form)
for individual in genotypes:
variant_info.append(individual)
variant_line = '\t'.join(variant_info)
variant = format_variant(
line = variant_line,
header_parser = self.metadata,
check_info = self.check_info
)
if not (self.split_variants and len(variant['ALT'].split(',')) > 1):
self.variants.append(variant)
# If multiple alternative and split_variants we must split the variant
else:
for splitted_variant in split_variants(
variant_dict=variant,
header_parser=self.metadata,
allele_symbol=self.allele_symbol):
self.variants.append(splitted_variant) | python | def add_variant(self, chrom, pos, rs_id, ref, alt, qual, filt, info, form=None, genotypes=[]):
"""
Add a variant to the parser.
This function is for building a vcf. It takes the relevant parameters
and make a vcf variant in the proper format.
"""
variant_info = [chrom, pos, rs_id, ref, alt, qual, filt, info]
if form:
variant_info.append(form)
for individual in genotypes:
variant_info.append(individual)
variant_line = '\t'.join(variant_info)
variant = format_variant(
line = variant_line,
header_parser = self.metadata,
check_info = self.check_info
)
if not (self.split_variants and len(variant['ALT'].split(',')) > 1):
self.variants.append(variant)
# If multiple alternative and split_variants we must split the variant
else:
for splitted_variant in split_variants(
variant_dict=variant,
header_parser=self.metadata,
allele_symbol=self.allele_symbol):
self.variants.append(splitted_variant) | [
"def",
"add_variant",
"(",
"self",
",",
"chrom",
",",
"pos",
",",
"rs_id",
",",
"ref",
",",
"alt",
",",
"qual",
",",
"filt",
",",
"info",
",",
"form",
"=",
"None",
",",
"genotypes",
"=",
"[",
"]",
")",
":",
"variant_info",
"=",
"[",
"chrom",
",",
"pos",
",",
"rs_id",
",",
"ref",
",",
"alt",
",",
"qual",
",",
"filt",
",",
"info",
"]",
"if",
"form",
":",
"variant_info",
".",
"append",
"(",
"form",
")",
"for",
"individual",
"in",
"genotypes",
":",
"variant_info",
".",
"append",
"(",
"individual",
")",
"variant_line",
"=",
"'\\t'",
".",
"join",
"(",
"variant_info",
")",
"variant",
"=",
"format_variant",
"(",
"line",
"=",
"variant_line",
",",
"header_parser",
"=",
"self",
".",
"metadata",
",",
"check_info",
"=",
"self",
".",
"check_info",
")",
"if",
"not",
"(",
"self",
".",
"split_variants",
"and",
"len",
"(",
"variant",
"[",
"'ALT'",
"]",
".",
"split",
"(",
"','",
")",
")",
">",
"1",
")",
":",
"self",
".",
"variants",
".",
"append",
"(",
"variant",
")",
"# If multiple alternative and split_variants we must split the variant ",
"else",
":",
"for",
"splitted_variant",
"in",
"split_variants",
"(",
"variant_dict",
"=",
"variant",
",",
"header_parser",
"=",
"self",
".",
"metadata",
",",
"allele_symbol",
"=",
"self",
".",
"allele_symbol",
")",
":",
"self",
".",
"variants",
".",
"append",
"(",
"splitted_variant",
")"
]
| Add a variant to the parser.
This function is for building a vcf. It takes the relevant parameters
and make a vcf variant in the proper format. | [
"Add",
"a",
"variant",
"to",
"the",
"parser",
".",
"This",
"function",
"is",
"for",
"building",
"a",
"vcf",
".",
"It",
"takes",
"the",
"relevant",
"parameters",
"and",
"make",
"a",
"vcf",
"variant",
"in",
"the",
"proper",
"format",
"."
]
| 8e2b6724e31995e0d43af501f25974310c6b843b | https://github.com/moonso/vcf_parser/blob/8e2b6724e31995e0d43af501f25974310c6b843b/vcf_parser/parser.py#L173-L202 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC.content_get | def content_get(self, cid, nid=None):
"""Get data from post `cid` in network `nid`
:type nid: str
:param nid: This is the ID of the network (or class) from which
to query posts. This is optional and only to override the existing
`network_id` entered when created the class
:type cid: str|int
:param cid: This is the post ID which we grab
:returns: Python object containing returned data
"""
r = self.request(
method="content.get",
data={"cid": cid},
nid=nid
)
return self._handle_error(r, "Could not get post {}.".format(cid)) | python | def content_get(self, cid, nid=None):
"""Get data from post `cid` in network `nid`
:type nid: str
:param nid: This is the ID of the network (or class) from which
to query posts. This is optional and only to override the existing
`network_id` entered when created the class
:type cid: str|int
:param cid: This is the post ID which we grab
:returns: Python object containing returned data
"""
r = self.request(
method="content.get",
data={"cid": cid},
nid=nid
)
return self._handle_error(r, "Could not get post {}.".format(cid)) | [
"def",
"content_get",
"(",
"self",
",",
"cid",
",",
"nid",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"request",
"(",
"method",
"=",
"\"content.get\"",
",",
"data",
"=",
"{",
"\"cid\"",
":",
"cid",
"}",
",",
"nid",
"=",
"nid",
")",
"return",
"self",
".",
"_handle_error",
"(",
"r",
",",
"\"Could not get post {}.\"",
".",
"format",
"(",
"cid",
")",
")"
]
| Get data from post `cid` in network `nid`
:type nid: str
:param nid: This is the ID of the network (or class) from which
to query posts. This is optional and only to override the existing
`network_id` entered when created the class
:type cid: str|int
:param cid: This is the post ID which we grab
:returns: Python object containing returned data | [
"Get",
"data",
"from",
"post",
"cid",
"in",
"network",
"nid"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L82-L98 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC.content_create | def content_create(self, params):
"""Create a post or followup.
:type params: dict
:param params: A dict of options to pass to the endpoint. Depends on
the specific type of content being created.
:returns: Python object containing returned data
"""
r = self.request(
method="content.create",
data=params
)
return self._handle_error(
r,
"Could not create object {}.".format(repr(params))
) | python | def content_create(self, params):
"""Create a post or followup.
:type params: dict
:param params: A dict of options to pass to the endpoint. Depends on
the specific type of content being created.
:returns: Python object containing returned data
"""
r = self.request(
method="content.create",
data=params
)
return self._handle_error(
r,
"Could not create object {}.".format(repr(params))
) | [
"def",
"content_create",
"(",
"self",
",",
"params",
")",
":",
"r",
"=",
"self",
".",
"request",
"(",
"method",
"=",
"\"content.create\"",
",",
"data",
"=",
"params",
")",
"return",
"self",
".",
"_handle_error",
"(",
"r",
",",
"\"Could not create object {}.\"",
".",
"format",
"(",
"repr",
"(",
"params",
")",
")",
")"
]
| Create a post or followup.
:type params: dict
:param params: A dict of options to pass to the endpoint. Depends on
the specific type of content being created.
:returns: Python object containing returned data | [
"Create",
"a",
"post",
"or",
"followup",
"."
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L100-L115 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC.add_students | def add_students(self, student_emails, nid=None):
"""Enroll students in a network `nid`.
Piazza will email these students with instructions to
activate their account.
:type student_emails: list of str
:param student_emails: A listing of email addresses to enroll
in the network (or class). This can be a list of length one.
:type nid: str
:param nid: This is the ID of the network to add students
to. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts of user data of all of the users in the network
including the ones that were just added.
"""
r = self.request(
method="network.update",
data={
"from": "ClassSettingsPage",
"add_students": student_emails
},
nid=nid,
nid_key="id"
)
return self._handle_error(r, "Could not add users.") | python | def add_students(self, student_emails, nid=None):
"""Enroll students in a network `nid`.
Piazza will email these students with instructions to
activate their account.
:type student_emails: list of str
:param student_emails: A listing of email addresses to enroll
in the network (or class). This can be a list of length one.
:type nid: str
:param nid: This is the ID of the network to add students
to. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts of user data of all of the users in the network
including the ones that were just added.
"""
r = self.request(
method="network.update",
data={
"from": "ClassSettingsPage",
"add_students": student_emails
},
nid=nid,
nid_key="id"
)
return self._handle_error(r, "Could not add users.") | [
"def",
"add_students",
"(",
"self",
",",
"student_emails",
",",
"nid",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"request",
"(",
"method",
"=",
"\"network.update\"",
",",
"data",
"=",
"{",
"\"from\"",
":",
"\"ClassSettingsPage\"",
",",
"\"add_students\"",
":",
"student_emails",
"}",
",",
"nid",
"=",
"nid",
",",
"nid_key",
"=",
"\"id\"",
")",
"return",
"self",
".",
"_handle_error",
"(",
"r",
",",
"\"Could not add users.\"",
")"
]
| Enroll students in a network `nid`.
Piazza will email these students with instructions to
activate their account.
:type student_emails: list of str
:param student_emails: A listing of email addresses to enroll
in the network (or class). This can be a list of length one.
:type nid: str
:param nid: This is the ID of the network to add students
to. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts of user data of all of the users in the network
including the ones that were just added. | [
"Enroll",
"students",
"in",
"a",
"network",
"nid",
"."
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L185-L211 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC.get_all_users | def get_all_users(self, nid=None):
"""Get a listing of data for each user in a network `nid`
:type nid: str
:param nid: This is the ID of the network to get users
from. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts containing user data.
"""
r = self.request(
method="network.get_all_users",
nid=nid
)
return self._handle_error(r, "Could not get users.") | python | def get_all_users(self, nid=None):
"""Get a listing of data for each user in a network `nid`
:type nid: str
:param nid: This is the ID of the network to get users
from. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts containing user data.
"""
r = self.request(
method="network.get_all_users",
nid=nid
)
return self._handle_error(r, "Could not get users.") | [
"def",
"get_all_users",
"(",
"self",
",",
"nid",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"request",
"(",
"method",
"=",
"\"network.get_all_users\"",
",",
"nid",
"=",
"nid",
")",
"return",
"self",
".",
"_handle_error",
"(",
"r",
",",
"\"Could not get users.\"",
")"
]
| Get a listing of data for each user in a network `nid`
:type nid: str
:param nid: This is the ID of the network to get users
from. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts containing user data. | [
"Get",
"a",
"listing",
"of",
"data",
"for",
"each",
"user",
"in",
"a",
"network",
"nid"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L213-L227 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC.get_users | def get_users(self, user_ids, nid=None):
"""Get a listing of data for specific users `user_ids` in
a network `nid`
:type user_ids: list of str
:param user_ids: a list of user ids. These are the same
ids that are returned by get_all_users.
:type nid: str
:param nid: This is the ID of the network to get students
from. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts containing user data.
"""
r = self.request(
method="network.get_users",
data={"ids": user_ids},
nid=nid
)
return self._handle_error(r, "Could not get users.") | python | def get_users(self, user_ids, nid=None):
"""Get a listing of data for specific users `user_ids` in
a network `nid`
:type user_ids: list of str
:param user_ids: a list of user ids. These are the same
ids that are returned by get_all_users.
:type nid: str
:param nid: This is the ID of the network to get students
from. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts containing user data.
"""
r = self.request(
method="network.get_users",
data={"ids": user_ids},
nid=nid
)
return self._handle_error(r, "Could not get users.") | [
"def",
"get_users",
"(",
"self",
",",
"user_ids",
",",
"nid",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"request",
"(",
"method",
"=",
"\"network.get_users\"",
",",
"data",
"=",
"{",
"\"ids\"",
":",
"user_ids",
"}",
",",
"nid",
"=",
"nid",
")",
"return",
"self",
".",
"_handle_error",
"(",
"r",
",",
"\"Could not get users.\"",
")"
]
| Get a listing of data for specific users `user_ids` in
a network `nid`
:type user_ids: list of str
:param user_ids: a list of user ids. These are the same
ids that are returned by get_all_users.
:type nid: str
:param nid: This is the ID of the network to get students
from. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts containing user data. | [
"Get",
"a",
"listing",
"of",
"data",
"for",
"specific",
"users",
"user_ids",
"in",
"a",
"network",
"nid"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L229-L248 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC.remove_users | def remove_users(self, user_ids, nid=None):
"""Remove users from a network `nid`
:type user_ids: list of str
:param user_ids: a list of user ids. These are the same
ids that are returned by get_all_users.
:type nid: str
:param nid: This is the ID of the network to remove students
from. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts of user data of all of the users remaining in
the network after users are removed.
"""
r = self.request(
method="network.update",
data={"remove_users": user_ids},
nid=nid,
nid_key="id"
)
return self._handle_error(r, "Could not remove users.") | python | def remove_users(self, user_ids, nid=None):
"""Remove users from a network `nid`
:type user_ids: list of str
:param user_ids: a list of user ids. These are the same
ids that are returned by get_all_users.
:type nid: str
:param nid: This is the ID of the network to remove students
from. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts of user data of all of the users remaining in
the network after users are removed.
"""
r = self.request(
method="network.update",
data={"remove_users": user_ids},
nid=nid,
nid_key="id"
)
return self._handle_error(r, "Could not remove users.") | [
"def",
"remove_users",
"(",
"self",
",",
"user_ids",
",",
"nid",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"request",
"(",
"method",
"=",
"\"network.update\"",
",",
"data",
"=",
"{",
"\"remove_users\"",
":",
"user_ids",
"}",
",",
"nid",
"=",
"nid",
",",
"nid_key",
"=",
"\"id\"",
")",
"return",
"self",
".",
"_handle_error",
"(",
"r",
",",
"\"Could not remove users.\"",
")"
]
| Remove users from a network `nid`
:type user_ids: list of str
:param user_ids: a list of user ids. These are the same
ids that are returned by get_all_users.
:type nid: str
:param nid: This is the ID of the network to remove students
from. This is optional and only to override the existing
`network_id` entered when created the class
:returns: Python object containing returned data, a list
of dicts of user data of all of the users remaining in
the network after users are removed. | [
"Remove",
"users",
"from",
"a",
"network",
"nid"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L250-L270 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC.get_my_feed | def get_my_feed(self, limit=150, offset=20, sort="updated", nid=None):
"""Get my feed
:type limit: int
:param limit: Number of posts from feed to get, starting from ``offset``
:type offset: int
:param offset: Offset starting from bottom of feed
:type sort: str
:param sort: How to sort feed that will be retrieved; only current
known value is "updated"
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when created the class
"""
r = self.request(
method="network.get_my_feed",
nid=nid,
data=dict(
limit=limit,
offset=offset,
sort=sort
)
)
return self._handle_error(r, "Could not retrieve your feed.") | python | def get_my_feed(self, limit=150, offset=20, sort="updated", nid=None):
"""Get my feed
:type limit: int
:param limit: Number of posts from feed to get, starting from ``offset``
:type offset: int
:param offset: Offset starting from bottom of feed
:type sort: str
:param sort: How to sort feed that will be retrieved; only current
known value is "updated"
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when created the class
"""
r = self.request(
method="network.get_my_feed",
nid=nid,
data=dict(
limit=limit,
offset=offset,
sort=sort
)
)
return self._handle_error(r, "Could not retrieve your feed.") | [
"def",
"get_my_feed",
"(",
"self",
",",
"limit",
"=",
"150",
",",
"offset",
"=",
"20",
",",
"sort",
"=",
"\"updated\"",
",",
"nid",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"request",
"(",
"method",
"=",
"\"network.get_my_feed\"",
",",
"nid",
"=",
"nid",
",",
"data",
"=",
"dict",
"(",
"limit",
"=",
"limit",
",",
"offset",
"=",
"offset",
",",
"sort",
"=",
"sort",
")",
")",
"return",
"self",
".",
"_handle_error",
"(",
"r",
",",
"\"Could not retrieve your feed.\"",
")"
]
| Get my feed
:type limit: int
:param limit: Number of posts from feed to get, starting from ``offset``
:type offset: int
:param offset: Offset starting from bottom of feed
:type sort: str
:param sort: How to sort feed that will be retrieved; only current
known value is "updated"
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when created the class | [
"Get",
"my",
"feed"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L272-L296 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC.filter_feed | def filter_feed(self, updated=False, following=False, folder=False,
filter_folder="", sort="updated", nid=None):
"""Get filtered feed
Only one filter type (updated, following, folder) is possible.
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when created the class
:type sort: str
:param sort: How to sort feed that will be retrieved; only current
known value is "updated"
:type updated: bool
:param updated: Set to filter through only posts which have been updated
since you last read them
:type following: bool
:param following: Set to filter through only posts which you are
following
:type folder: bool
:param folder: Set to filter through only posts which are in the
provided ``filter_folder``
:type filter_folder: str
:param filter_folder: Name of folder to show posts from; required
only if ``folder`` is set
"""
assert sum([updated, following, folder]) == 1
if folder:
assert filter_folder
if updated:
filter_type = dict(updated=1)
elif following:
filter_type = dict(following=1)
else:
filter_type = dict(folder=1, filter_folder=filter_folder)
r = self.request(
nid=nid,
method="network.filter_feed",
data=dict(
sort=sort,
**filter_type
)
)
return self._handle_error(r, "Could not retrieve filtered feed.") | python | def filter_feed(self, updated=False, following=False, folder=False,
filter_folder="", sort="updated", nid=None):
"""Get filtered feed
Only one filter type (updated, following, folder) is possible.
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when created the class
:type sort: str
:param sort: How to sort feed that will be retrieved; only current
known value is "updated"
:type updated: bool
:param updated: Set to filter through only posts which have been updated
since you last read them
:type following: bool
:param following: Set to filter through only posts which you are
following
:type folder: bool
:param folder: Set to filter through only posts which are in the
provided ``filter_folder``
:type filter_folder: str
:param filter_folder: Name of folder to show posts from; required
only if ``folder`` is set
"""
assert sum([updated, following, folder]) == 1
if folder:
assert filter_folder
if updated:
filter_type = dict(updated=1)
elif following:
filter_type = dict(following=1)
else:
filter_type = dict(folder=1, filter_folder=filter_folder)
r = self.request(
nid=nid,
method="network.filter_feed",
data=dict(
sort=sort,
**filter_type
)
)
return self._handle_error(r, "Could not retrieve filtered feed.") | [
"def",
"filter_feed",
"(",
"self",
",",
"updated",
"=",
"False",
",",
"following",
"=",
"False",
",",
"folder",
"=",
"False",
",",
"filter_folder",
"=",
"\"\"",
",",
"sort",
"=",
"\"updated\"",
",",
"nid",
"=",
"None",
")",
":",
"assert",
"sum",
"(",
"[",
"updated",
",",
"following",
",",
"folder",
"]",
")",
"==",
"1",
"if",
"folder",
":",
"assert",
"filter_folder",
"if",
"updated",
":",
"filter_type",
"=",
"dict",
"(",
"updated",
"=",
"1",
")",
"elif",
"following",
":",
"filter_type",
"=",
"dict",
"(",
"following",
"=",
"1",
")",
"else",
":",
"filter_type",
"=",
"dict",
"(",
"folder",
"=",
"1",
",",
"filter_folder",
"=",
"filter_folder",
")",
"r",
"=",
"self",
".",
"request",
"(",
"nid",
"=",
"nid",
",",
"method",
"=",
"\"network.filter_feed\"",
",",
"data",
"=",
"dict",
"(",
"sort",
"=",
"sort",
",",
"*",
"*",
"filter_type",
")",
")",
"return",
"self",
".",
"_handle_error",
"(",
"r",
",",
"\"Could not retrieve filtered feed.\"",
")"
]
| Get filtered feed
Only one filter type (updated, following, folder) is possible.
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when created the class
:type sort: str
:param sort: How to sort feed that will be retrieved; only current
known value is "updated"
:type updated: bool
:param updated: Set to filter through only posts which have been updated
since you last read them
:type following: bool
:param following: Set to filter through only posts which you are
following
:type folder: bool
:param folder: Set to filter through only posts which are in the
provided ``filter_folder``
:type filter_folder: str
:param filter_folder: Name of folder to show posts from; required
only if ``folder`` is set | [
"Get",
"filtered",
"feed"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L298-L343 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC.search | def search(self, query, nid=None):
"""Search for posts with ``query``
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when created the class
:type query: str
:param query: The search query; should just be keywords for posts
that you are looking for
"""
r = self.request(
method="network.search",
nid=nid,
data=dict(query=query)
)
return self._handle_error(r, "Search with query '{}' failed."
.format(query)) | python | def search(self, query, nid=None):
"""Search for posts with ``query``
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when created the class
:type query: str
:param query: The search query; should just be keywords for posts
that you are looking for
"""
r = self.request(
method="network.search",
nid=nid,
data=dict(query=query)
)
return self._handle_error(r, "Search with query '{}' failed."
.format(query)) | [
"def",
"search",
"(",
"self",
",",
"query",
",",
"nid",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"request",
"(",
"method",
"=",
"\"network.search\"",
",",
"nid",
"=",
"nid",
",",
"data",
"=",
"dict",
"(",
"query",
"=",
"query",
")",
")",
"return",
"self",
".",
"_handle_error",
"(",
"r",
",",
"\"Search with query '{}' failed.\"",
".",
"format",
"(",
"query",
")",
")"
]
| Search for posts with ``query``
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when created the class
:type query: str
:param query: The search query; should just be keywords for posts
that you are looking for | [
"Search",
"for",
"posts",
"with",
"query"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L345-L362 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC.get_stats | def get_stats(self, nid=None):
"""Get statistics for class
:type nid: str
:param nid: This is the ID of the network to get stats
from. This is optional and only to override the existing
`network_id` entered when created the class
"""
r = self.request(
api_type="main",
method="network.get_stats",
nid=nid,
)
return self._handle_error(r, "Could not retrieve stats for class.") | python | def get_stats(self, nid=None):
"""Get statistics for class
:type nid: str
:param nid: This is the ID of the network to get stats
from. This is optional and only to override the existing
`network_id` entered when created the class
"""
r = self.request(
api_type="main",
method="network.get_stats",
nid=nid,
)
return self._handle_error(r, "Could not retrieve stats for class.") | [
"def",
"get_stats",
"(",
"self",
",",
"nid",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"request",
"(",
"api_type",
"=",
"\"main\"",
",",
"method",
"=",
"\"network.get_stats\"",
",",
"nid",
"=",
"nid",
",",
")",
"return",
"self",
".",
"_handle_error",
"(",
"r",
",",
"\"Could not retrieve stats for class.\"",
")"
]
| Get statistics for class
:type nid: str
:param nid: This is the ID of the network to get stats
from. This is optional and only to override the existing
`network_id` entered when created the class | [
"Get",
"statistics",
"for",
"class"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L364-L377 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC.request | def request(self, method, data=None, nid=None, nid_key='nid',
api_type="logic", return_response=False):
"""Get data from arbitrary Piazza API endpoint `method` in network `nid`
:type method: str
:param method: An internal Piazza API method name like `content.get`
or `network.get_users`
:type data: dict
:param data: Key-value data to pass to Piazza in the request
:type nid: str
:param nid: This is the ID of the network to which the request
should be made. This is optional and only to override the
existing `network_id` entered when creating the class
:type nid_key: str
:param nid_key: Name expected by Piazza for `nid` when making request.
(Usually and by default "nid", but sometimes "id" is expected)
:returns: Python object containing returned data
:type return_response: bool
:param return_response: If set, returns whole :class:`requests.Response`
object rather than just the response body
"""
self._check_authenticated()
nid = nid if nid else self._nid
if data is None:
data = {}
headers = {}
if "session_id" in self.session.cookies:
headers["CSRF-Token"] = self.session.cookies["session_id"]
# Adding a nonce to the request
endpoint = self.base_api_urls[api_type]
if api_type == "logic":
endpoint += "?method={}&aid={}".format(
method,
_piazza_nonce()
)
response = self.session.post(
endpoint,
data=json.dumps({
"method": method,
"params": dict({nid_key: nid}, **data)
}),
headers=headers
)
return response if return_response else response.json() | python | def request(self, method, data=None, nid=None, nid_key='nid',
api_type="logic", return_response=False):
"""Get data from arbitrary Piazza API endpoint `method` in network `nid`
:type method: str
:param method: An internal Piazza API method name like `content.get`
or `network.get_users`
:type data: dict
:param data: Key-value data to pass to Piazza in the request
:type nid: str
:param nid: This is the ID of the network to which the request
should be made. This is optional and only to override the
existing `network_id` entered when creating the class
:type nid_key: str
:param nid_key: Name expected by Piazza for `nid` when making request.
(Usually and by default "nid", but sometimes "id" is expected)
:returns: Python object containing returned data
:type return_response: bool
:param return_response: If set, returns whole :class:`requests.Response`
object rather than just the response body
"""
self._check_authenticated()
nid = nid if nid else self._nid
if data is None:
data = {}
headers = {}
if "session_id" in self.session.cookies:
headers["CSRF-Token"] = self.session.cookies["session_id"]
# Adding a nonce to the request
endpoint = self.base_api_urls[api_type]
if api_type == "logic":
endpoint += "?method={}&aid={}".format(
method,
_piazza_nonce()
)
response = self.session.post(
endpoint,
data=json.dumps({
"method": method,
"params": dict({nid_key: nid}, **data)
}),
headers=headers
)
return response if return_response else response.json() | [
"def",
"request",
"(",
"self",
",",
"method",
",",
"data",
"=",
"None",
",",
"nid",
"=",
"None",
",",
"nid_key",
"=",
"'nid'",
",",
"api_type",
"=",
"\"logic\"",
",",
"return_response",
"=",
"False",
")",
":",
"self",
".",
"_check_authenticated",
"(",
")",
"nid",
"=",
"nid",
"if",
"nid",
"else",
"self",
".",
"_nid",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"{",
"}",
"headers",
"=",
"{",
"}",
"if",
"\"session_id\"",
"in",
"self",
".",
"session",
".",
"cookies",
":",
"headers",
"[",
"\"CSRF-Token\"",
"]",
"=",
"self",
".",
"session",
".",
"cookies",
"[",
"\"session_id\"",
"]",
"# Adding a nonce to the request",
"endpoint",
"=",
"self",
".",
"base_api_urls",
"[",
"api_type",
"]",
"if",
"api_type",
"==",
"\"logic\"",
":",
"endpoint",
"+=",
"\"?method={}&aid={}\"",
".",
"format",
"(",
"method",
",",
"_piazza_nonce",
"(",
")",
")",
"response",
"=",
"self",
".",
"session",
".",
"post",
"(",
"endpoint",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"method\"",
":",
"method",
",",
"\"params\"",
":",
"dict",
"(",
"{",
"nid_key",
":",
"nid",
"}",
",",
"*",
"*",
"data",
")",
"}",
")",
",",
"headers",
"=",
"headers",
")",
"return",
"response",
"if",
"return_response",
"else",
"response",
".",
"json",
"(",
")"
]
| Get data from arbitrary Piazza API endpoint `method` in network `nid`
:type method: str
:param method: An internal Piazza API method name like `content.get`
or `network.get_users`
:type data: dict
:param data: Key-value data to pass to Piazza in the request
:type nid: str
:param nid: This is the ID of the network to which the request
should be made. This is optional and only to override the
existing `network_id` entered when creating the class
:type nid_key: str
:param nid_key: Name expected by Piazza for `nid` when making request.
(Usually and by default "nid", but sometimes "id" is expected)
:returns: Python object containing returned data
:type return_response: bool
:param return_response: If set, returns whole :class:`requests.Response`
object rather than just the response body | [
"Get",
"data",
"from",
"arbitrary",
"Piazza",
"API",
"endpoint",
"method",
"in",
"network",
"nid"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L392-L439 | train |
hfaran/piazza-api | piazza_api/rpc.py | PiazzaRPC._handle_error | def _handle_error(self, result, err_msg):
"""Check result for error
:type result: dict
:param result: response body
:type err_msg: str
:param err_msg: The message given to the :class:`RequestError` instance
raised
:returns: Actual result from result
:raises RequestError: If result has error
"""
if result.get(u'error'):
raise RequestError("{}\nResponse: {}".format(
err_msg,
json.dumps(result, indent=2)
))
else:
return result.get(u'result') | python | def _handle_error(self, result, err_msg):
"""Check result for error
:type result: dict
:param result: response body
:type err_msg: str
:param err_msg: The message given to the :class:`RequestError` instance
raised
:returns: Actual result from result
:raises RequestError: If result has error
"""
if result.get(u'error'):
raise RequestError("{}\nResponse: {}".format(
err_msg,
json.dumps(result, indent=2)
))
else:
return result.get(u'result') | [
"def",
"_handle_error",
"(",
"self",
",",
"result",
",",
"err_msg",
")",
":",
"if",
"result",
".",
"get",
"(",
"u'error'",
")",
":",
"raise",
"RequestError",
"(",
"\"{}\\nResponse: {}\"",
".",
"format",
"(",
"err_msg",
",",
"json",
".",
"dumps",
"(",
"result",
",",
"indent",
"=",
"2",
")",
")",
")",
"else",
":",
"return",
"result",
".",
"get",
"(",
"u'result'",
")"
]
| Check result for error
:type result: dict
:param result: response body
:type err_msg: str
:param err_msg: The message given to the :class:`RequestError` instance
raised
:returns: Actual result from result
:raises RequestError: If result has error | [
"Check",
"result",
"for",
"error"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L454-L471 | train |
hfaran/piazza-api | piazza_api/piazza.py | Piazza.get_user_classes | def get_user_classes(self):
"""Get list of the current user's classes. This is a subset of the
information returned by the call to ``get_user_status``.
:returns: Classes of currently authenticated user
:rtype: list
"""
# Previously getting classes from profile (such a list is incomplete)
# raw_classes = self.get_user_profile().get('all_classes').values()
# Get classes from the user status (includes all classes)
status = self.get_user_status()
uid = status['id']
raw_classes = status.get('networks', [])
classes = []
for rawc in raw_classes:
c = {k: rawc[k] for k in ['name', 'term']}
c['num'] = rawc.get('course_number', '')
c['nid'] = rawc['id']
c['is_ta'] = uid in rawc['prof_hash']
classes.append(c)
return classes | python | def get_user_classes(self):
"""Get list of the current user's classes. This is a subset of the
information returned by the call to ``get_user_status``.
:returns: Classes of currently authenticated user
:rtype: list
"""
# Previously getting classes from profile (such a list is incomplete)
# raw_classes = self.get_user_profile().get('all_classes').values()
# Get classes from the user status (includes all classes)
status = self.get_user_status()
uid = status['id']
raw_classes = status.get('networks', [])
classes = []
for rawc in raw_classes:
c = {k: rawc[k] for k in ['name', 'term']}
c['num'] = rawc.get('course_number', '')
c['nid'] = rawc['id']
c['is_ta'] = uid in rawc['prof_hash']
classes.append(c)
return classes | [
"def",
"get_user_classes",
"(",
"self",
")",
":",
"# Previously getting classes from profile (such a list is incomplete)",
"# raw_classes = self.get_user_profile().get('all_classes').values()",
"# Get classes from the user status (includes all classes)",
"status",
"=",
"self",
".",
"get_user_status",
"(",
")",
"uid",
"=",
"status",
"[",
"'id'",
"]",
"raw_classes",
"=",
"status",
".",
"get",
"(",
"'networks'",
",",
"[",
"]",
")",
"classes",
"=",
"[",
"]",
"for",
"rawc",
"in",
"raw_classes",
":",
"c",
"=",
"{",
"k",
":",
"rawc",
"[",
"k",
"]",
"for",
"k",
"in",
"[",
"'name'",
",",
"'term'",
"]",
"}",
"c",
"[",
"'num'",
"]",
"=",
"rawc",
".",
"get",
"(",
"'course_number'",
",",
"''",
")",
"c",
"[",
"'nid'",
"]",
"=",
"rawc",
"[",
"'id'",
"]",
"c",
"[",
"'is_ta'",
"]",
"=",
"uid",
"in",
"rawc",
"[",
"'prof_hash'",
"]",
"classes",
".",
"append",
"(",
"c",
")",
"return",
"classes"
]
| Get list of the current user's classes. This is a subset of the
information returned by the call to ``get_user_status``.
:returns: Classes of currently authenticated user
:rtype: list | [
"Get",
"list",
"of",
"the",
"current",
"user",
"s",
"classes",
".",
"This",
"is",
"a",
"subset",
"of",
"the",
"information",
"returned",
"by",
"the",
"call",
"to",
"get_user_status",
"."
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/piazza.py#L66-L89 | train |
hfaran/piazza-api | piazza_api/nonce.py | nonce | def nonce():
"""
Returns a new nonce to be used with the Piazza API.
"""
nonce_part1 = _int2base(int(_time()*1000), 36)
nonce_part2 = _int2base(round(_random()*1679616), 36)
return "{}{}".format(nonce_part1, nonce_part2) | python | def nonce():
"""
Returns a new nonce to be used with the Piazza API.
"""
nonce_part1 = _int2base(int(_time()*1000), 36)
nonce_part2 = _int2base(round(_random()*1679616), 36)
return "{}{}".format(nonce_part1, nonce_part2) | [
"def",
"nonce",
"(",
")",
":",
"nonce_part1",
"=",
"_int2base",
"(",
"int",
"(",
"_time",
"(",
")",
"*",
"1000",
")",
",",
"36",
")",
"nonce_part2",
"=",
"_int2base",
"(",
"round",
"(",
"_random",
"(",
")",
"*",
"1679616",
")",
",",
"36",
")",
"return",
"\"{}{}\"",
".",
"format",
"(",
"nonce_part1",
",",
"nonce_part2",
")"
]
| Returns a new nonce to be used with the Piazza API. | [
"Returns",
"a",
"new",
"nonce",
"to",
"be",
"used",
"with",
"the",
"Piazza",
"API",
"."
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/nonce.py#L7-L13 | train |
hfaran/piazza-api | piazza_api/network.py | Network.iter_all_posts | def iter_all_posts(self, limit=None):
"""Get all posts visible to the current user
This grabs you current feed and ids of all posts from it; each post
is then individually fetched. This method does not go against
a bulk endpoint; it retrieves each post individually, so a
caution to the user when using this.
:type limit: int|None
:param limit: If given, will limit the number of posts to fetch
before the generator is exhausted and raises StopIteration.
No special consideration is given to `0`; provide `None` to
retrieve all posts.
:returns: An iterator which yields all posts which the current user
can view
:rtype: generator
"""
feed = self.get_feed(limit=999999, offset=0)
cids = [post['id'] for post in feed["feed"]]
if limit is not None:
cids = cids[:limit]
for cid in cids:
yield self.get_post(cid) | python | def iter_all_posts(self, limit=None):
"""Get all posts visible to the current user
This grabs you current feed and ids of all posts from it; each post
is then individually fetched. This method does not go against
a bulk endpoint; it retrieves each post individually, so a
caution to the user when using this.
:type limit: int|None
:param limit: If given, will limit the number of posts to fetch
before the generator is exhausted and raises StopIteration.
No special consideration is given to `0`; provide `None` to
retrieve all posts.
:returns: An iterator which yields all posts which the current user
can view
:rtype: generator
"""
feed = self.get_feed(limit=999999, offset=0)
cids = [post['id'] for post in feed["feed"]]
if limit is not None:
cids = cids[:limit]
for cid in cids:
yield self.get_post(cid) | [
"def",
"iter_all_posts",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"feed",
"=",
"self",
".",
"get_feed",
"(",
"limit",
"=",
"999999",
",",
"offset",
"=",
"0",
")",
"cids",
"=",
"[",
"post",
"[",
"'id'",
"]",
"for",
"post",
"in",
"feed",
"[",
"\"feed\"",
"]",
"]",
"if",
"limit",
"is",
"not",
"None",
":",
"cids",
"=",
"cids",
"[",
":",
"limit",
"]",
"for",
"cid",
"in",
"cids",
":",
"yield",
"self",
".",
"get_post",
"(",
"cid",
")"
]
| Get all posts visible to the current user
This grabs you current feed and ids of all posts from it; each post
is then individually fetched. This method does not go against
a bulk endpoint; it retrieves each post individually, so a
caution to the user when using this.
:type limit: int|None
:param limit: If given, will limit the number of posts to fetch
before the generator is exhausted and raises StopIteration.
No special consideration is given to `0`; provide `None` to
retrieve all posts.
:returns: An iterator which yields all posts which the current user
can view
:rtype: generator | [
"Get",
"all",
"posts",
"visible",
"to",
"the",
"current",
"user"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L85-L107 | train |
hfaran/piazza-api | piazza_api/network.py | Network.create_post | def create_post(self, post_type, post_folders, post_subject, post_content, is_announcement=0, bypass_email=0, anonymous=False):
"""Create a post
It seems like if the post has `<p>` tags, then it's treated as HTML,
but is treated as text otherwise. You'll want to provide `content`
accordingly.
:type post_type: str
:param post_type: 'note', 'question'
:type post_folders: str
:param post_folders: Folder to put post into
:type post_subject: str
:param post_subject: Subject string
:type post_content: str
:param post_content: Content string
:type is_announcement: bool
:param is_announcement:
:type bypass_email: bool
:param bypass_email:
:type anonymous: bool
:param anonymous:
:rtype: dict
:returns: Dictionary with information about the created post.
"""
params = {
"anonymous": "yes" if anonymous else "no",
"subject": post_subject,
"content": post_content,
"folders": post_folders,
"type": post_type,
"config": {
"bypass_email": bypass_email,
"is_announcement": is_announcement
}
}
return self._rpc.content_create(params) | python | def create_post(self, post_type, post_folders, post_subject, post_content, is_announcement=0, bypass_email=0, anonymous=False):
"""Create a post
It seems like if the post has `<p>` tags, then it's treated as HTML,
but is treated as text otherwise. You'll want to provide `content`
accordingly.
:type post_type: str
:param post_type: 'note', 'question'
:type post_folders: str
:param post_folders: Folder to put post into
:type post_subject: str
:param post_subject: Subject string
:type post_content: str
:param post_content: Content string
:type is_announcement: bool
:param is_announcement:
:type bypass_email: bool
:param bypass_email:
:type anonymous: bool
:param anonymous:
:rtype: dict
:returns: Dictionary with information about the created post.
"""
params = {
"anonymous": "yes" if anonymous else "no",
"subject": post_subject,
"content": post_content,
"folders": post_folders,
"type": post_type,
"config": {
"bypass_email": bypass_email,
"is_announcement": is_announcement
}
}
return self._rpc.content_create(params) | [
"def",
"create_post",
"(",
"self",
",",
"post_type",
",",
"post_folders",
",",
"post_subject",
",",
"post_content",
",",
"is_announcement",
"=",
"0",
",",
"bypass_email",
"=",
"0",
",",
"anonymous",
"=",
"False",
")",
":",
"params",
"=",
"{",
"\"anonymous\"",
":",
"\"yes\"",
"if",
"anonymous",
"else",
"\"no\"",
",",
"\"subject\"",
":",
"post_subject",
",",
"\"content\"",
":",
"post_content",
",",
"\"folders\"",
":",
"post_folders",
",",
"\"type\"",
":",
"post_type",
",",
"\"config\"",
":",
"{",
"\"bypass_email\"",
":",
"bypass_email",
",",
"\"is_announcement\"",
":",
"is_announcement",
"}",
"}",
"return",
"self",
".",
"_rpc",
".",
"content_create",
"(",
"params",
")"
]
| Create a post
It seems like if the post has `<p>` tags, then it's treated as HTML,
but is treated as text otherwise. You'll want to provide `content`
accordingly.
:type post_type: str
:param post_type: 'note', 'question'
:type post_folders: str
:param post_folders: Folder to put post into
:type post_subject: str
:param post_subject: Subject string
:type post_content: str
:param post_content: Content string
:type is_announcement: bool
:param is_announcement:
:type bypass_email: bool
:param bypass_email:
:type anonymous: bool
:param anonymous:
:rtype: dict
:returns: Dictionary with information about the created post. | [
"Create",
"a",
"post"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L109-L145 | train |
hfaran/piazza-api | piazza_api/network.py | Network.create_followup | def create_followup(self, post, content, anonymous=False):
"""Create a follow-up on a post `post`.
It seems like if the post has `<p>` tags, then it's treated as HTML,
but is treated as text otherwise. You'll want to provide `content`
accordingly.
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:type content: str
:param content: The content of the followup.
:type anonymous: bool
:param anonymous: Whether or not to post anonymously.
:rtype: dict
:returns: Dictionary with information about the created follow-up.
"""
try:
cid = post["id"]
except KeyError:
cid = post
params = {
"cid": cid,
"type": "followup",
# For followups, the content is actually put into the subject.
"subject": content,
"content": "",
"anonymous": "yes" if anonymous else "no",
}
return self._rpc.content_create(params) | python | def create_followup(self, post, content, anonymous=False):
"""Create a follow-up on a post `post`.
It seems like if the post has `<p>` tags, then it's treated as HTML,
but is treated as text otherwise. You'll want to provide `content`
accordingly.
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:type content: str
:param content: The content of the followup.
:type anonymous: bool
:param anonymous: Whether or not to post anonymously.
:rtype: dict
:returns: Dictionary with information about the created follow-up.
"""
try:
cid = post["id"]
except KeyError:
cid = post
params = {
"cid": cid,
"type": "followup",
# For followups, the content is actually put into the subject.
"subject": content,
"content": "",
"anonymous": "yes" if anonymous else "no",
}
return self._rpc.content_create(params) | [
"def",
"create_followup",
"(",
"self",
",",
"post",
",",
"content",
",",
"anonymous",
"=",
"False",
")",
":",
"try",
":",
"cid",
"=",
"post",
"[",
"\"id\"",
"]",
"except",
"KeyError",
":",
"cid",
"=",
"post",
"params",
"=",
"{",
"\"cid\"",
":",
"cid",
",",
"\"type\"",
":",
"\"followup\"",
",",
"# For followups, the content is actually put into the subject.",
"\"subject\"",
":",
"content",
",",
"\"content\"",
":",
"\"\"",
",",
"\"anonymous\"",
":",
"\"yes\"",
"if",
"anonymous",
"else",
"\"no\"",
",",
"}",
"return",
"self",
".",
"_rpc",
".",
"content_create",
"(",
"params",
")"
]
| Create a follow-up on a post `post`.
It seems like if the post has `<p>` tags, then it's treated as HTML,
but is treated as text otherwise. You'll want to provide `content`
accordingly.
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:type content: str
:param content: The content of the followup.
:type anonymous: bool
:param anonymous: Whether or not to post anonymously.
:rtype: dict
:returns: Dictionary with information about the created follow-up. | [
"Create",
"a",
"follow",
"-",
"up",
"on",
"a",
"post",
"post",
"."
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L147-L179 | train |
hfaran/piazza-api | piazza_api/network.py | Network.create_instructor_answer | def create_instructor_answer(self, post, content, revision, anonymous=False):
"""Create an instructor's answer to a post `post`.
It seems like if the post has `<p>` tags, then it's treated as HTML,
but is treated as text otherwise. You'll want to provide `content`
accordingly.
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:type content: str
:param content: The content of the answer.
:type revision: int
:param revision: The number of revisions the answer has gone through.
The first responder should out 0, the first editor 1, etc.
:type anonymous: bool
:param anonymous: Whether or not to post anonymously.
:rtype: dict
:returns: Dictionary with information about the created answer.
"""
try:
cid = post["id"]
except KeyError:
cid = post
params = {
"cid": cid,
"type": "i_answer",
"content": content,
"revision": revision,
"anonymous": "yes" if anonymous else "no",
}
return self._rpc.content_instructor_answer(params) | python | def create_instructor_answer(self, post, content, revision, anonymous=False):
"""Create an instructor's answer to a post `post`.
It seems like if the post has `<p>` tags, then it's treated as HTML,
but is treated as text otherwise. You'll want to provide `content`
accordingly.
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:type content: str
:param content: The content of the answer.
:type revision: int
:param revision: The number of revisions the answer has gone through.
The first responder should out 0, the first editor 1, etc.
:type anonymous: bool
:param anonymous: Whether or not to post anonymously.
:rtype: dict
:returns: Dictionary with information about the created answer.
"""
try:
cid = post["id"]
except KeyError:
cid = post
params = {
"cid": cid,
"type": "i_answer",
"content": content,
"revision": revision,
"anonymous": "yes" if anonymous else "no",
}
return self._rpc.content_instructor_answer(params) | [
"def",
"create_instructor_answer",
"(",
"self",
",",
"post",
",",
"content",
",",
"revision",
",",
"anonymous",
"=",
"False",
")",
":",
"try",
":",
"cid",
"=",
"post",
"[",
"\"id\"",
"]",
"except",
"KeyError",
":",
"cid",
"=",
"post",
"params",
"=",
"{",
"\"cid\"",
":",
"cid",
",",
"\"type\"",
":",
"\"i_answer\"",
",",
"\"content\"",
":",
"content",
",",
"\"revision\"",
":",
"revision",
",",
"\"anonymous\"",
":",
"\"yes\"",
"if",
"anonymous",
"else",
"\"no\"",
",",
"}",
"return",
"self",
".",
"_rpc",
".",
"content_instructor_answer",
"(",
"params",
")"
]
| Create an instructor's answer to a post `post`.
It seems like if the post has `<p>` tags, then it's treated as HTML,
but is treated as text otherwise. You'll want to provide `content`
accordingly.
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:type content: str
:param content: The content of the answer.
:type revision: int
:param revision: The number of revisions the answer has gone through.
The first responder should out 0, the first editor 1, etc.
:type anonymous: bool
:param anonymous: Whether or not to post anonymously.
:rtype: dict
:returns: Dictionary with information about the created answer. | [
"Create",
"an",
"instructor",
"s",
"answer",
"to",
"a",
"post",
"post",
"."
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L181-L213 | train |
hfaran/piazza-api | piazza_api/network.py | Network.mark_as_duplicate | def mark_as_duplicate(self, duplicated_cid, master_cid, msg=''):
"""Mark the post at ``duplicated_cid`` as a duplicate of ``master_cid``
:type duplicated_cid: int
:param duplicated_cid: The numeric id of the duplicated post
:type master_cid: int
:param master_cid: The numeric id of an older post. This will be the
post that gets kept and ``duplicated_cid`` post will be concatinated
as a follow up to ``master_cid`` post.
:type msg: string
:param msg: the optional message (or reason for marking as duplicate)
:returns: True if it is successful. False otherwise
"""
content_id_from = self.get_post(duplicated_cid)["id"]
content_id_to = self.get_post(master_cid)["id"]
params = {
"cid_dupe": content_id_from,
"cid_to": content_id_to,
"msg": msg
}
return self._rpc.content_mark_duplicate(params) | python | def mark_as_duplicate(self, duplicated_cid, master_cid, msg=''):
"""Mark the post at ``duplicated_cid`` as a duplicate of ``master_cid``
:type duplicated_cid: int
:param duplicated_cid: The numeric id of the duplicated post
:type master_cid: int
:param master_cid: The numeric id of an older post. This will be the
post that gets kept and ``duplicated_cid`` post will be concatinated
as a follow up to ``master_cid`` post.
:type msg: string
:param msg: the optional message (or reason for marking as duplicate)
:returns: True if it is successful. False otherwise
"""
content_id_from = self.get_post(duplicated_cid)["id"]
content_id_to = self.get_post(master_cid)["id"]
params = {
"cid_dupe": content_id_from,
"cid_to": content_id_to,
"msg": msg
}
return self._rpc.content_mark_duplicate(params) | [
"def",
"mark_as_duplicate",
"(",
"self",
",",
"duplicated_cid",
",",
"master_cid",
",",
"msg",
"=",
"''",
")",
":",
"content_id_from",
"=",
"self",
".",
"get_post",
"(",
"duplicated_cid",
")",
"[",
"\"id\"",
"]",
"content_id_to",
"=",
"self",
".",
"get_post",
"(",
"master_cid",
")",
"[",
"\"id\"",
"]",
"params",
"=",
"{",
"\"cid_dupe\"",
":",
"content_id_from",
",",
"\"cid_to\"",
":",
"content_id_to",
",",
"\"msg\"",
":",
"msg",
"}",
"return",
"self",
".",
"_rpc",
".",
"content_mark_duplicate",
"(",
"params",
")"
]
| Mark the post at ``duplicated_cid`` as a duplicate of ``master_cid``
:type duplicated_cid: int
:param duplicated_cid: The numeric id of the duplicated post
:type master_cid: int
:param master_cid: The numeric id of an older post. This will be the
post that gets kept and ``duplicated_cid`` post will be concatinated
as a follow up to ``master_cid`` post.
:type msg: string
:param msg: the optional message (or reason for marking as duplicate)
:returns: True if it is successful. False otherwise | [
"Mark",
"the",
"post",
"at",
"duplicated_cid",
"as",
"a",
"duplicate",
"of",
"master_cid"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L248-L268 | train |
hfaran/piazza-api | piazza_api/network.py | Network.resolve_post | def resolve_post(self, post):
"""Mark post as resolved
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:returns: True if it is successful. False otherwise
"""
try:
cid = post["id"]
except KeyError:
cid = post
params = {
"cid": cid,
"resolved": "true"
}
return self._rpc.content_mark_resolved(params) | python | def resolve_post(self, post):
"""Mark post as resolved
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:returns: True if it is successful. False otherwise
"""
try:
cid = post["id"]
except KeyError:
cid = post
params = {
"cid": cid,
"resolved": "true"
}
return self._rpc.content_mark_resolved(params) | [
"def",
"resolve_post",
"(",
"self",
",",
"post",
")",
":",
"try",
":",
"cid",
"=",
"post",
"[",
"\"id\"",
"]",
"except",
"KeyError",
":",
"cid",
"=",
"post",
"params",
"=",
"{",
"\"cid\"",
":",
"cid",
",",
"\"resolved\"",
":",
"\"true\"",
"}",
"return",
"self",
".",
"_rpc",
".",
"content_mark_resolved",
"(",
"params",
")"
]
| Mark post as resolved
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:returns: True if it is successful. False otherwise | [
"Mark",
"post",
"as",
"resolved"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L270-L288 | train |
hfaran/piazza-api | piazza_api/network.py | Network.delete_post | def delete_post(self, post):
""" Deletes post by cid
:type post: dict|str|int
:param post: Either the post dict returned by another API method, the post ID, or
the `cid` field of that post.
:rtype: dict
:returns: Dictionary with information about the post cid.
"""
try:
cid = post['id']
except KeyError:
cid = post
except TypeError:
post = self.get_post(post)
cid = post['id']
params = {
"cid": cid,
}
return self._rpc.content_delete(params) | python | def delete_post(self, post):
""" Deletes post by cid
:type post: dict|str|int
:param post: Either the post dict returned by another API method, the post ID, or
the `cid` field of that post.
:rtype: dict
:returns: Dictionary with information about the post cid.
"""
try:
cid = post['id']
except KeyError:
cid = post
except TypeError:
post = self.get_post(post)
cid = post['id']
params = {
"cid": cid,
}
return self._rpc.content_delete(params) | [
"def",
"delete_post",
"(",
"self",
",",
"post",
")",
":",
"try",
":",
"cid",
"=",
"post",
"[",
"'id'",
"]",
"except",
"KeyError",
":",
"cid",
"=",
"post",
"except",
"TypeError",
":",
"post",
"=",
"self",
".",
"get_post",
"(",
"post",
")",
"cid",
"=",
"post",
"[",
"'id'",
"]",
"params",
"=",
"{",
"\"cid\"",
":",
"cid",
",",
"}",
"return",
"self",
".",
"_rpc",
".",
"content_delete",
"(",
"params",
")"
]
| Deletes post by cid
:type post: dict|str|int
:param post: Either the post dict returned by another API method, the post ID, or
the `cid` field of that post.
:rtype: dict
:returns: Dictionary with information about the post cid. | [
"Deletes",
"post",
"by",
"cid"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L378-L400 | train |
hfaran/piazza-api | piazza_api/network.py | Network.get_feed | def get_feed(self, limit=100, offset=0):
"""Get your feed for this network
Pagination for this can be achieved by using the ``limit`` and
``offset`` params
:type limit: int
:param limit: Number of posts from feed to get, starting from ``offset``
:type offset: int
:param offset: Offset starting from bottom of feed
:rtype: dict
:returns: Feed metadata, including list of posts in feed format; this
means they are not the full posts but only in partial form as
necessary to display them on the Piazza feed. For example, the
returned dicts only have content snippets of posts rather
than the full text.
"""
return self._rpc.get_my_feed(limit=limit, offset=offset) | python | def get_feed(self, limit=100, offset=0):
"""Get your feed for this network
Pagination for this can be achieved by using the ``limit`` and
``offset`` params
:type limit: int
:param limit: Number of posts from feed to get, starting from ``offset``
:type offset: int
:param offset: Offset starting from bottom of feed
:rtype: dict
:returns: Feed metadata, including list of posts in feed format; this
means they are not the full posts but only in partial form as
necessary to display them on the Piazza feed. For example, the
returned dicts only have content snippets of posts rather
than the full text.
"""
return self._rpc.get_my_feed(limit=limit, offset=offset) | [
"def",
"get_feed",
"(",
"self",
",",
"limit",
"=",
"100",
",",
"offset",
"=",
"0",
")",
":",
"return",
"self",
".",
"_rpc",
".",
"get_my_feed",
"(",
"limit",
"=",
"limit",
",",
"offset",
"=",
"offset",
")"
]
| Get your feed for this network
Pagination for this can be achieved by using the ``limit`` and
``offset`` params
:type limit: int
:param limit: Number of posts from feed to get, starting from ``offset``
:type offset: int
:param offset: Offset starting from bottom of feed
:rtype: dict
:returns: Feed metadata, including list of posts in feed format; this
means they are not the full posts but only in partial form as
necessary to display them on the Piazza feed. For example, the
returned dicts only have content snippets of posts rather
than the full text. | [
"Get",
"your",
"feed",
"for",
"this",
"network"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L406-L423 | train |
hfaran/piazza-api | piazza_api/network.py | Network.get_filtered_feed | def get_filtered_feed(self, feed_filter):
"""Get your feed containing only posts filtered by ``feed_filter``
:type feed_filter: FeedFilter
:param feed_filter: Must be an instance of either: UnreadFilter,
FollowingFilter, or FolderFilter
:rtype: dict
"""
assert isinstance(feed_filter, (UnreadFilter, FollowingFilter,
FolderFilter))
return self._rpc.filter_feed(**feed_filter.to_kwargs()) | python | def get_filtered_feed(self, feed_filter):
"""Get your feed containing only posts filtered by ``feed_filter``
:type feed_filter: FeedFilter
:param feed_filter: Must be an instance of either: UnreadFilter,
FollowingFilter, or FolderFilter
:rtype: dict
"""
assert isinstance(feed_filter, (UnreadFilter, FollowingFilter,
FolderFilter))
return self._rpc.filter_feed(**feed_filter.to_kwargs()) | [
"def",
"get_filtered_feed",
"(",
"self",
",",
"feed_filter",
")",
":",
"assert",
"isinstance",
"(",
"feed_filter",
",",
"(",
"UnreadFilter",
",",
"FollowingFilter",
",",
"FolderFilter",
")",
")",
"return",
"self",
".",
"_rpc",
".",
"filter_feed",
"(",
"*",
"*",
"feed_filter",
".",
"to_kwargs",
"(",
")",
")"
]
| Get your feed containing only posts filtered by ``feed_filter``
:type feed_filter: FeedFilter
:param feed_filter: Must be an instance of either: UnreadFilter,
FollowingFilter, or FolderFilter
:rtype: dict | [
"Get",
"your",
"feed",
"containing",
"only",
"posts",
"filtered",
"by",
"feed_filter"
]
| 26201d06e26bada9a838f6765c1bccedad05bd39 | https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L425-L435 | train |
lucaskjaero/PyCasia | pycasia/CASIA.py | CASIA.get_dataset | def get_dataset(self, dataset):
"""
Checks to see if the dataset is present. If not, it downloads and unzips it.
"""
# If the dataset is present, no need to download anything.
success = True
dataset_path = self.base_dataset_path + dataset
if not isdir(dataset_path):
# Try 5 times to download. The download page is unreliable, so we need a few tries.
was_error = False
for iteration in range(5):
# Guard against trying again if successful
if iteration == 0 or was_error is True:
zip_path = dataset_path + ".zip"
# Download zip files if they're not there
if not isfile(zip_path):
try:
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset) as pbar:
urlretrieve(self.datasets[dataset]["url"], zip_path, pbar.hook)
except Exception as ex:
print("Error downloading %s: %s" % (dataset, ex))
was_error = True
# Unzip the data files
if not isdir(dataset_path):
try:
with zipfile.ZipFile(zip_path) as zip_archive:
zip_archive.extractall(path=dataset_path)
zip_archive.close()
except Exception as ex:
print("Error unzipping %s: %s" % (zip_path, ex))
# Usually the error is caused by a bad zip file.
# Delete it so the program will try to download it again.
try:
remove(zip_path)
except FileNotFoundError:
pass
was_error = True
if was_error:
print("\nThis recognizer is trained by the CASIA handwriting database.")
print("If the download doesn't work, you can get the files at %s" % self.datasets[dataset]["url"])
print("If you have download problems, "
"wget may be effective at downloading because of download resuming.")
success = False
return success | python | def get_dataset(self, dataset):
"""
Checks to see if the dataset is present. If not, it downloads and unzips it.
"""
# If the dataset is present, no need to download anything.
success = True
dataset_path = self.base_dataset_path + dataset
if not isdir(dataset_path):
# Try 5 times to download. The download page is unreliable, so we need a few tries.
was_error = False
for iteration in range(5):
# Guard against trying again if successful
if iteration == 0 or was_error is True:
zip_path = dataset_path + ".zip"
# Download zip files if they're not there
if not isfile(zip_path):
try:
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset) as pbar:
urlretrieve(self.datasets[dataset]["url"], zip_path, pbar.hook)
except Exception as ex:
print("Error downloading %s: %s" % (dataset, ex))
was_error = True
# Unzip the data files
if not isdir(dataset_path):
try:
with zipfile.ZipFile(zip_path) as zip_archive:
zip_archive.extractall(path=dataset_path)
zip_archive.close()
except Exception as ex:
print("Error unzipping %s: %s" % (zip_path, ex))
# Usually the error is caused by a bad zip file.
# Delete it so the program will try to download it again.
try:
remove(zip_path)
except FileNotFoundError:
pass
was_error = True
if was_error:
print("\nThis recognizer is trained by the CASIA handwriting database.")
print("If the download doesn't work, you can get the files at %s" % self.datasets[dataset]["url"])
print("If you have download problems, "
"wget may be effective at downloading because of download resuming.")
success = False
return success | [
"def",
"get_dataset",
"(",
"self",
",",
"dataset",
")",
":",
"# If the dataset is present, no need to download anything.",
"success",
"=",
"True",
"dataset_path",
"=",
"self",
".",
"base_dataset_path",
"+",
"dataset",
"if",
"not",
"isdir",
"(",
"dataset_path",
")",
":",
"# Try 5 times to download. The download page is unreliable, so we need a few tries.",
"was_error",
"=",
"False",
"for",
"iteration",
"in",
"range",
"(",
"5",
")",
":",
"# Guard against trying again if successful",
"if",
"iteration",
"==",
"0",
"or",
"was_error",
"is",
"True",
":",
"zip_path",
"=",
"dataset_path",
"+",
"\".zip\"",
"# Download zip files if they're not there",
"if",
"not",
"isfile",
"(",
"zip_path",
")",
":",
"try",
":",
"with",
"DLProgress",
"(",
"unit",
"=",
"'B'",
",",
"unit_scale",
"=",
"True",
",",
"miniters",
"=",
"1",
",",
"desc",
"=",
"dataset",
")",
"as",
"pbar",
":",
"urlretrieve",
"(",
"self",
".",
"datasets",
"[",
"dataset",
"]",
"[",
"\"url\"",
"]",
",",
"zip_path",
",",
"pbar",
".",
"hook",
")",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"\"Error downloading %s: %s\"",
"%",
"(",
"dataset",
",",
"ex",
")",
")",
"was_error",
"=",
"True",
"# Unzip the data files",
"if",
"not",
"isdir",
"(",
"dataset_path",
")",
":",
"try",
":",
"with",
"zipfile",
".",
"ZipFile",
"(",
"zip_path",
")",
"as",
"zip_archive",
":",
"zip_archive",
".",
"extractall",
"(",
"path",
"=",
"dataset_path",
")",
"zip_archive",
".",
"close",
"(",
")",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"\"Error unzipping %s: %s\"",
"%",
"(",
"zip_path",
",",
"ex",
")",
")",
"# Usually the error is caused by a bad zip file.",
"# Delete it so the program will try to download it again.",
"try",
":",
"remove",
"(",
"zip_path",
")",
"except",
"FileNotFoundError",
":",
"pass",
"was_error",
"=",
"True",
"if",
"was_error",
":",
"print",
"(",
"\"\\nThis recognizer is trained by the CASIA handwriting database.\"",
")",
"print",
"(",
"\"If the download doesn't work, you can get the files at %s\"",
"%",
"self",
".",
"datasets",
"[",
"dataset",
"]",
"[",
"\"url\"",
"]",
")",
"print",
"(",
"\"If you have download problems, \"",
"\"wget may be effective at downloading because of download resuming.\"",
")",
"success",
"=",
"False",
"return",
"success"
]
| Checks to see if the dataset is present. If not, it downloads and unzips it. | [
"Checks",
"to",
"see",
"if",
"the",
"dataset",
"is",
"present",
".",
"If",
"not",
"it",
"downloads",
"and",
"unzips",
"it",
"."
]
| 511ddb7809d788fc2c7bc7c1e8600db60bac8152 | https://github.com/lucaskjaero/PyCasia/blob/511ddb7809d788fc2c7bc7c1e8600db60bac8152/pycasia/CASIA.py#L72-L121 | train |
ashleysommer/sanicpluginsframework | spf/plugin.py | SanicPlugin.first_plugin_context | def first_plugin_context(self):
"""Returns the context is associated with the first app this plugin was
registered on"""
# Note, because registrations are stored in a set, its not _really_
# the first one, but whichever one it sees first in the set.
first_spf_reg = next(iter(self.registrations))
return self.get_context_from_spf(first_spf_reg) | python | def first_plugin_context(self):
"""Returns the context is associated with the first app this plugin was
registered on"""
# Note, because registrations are stored in a set, its not _really_
# the first one, but whichever one it sees first in the set.
first_spf_reg = next(iter(self.registrations))
return self.get_context_from_spf(first_spf_reg) | [
"def",
"first_plugin_context",
"(",
"self",
")",
":",
"# Note, because registrations are stored in a set, its not _really_",
"# the first one, but whichever one it sees first in the set.",
"first_spf_reg",
"=",
"next",
"(",
"iter",
"(",
"self",
".",
"registrations",
")",
")",
"return",
"self",
".",
"get_context_from_spf",
"(",
"first_spf_reg",
")"
]
| Returns the context is associated with the first app this plugin was
registered on | [
"Returns",
"the",
"context",
"is",
"associated",
"with",
"the",
"first",
"app",
"this",
"plugin",
"was",
"registered",
"on"
]
| 2cb1656d9334f04c30c738074784b0450c1b893e | https://github.com/ashleysommer/sanicpluginsframework/blob/2cb1656d9334f04c30c738074784b0450c1b893e/spf/plugin.py#L191-L197 | train |
ashleysommer/sanicpluginsframework | spf/plugin.py | SanicPlugin.route_wrapper | async def route_wrapper(self, route, request, context, request_args,
request_kw, *decorator_args, with_context=None,
**decorator_kw):
"""This is the function that is called when a route is decorated with
your plugin decorator. Context will normally be None, but the user
can pass use_context=True so the route will get the plugin
context
"""
# by default, do nothing, just run the wrapped function
if with_context:
resp = route(request, context, *request_args, **request_kw)
else:
resp = route(request, *request_args, **request_kw)
if isawaitable(resp):
resp = await resp
return resp | python | async def route_wrapper(self, route, request, context, request_args,
request_kw, *decorator_args, with_context=None,
**decorator_kw):
"""This is the function that is called when a route is decorated with
your plugin decorator. Context will normally be None, but the user
can pass use_context=True so the route will get the plugin
context
"""
# by default, do nothing, just run the wrapped function
if with_context:
resp = route(request, context, *request_args, **request_kw)
else:
resp = route(request, *request_args, **request_kw)
if isawaitable(resp):
resp = await resp
return resp | [
"async",
"def",
"route_wrapper",
"(",
"self",
",",
"route",
",",
"request",
",",
"context",
",",
"request_args",
",",
"request_kw",
",",
"*",
"decorator_args",
",",
"with_context",
"=",
"None",
",",
"*",
"*",
"decorator_kw",
")",
":",
"# by default, do nothing, just run the wrapped function",
"if",
"with_context",
":",
"resp",
"=",
"route",
"(",
"request",
",",
"context",
",",
"*",
"request_args",
",",
"*",
"*",
"request_kw",
")",
"else",
":",
"resp",
"=",
"route",
"(",
"request",
",",
"*",
"request_args",
",",
"*",
"*",
"request_kw",
")",
"if",
"isawaitable",
"(",
"resp",
")",
":",
"resp",
"=",
"await",
"resp",
"return",
"resp"
]
| This is the function that is called when a route is decorated with
your plugin decorator. Context will normally be None, but the user
can pass use_context=True so the route will get the plugin
context | [
"This",
"is",
"the",
"function",
"that",
"is",
"called",
"when",
"a",
"route",
"is",
"decorated",
"with",
"your",
"plugin",
"decorator",
".",
"Context",
"will",
"normally",
"be",
"None",
"but",
"the",
"user",
"can",
"pass",
"use_context",
"=",
"True",
"so",
"the",
"route",
"will",
"get",
"the",
"plugin",
"context"
]
| 2cb1656d9334f04c30c738074784b0450c1b893e | https://github.com/ashleysommer/sanicpluginsframework/blob/2cb1656d9334f04c30c738074784b0450c1b893e/spf/plugin.py#L370-L385 | train |
ionelmc/python-manhole | src/manhole/__init__.py | check_credentials | def check_credentials(client):
"""
Checks credentials for given socket.
"""
pid, uid, gid = get_peercred(client)
euid = os.geteuid()
client_name = "PID:%s UID:%s GID:%s" % (pid, uid, gid)
if uid not in (0, euid):
raise SuspiciousClient("Can't accept client with %s. It doesn't match the current EUID:%s or ROOT." % (
client_name, euid
))
_LOG("Accepted connection on fd:%s from %s" % (client.fileno(), client_name))
return pid, uid, gid | python | def check_credentials(client):
"""
Checks credentials for given socket.
"""
pid, uid, gid = get_peercred(client)
euid = os.geteuid()
client_name = "PID:%s UID:%s GID:%s" % (pid, uid, gid)
if uid not in (0, euid):
raise SuspiciousClient("Can't accept client with %s. It doesn't match the current EUID:%s or ROOT." % (
client_name, euid
))
_LOG("Accepted connection on fd:%s from %s" % (client.fileno(), client_name))
return pid, uid, gid | [
"def",
"check_credentials",
"(",
"client",
")",
":",
"pid",
",",
"uid",
",",
"gid",
"=",
"get_peercred",
"(",
"client",
")",
"euid",
"=",
"os",
".",
"geteuid",
"(",
")",
"client_name",
"=",
"\"PID:%s UID:%s GID:%s\"",
"%",
"(",
"pid",
",",
"uid",
",",
"gid",
")",
"if",
"uid",
"not",
"in",
"(",
"0",
",",
"euid",
")",
":",
"raise",
"SuspiciousClient",
"(",
"\"Can't accept client with %s. It doesn't match the current EUID:%s or ROOT.\"",
"%",
"(",
"client_name",
",",
"euid",
")",
")",
"_LOG",
"(",
"\"Accepted connection on fd:%s from %s\"",
"%",
"(",
"client",
".",
"fileno",
"(",
")",
",",
"client_name",
")",
")",
"return",
"pid",
",",
"uid",
",",
"gid"
]
| Checks credentials for given socket. | [
"Checks",
"credentials",
"for",
"given",
"socket",
"."
]
| 6a519a1f25142b047e814c6d00f4ef404856a15d | https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L242-L256 | train |
ionelmc/python-manhole | src/manhole/__init__.py | handle_connection_exec | def handle_connection_exec(client):
"""
Alternate connection handler. No output redirection.
"""
class ExitExecLoop(Exception):
pass
def exit():
raise ExitExecLoop()
client.settimeout(None)
fh = os.fdopen(client.detach() if hasattr(client, 'detach') else client.fileno())
with closing(client):
with closing(fh):
try:
payload = fh.readline()
while payload:
_LOG("Running: %r." % payload)
eval(compile(payload, '<manhole>', 'exec'), {'exit': exit}, _MANHOLE.locals)
payload = fh.readline()
except ExitExecLoop:
_LOG("Exiting exec loop.") | python | def handle_connection_exec(client):
"""
Alternate connection handler. No output redirection.
"""
class ExitExecLoop(Exception):
pass
def exit():
raise ExitExecLoop()
client.settimeout(None)
fh = os.fdopen(client.detach() if hasattr(client, 'detach') else client.fileno())
with closing(client):
with closing(fh):
try:
payload = fh.readline()
while payload:
_LOG("Running: %r." % payload)
eval(compile(payload, '<manhole>', 'exec'), {'exit': exit}, _MANHOLE.locals)
payload = fh.readline()
except ExitExecLoop:
_LOG("Exiting exec loop.") | [
"def",
"handle_connection_exec",
"(",
"client",
")",
":",
"class",
"ExitExecLoop",
"(",
"Exception",
")",
":",
"pass",
"def",
"exit",
"(",
")",
":",
"raise",
"ExitExecLoop",
"(",
")",
"client",
".",
"settimeout",
"(",
"None",
")",
"fh",
"=",
"os",
".",
"fdopen",
"(",
"client",
".",
"detach",
"(",
")",
"if",
"hasattr",
"(",
"client",
",",
"'detach'",
")",
"else",
"client",
".",
"fileno",
"(",
")",
")",
"with",
"closing",
"(",
"client",
")",
":",
"with",
"closing",
"(",
"fh",
")",
":",
"try",
":",
"payload",
"=",
"fh",
".",
"readline",
"(",
")",
"while",
"payload",
":",
"_LOG",
"(",
"\"Running: %r.\"",
"%",
"payload",
")",
"eval",
"(",
"compile",
"(",
"payload",
",",
"'<manhole>'",
",",
"'exec'",
")",
",",
"{",
"'exit'",
":",
"exit",
"}",
",",
"_MANHOLE",
".",
"locals",
")",
"payload",
"=",
"fh",
".",
"readline",
"(",
")",
"except",
"ExitExecLoop",
":",
"_LOG",
"(",
"\"Exiting exec loop.\"",
")"
]
| Alternate connection handler. No output redirection. | [
"Alternate",
"connection",
"handler",
".",
"No",
"output",
"redirection",
"."
]
| 6a519a1f25142b047e814c6d00f4ef404856a15d | https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L259-L281 | train |
ionelmc/python-manhole | src/manhole/__init__.py | handle_connection_repl | def handle_connection_repl(client):
"""
Handles connection.
"""
client.settimeout(None)
# # disable this till we have evidence that it's needed
# client.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 0)
# # Note: setting SO_RCVBUF on UDS has no effect, see: http://man7.org/linux/man-pages/man7/unix.7.html
backup = []
old_interval = getinterval()
patches = [('r', ('stdin', '__stdin__')), ('w', ('stdout', '__stdout__'))]
if _MANHOLE.redirect_stderr:
patches.append(('w', ('stderr', '__stderr__')))
try:
client_fd = client.fileno()
for mode, names in patches:
for name in names:
backup.append((name, getattr(sys, name)))
setattr(sys, name, _ORIGINAL_FDOPEN(client_fd, mode, 1 if PY3 else 0))
try:
handle_repl(_MANHOLE.locals)
except Exception as exc:
_LOG("REPL failed with %r." % exc)
_LOG("DONE.")
finally:
try:
# Change the switch/check interval to something ridiculous. We don't want to have other thread try
# to write to the redirected sys.__std*/sys.std* - it would fail horribly.
setinterval(2147483647)
try:
client.close() # close before it's too late. it may already be dead
except IOError:
pass
junk = [] # keep the old file objects alive for a bit
for name, fh in backup:
junk.append(getattr(sys, name))
setattr(sys, name, fh)
del backup
for fh in junk:
try:
if hasattr(fh, 'detach'):
fh.detach()
else:
fh.close()
except IOError:
pass
del fh
del junk
finally:
setinterval(old_interval)
_LOG("Cleaned up.") | python | def handle_connection_repl(client):
"""
Handles connection.
"""
client.settimeout(None)
# # disable this till we have evidence that it's needed
# client.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 0)
# # Note: setting SO_RCVBUF on UDS has no effect, see: http://man7.org/linux/man-pages/man7/unix.7.html
backup = []
old_interval = getinterval()
patches = [('r', ('stdin', '__stdin__')), ('w', ('stdout', '__stdout__'))]
if _MANHOLE.redirect_stderr:
patches.append(('w', ('stderr', '__stderr__')))
try:
client_fd = client.fileno()
for mode, names in patches:
for name in names:
backup.append((name, getattr(sys, name)))
setattr(sys, name, _ORIGINAL_FDOPEN(client_fd, mode, 1 if PY3 else 0))
try:
handle_repl(_MANHOLE.locals)
except Exception as exc:
_LOG("REPL failed with %r." % exc)
_LOG("DONE.")
finally:
try:
# Change the switch/check interval to something ridiculous. We don't want to have other thread try
# to write to the redirected sys.__std*/sys.std* - it would fail horribly.
setinterval(2147483647)
try:
client.close() # close before it's too late. it may already be dead
except IOError:
pass
junk = [] # keep the old file objects alive for a bit
for name, fh in backup:
junk.append(getattr(sys, name))
setattr(sys, name, fh)
del backup
for fh in junk:
try:
if hasattr(fh, 'detach'):
fh.detach()
else:
fh.close()
except IOError:
pass
del fh
del junk
finally:
setinterval(old_interval)
_LOG("Cleaned up.") | [
"def",
"handle_connection_repl",
"(",
"client",
")",
":",
"client",
".",
"settimeout",
"(",
"None",
")",
"# # disable this till we have evidence that it's needed",
"# client.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 0)",
"# # Note: setting SO_RCVBUF on UDS has no effect, see: http://man7.org/linux/man-pages/man7/unix.7.html",
"backup",
"=",
"[",
"]",
"old_interval",
"=",
"getinterval",
"(",
")",
"patches",
"=",
"[",
"(",
"'r'",
",",
"(",
"'stdin'",
",",
"'__stdin__'",
")",
")",
",",
"(",
"'w'",
",",
"(",
"'stdout'",
",",
"'__stdout__'",
")",
")",
"]",
"if",
"_MANHOLE",
".",
"redirect_stderr",
":",
"patches",
".",
"append",
"(",
"(",
"'w'",
",",
"(",
"'stderr'",
",",
"'__stderr__'",
")",
")",
")",
"try",
":",
"client_fd",
"=",
"client",
".",
"fileno",
"(",
")",
"for",
"mode",
",",
"names",
"in",
"patches",
":",
"for",
"name",
"in",
"names",
":",
"backup",
".",
"append",
"(",
"(",
"name",
",",
"getattr",
"(",
"sys",
",",
"name",
")",
")",
")",
"setattr",
"(",
"sys",
",",
"name",
",",
"_ORIGINAL_FDOPEN",
"(",
"client_fd",
",",
"mode",
",",
"1",
"if",
"PY3",
"else",
"0",
")",
")",
"try",
":",
"handle_repl",
"(",
"_MANHOLE",
".",
"locals",
")",
"except",
"Exception",
"as",
"exc",
":",
"_LOG",
"(",
"\"REPL failed with %r.\"",
"%",
"exc",
")",
"_LOG",
"(",
"\"DONE.\"",
")",
"finally",
":",
"try",
":",
"# Change the switch/check interval to something ridiculous. We don't want to have other thread try",
"# to write to the redirected sys.__std*/sys.std* - it would fail horribly.",
"setinterval",
"(",
"2147483647",
")",
"try",
":",
"client",
".",
"close",
"(",
")",
"# close before it's too late. it may already be dead",
"except",
"IOError",
":",
"pass",
"junk",
"=",
"[",
"]",
"# keep the old file objects alive for a bit",
"for",
"name",
",",
"fh",
"in",
"backup",
":",
"junk",
".",
"append",
"(",
"getattr",
"(",
"sys",
",",
"name",
")",
")",
"setattr",
"(",
"sys",
",",
"name",
",",
"fh",
")",
"del",
"backup",
"for",
"fh",
"in",
"junk",
":",
"try",
":",
"if",
"hasattr",
"(",
"fh",
",",
"'detach'",
")",
":",
"fh",
".",
"detach",
"(",
")",
"else",
":",
"fh",
".",
"close",
"(",
")",
"except",
"IOError",
":",
"pass",
"del",
"fh",
"del",
"junk",
"finally",
":",
"setinterval",
"(",
"old_interval",
")",
"_LOG",
"(",
"\"Cleaned up.\"",
")"
]
| Handles connection. | [
"Handles",
"connection",
"."
]
| 6a519a1f25142b047e814c6d00f4ef404856a15d | https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L284-L335 | train |
ionelmc/python-manhole | src/manhole/__init__.py | install | def install(verbose=True,
verbose_destination=sys.__stderr__.fileno() if hasattr(sys.__stderr__, 'fileno') else sys.__stderr__,
strict=True,
**kwargs):
"""
Installs the manhole.
Args:
verbose (bool): Set it to ``False`` to squelch the logging.
verbose_destination (file descriptor or handle): Destination for verbose messages. Default is unbuffered stderr
(stderr ``2`` file descriptor).
patch_fork (bool): Set it to ``False`` if you don't want your ``os.fork`` and ``os.forkpy`` monkeypatched
activate_on (int or signal name): set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
want the Manhole thread to start when this signal is sent. This is desireable in case you don't want the
thread active all the time.
oneshot_on (int or signal name): Set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
want the Manhole to listen for connection in the signal handler. This is desireable in case you don't want
threads at all.
thread (bool): Start the always-on ManholeThread. Default: ``True``. Automatically switched to ``False`` if
``oneshort_on`` or ``activate_on`` are used.
sigmask (list of ints or signal names): Will set the signal mask to the given list (using
``signalfd.sigprocmask``). No action is done if ``signalfd`` is not importable.
**NOTE**: This is done so that the Manhole thread doesn't *steal* any signals; Normally that is fine because
Python will force all the signal handling to be run in the main thread but signalfd doesn't.
socket_path (str): Use a specific path for the unix domain socket (instead of ``/tmp/manhole-<pid>``). This
disables ``patch_fork`` as children cannot reuse the same path.
reinstall_delay (float): Delay the unix domain socket creation *reinstall_delay* seconds. This
alleviates cleanup failures when using fork+exec patterns.
locals (dict): Names to add to manhole interactive shell locals.
daemon_connection (bool): The connection thread is daemonic (dies on app exit). Default: ``False``.
redirect_stderr (bool): Redirect output from stderr to manhole console. Default: ``True``.
connection_handler (function): Connection handler to use. Use ``"exec"`` for simple implementation without
output redirection or your own function. (warning: this is for advanced users). Default: ``"repl"``.
"""
# pylint: disable=W0603
global _MANHOLE
with _LOCK:
if _MANHOLE is None:
_MANHOLE = Manhole()
else:
if strict:
raise AlreadyInstalled("Manhole already installed!")
else:
_LOG.release()
_MANHOLE.release() # Threads might be started here
_LOG.configure(verbose, verbose_destination)
_MANHOLE.configure(**kwargs) # Threads might be started here
return _MANHOLE | python | def install(verbose=True,
verbose_destination=sys.__stderr__.fileno() if hasattr(sys.__stderr__, 'fileno') else sys.__stderr__,
strict=True,
**kwargs):
"""
Installs the manhole.
Args:
verbose (bool): Set it to ``False`` to squelch the logging.
verbose_destination (file descriptor or handle): Destination for verbose messages. Default is unbuffered stderr
(stderr ``2`` file descriptor).
patch_fork (bool): Set it to ``False`` if you don't want your ``os.fork`` and ``os.forkpy`` monkeypatched
activate_on (int or signal name): set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
want the Manhole thread to start when this signal is sent. This is desireable in case you don't want the
thread active all the time.
oneshot_on (int or signal name): Set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
want the Manhole to listen for connection in the signal handler. This is desireable in case you don't want
threads at all.
thread (bool): Start the always-on ManholeThread. Default: ``True``. Automatically switched to ``False`` if
``oneshort_on`` or ``activate_on`` are used.
sigmask (list of ints or signal names): Will set the signal mask to the given list (using
``signalfd.sigprocmask``). No action is done if ``signalfd`` is not importable.
**NOTE**: This is done so that the Manhole thread doesn't *steal* any signals; Normally that is fine because
Python will force all the signal handling to be run in the main thread but signalfd doesn't.
socket_path (str): Use a specific path for the unix domain socket (instead of ``/tmp/manhole-<pid>``). This
disables ``patch_fork`` as children cannot reuse the same path.
reinstall_delay (float): Delay the unix domain socket creation *reinstall_delay* seconds. This
alleviates cleanup failures when using fork+exec patterns.
locals (dict): Names to add to manhole interactive shell locals.
daemon_connection (bool): The connection thread is daemonic (dies on app exit). Default: ``False``.
redirect_stderr (bool): Redirect output from stderr to manhole console. Default: ``True``.
connection_handler (function): Connection handler to use. Use ``"exec"`` for simple implementation without
output redirection or your own function. (warning: this is for advanced users). Default: ``"repl"``.
"""
# pylint: disable=W0603
global _MANHOLE
with _LOCK:
if _MANHOLE is None:
_MANHOLE = Manhole()
else:
if strict:
raise AlreadyInstalled("Manhole already installed!")
else:
_LOG.release()
_MANHOLE.release() # Threads might be started here
_LOG.configure(verbose, verbose_destination)
_MANHOLE.configure(**kwargs) # Threads might be started here
return _MANHOLE | [
"def",
"install",
"(",
"verbose",
"=",
"True",
",",
"verbose_destination",
"=",
"sys",
".",
"__stderr__",
".",
"fileno",
"(",
")",
"if",
"hasattr",
"(",
"sys",
".",
"__stderr__",
",",
"'fileno'",
")",
"else",
"sys",
".",
"__stderr__",
",",
"strict",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=W0603",
"global",
"_MANHOLE",
"with",
"_LOCK",
":",
"if",
"_MANHOLE",
"is",
"None",
":",
"_MANHOLE",
"=",
"Manhole",
"(",
")",
"else",
":",
"if",
"strict",
":",
"raise",
"AlreadyInstalled",
"(",
"\"Manhole already installed!\"",
")",
"else",
":",
"_LOG",
".",
"release",
"(",
")",
"_MANHOLE",
".",
"release",
"(",
")",
"# Threads might be started here",
"_LOG",
".",
"configure",
"(",
"verbose",
",",
"verbose_destination",
")",
"_MANHOLE",
".",
"configure",
"(",
"*",
"*",
"kwargs",
")",
"# Threads might be started here",
"return",
"_MANHOLE"
]
| Installs the manhole.
Args:
verbose (bool): Set it to ``False`` to squelch the logging.
verbose_destination (file descriptor or handle): Destination for verbose messages. Default is unbuffered stderr
(stderr ``2`` file descriptor).
patch_fork (bool): Set it to ``False`` if you don't want your ``os.fork`` and ``os.forkpy`` monkeypatched
activate_on (int or signal name): set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
want the Manhole thread to start when this signal is sent. This is desireable in case you don't want the
thread active all the time.
oneshot_on (int or signal name): Set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
want the Manhole to listen for connection in the signal handler. This is desireable in case you don't want
threads at all.
thread (bool): Start the always-on ManholeThread. Default: ``True``. Automatically switched to ``False`` if
``oneshort_on`` or ``activate_on`` are used.
sigmask (list of ints or signal names): Will set the signal mask to the given list (using
``signalfd.sigprocmask``). No action is done if ``signalfd`` is not importable.
**NOTE**: This is done so that the Manhole thread doesn't *steal* any signals; Normally that is fine because
Python will force all the signal handling to be run in the main thread but signalfd doesn't.
socket_path (str): Use a specific path for the unix domain socket (instead of ``/tmp/manhole-<pid>``). This
disables ``patch_fork`` as children cannot reuse the same path.
reinstall_delay (float): Delay the unix domain socket creation *reinstall_delay* seconds. This
alleviates cleanup failures when using fork+exec patterns.
locals (dict): Names to add to manhole interactive shell locals.
daemon_connection (bool): The connection thread is daemonic (dies on app exit). Default: ``False``.
redirect_stderr (bool): Redirect output from stderr to manhole console. Default: ``True``.
connection_handler (function): Connection handler to use. Use ``"exec"`` for simple implementation without
output redirection or your own function. (warning: this is for advanced users). Default: ``"repl"``. | [
"Installs",
"the",
"manhole",
"."
]
| 6a519a1f25142b047e814c6d00f4ef404856a15d | https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L569-L618 | train |
ionelmc/python-manhole | src/manhole/__init__.py | dump_stacktraces | def dump_stacktraces():
"""
Dumps thread ids and tracebacks to stdout.
"""
lines = []
for thread_id, stack in sys._current_frames().items(): # pylint: disable=W0212
lines.append("\n######### ProcessID=%s, ThreadID=%s #########" % (
os.getpid(), thread_id
))
for filename, lineno, name, line in traceback.extract_stack(stack):
lines.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
lines.append(" %s" % (line.strip()))
lines.append("#############################################\n\n")
print('\n'.join(lines), file=sys.stderr if _MANHOLE.redirect_stderr else sys.stdout) | python | def dump_stacktraces():
"""
Dumps thread ids and tracebacks to stdout.
"""
lines = []
for thread_id, stack in sys._current_frames().items(): # pylint: disable=W0212
lines.append("\n######### ProcessID=%s, ThreadID=%s #########" % (
os.getpid(), thread_id
))
for filename, lineno, name, line in traceback.extract_stack(stack):
lines.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
lines.append(" %s" % (line.strip()))
lines.append("#############################################\n\n")
print('\n'.join(lines), file=sys.stderr if _MANHOLE.redirect_stderr else sys.stdout) | [
"def",
"dump_stacktraces",
"(",
")",
":",
"lines",
"=",
"[",
"]",
"for",
"thread_id",
",",
"stack",
"in",
"sys",
".",
"_current_frames",
"(",
")",
".",
"items",
"(",
")",
":",
"# pylint: disable=W0212",
"lines",
".",
"append",
"(",
"\"\\n######### ProcessID=%s, ThreadID=%s #########\"",
"%",
"(",
"os",
".",
"getpid",
"(",
")",
",",
"thread_id",
")",
")",
"for",
"filename",
",",
"lineno",
",",
"name",
",",
"line",
"in",
"traceback",
".",
"extract_stack",
"(",
"stack",
")",
":",
"lines",
".",
"append",
"(",
"'File: \"%s\", line %d, in %s'",
"%",
"(",
"filename",
",",
"lineno",
",",
"name",
")",
")",
"if",
"line",
":",
"lines",
".",
"append",
"(",
"\" %s\"",
"%",
"(",
"line",
".",
"strip",
"(",
")",
")",
")",
"lines",
".",
"append",
"(",
"\"#############################################\\n\\n\"",
")",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
"if",
"_MANHOLE",
".",
"redirect_stderr",
"else",
"sys",
".",
"stdout",
")"
]
| Dumps thread ids and tracebacks to stdout. | [
"Dumps",
"thread",
"ids",
"and",
"tracebacks",
"to",
"stdout",
"."
]
| 6a519a1f25142b047e814c6d00f4ef404856a15d | https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L621-L636 | train |
ionelmc/python-manhole | src/manhole/__init__.py | ManholeThread.clone | def clone(self, **kwargs):
"""
Make a fresh thread with the same options. This is usually used on dead threads.
"""
return ManholeThread(
self.get_socket, self.sigmask, self.start_timeout,
connection_handler=self.connection_handler,
daemon_connection=self.daemon_connection,
**kwargs
) | python | def clone(self, **kwargs):
"""
Make a fresh thread with the same options. This is usually used on dead threads.
"""
return ManholeThread(
self.get_socket, self.sigmask, self.start_timeout,
connection_handler=self.connection_handler,
daemon_connection=self.daemon_connection,
**kwargs
) | [
"def",
"clone",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"ManholeThread",
"(",
"self",
".",
"get_socket",
",",
"self",
".",
"sigmask",
",",
"self",
".",
"start_timeout",
",",
"connection_handler",
"=",
"self",
".",
"connection_handler",
",",
"daemon_connection",
"=",
"self",
".",
"daemon_connection",
",",
"*",
"*",
"kwargs",
")"
]
| Make a fresh thread with the same options. This is usually used on dead threads. | [
"Make",
"a",
"fresh",
"thread",
"with",
"the",
"same",
"options",
".",
"This",
"is",
"usually",
"used",
"on",
"dead",
"threads",
"."
]
| 6a519a1f25142b047e814c6d00f4ef404856a15d | https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L167-L176 | train |
ionelmc/python-manhole | src/manhole/__init__.py | Manhole.reinstall | def reinstall(self):
"""
Reinstalls the manhole. Checks if the thread is running. If not, it starts it again.
"""
with _LOCK:
if not (self.thread.is_alive() and self.thread in _ORIGINAL__ACTIVE):
self.thread = self.thread.clone(bind_delay=self.reinstall_delay)
if self.should_restart:
self.thread.start() | python | def reinstall(self):
"""
Reinstalls the manhole. Checks if the thread is running. If not, it starts it again.
"""
with _LOCK:
if not (self.thread.is_alive() and self.thread in _ORIGINAL__ACTIVE):
self.thread = self.thread.clone(bind_delay=self.reinstall_delay)
if self.should_restart:
self.thread.start() | [
"def",
"reinstall",
"(",
"self",
")",
":",
"with",
"_LOCK",
":",
"if",
"not",
"(",
"self",
".",
"thread",
".",
"is_alive",
"(",
")",
"and",
"self",
".",
"thread",
"in",
"_ORIGINAL__ACTIVE",
")",
":",
"self",
".",
"thread",
"=",
"self",
".",
"thread",
".",
"clone",
"(",
"bind_delay",
"=",
"self",
".",
"reinstall_delay",
")",
"if",
"self",
".",
"should_restart",
":",
"self",
".",
"thread",
".",
"start",
"(",
")"
]
| Reinstalls the manhole. Checks if the thread is running. If not, it starts it again. | [
"Reinstalls",
"the",
"manhole",
".",
"Checks",
"if",
"the",
"thread",
"is",
"running",
".",
"If",
"not",
"it",
"starts",
"it",
"again",
"."
]
| 6a519a1f25142b047e814c6d00f4ef404856a15d | https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L502-L510 | train |
ionelmc/python-manhole | src/manhole/__init__.py | Manhole.patched_forkpty | def patched_forkpty(self):
"""Fork a new process with a new pseudo-terminal as controlling tty."""
pid, master_fd = self.original_os_forkpty()
if not pid:
_LOG('Fork detected. Reinstalling Manhole.')
self.reinstall()
return pid, master_fd | python | def patched_forkpty(self):
"""Fork a new process with a new pseudo-terminal as controlling tty."""
pid, master_fd = self.original_os_forkpty()
if not pid:
_LOG('Fork detected. Reinstalling Manhole.')
self.reinstall()
return pid, master_fd | [
"def",
"patched_forkpty",
"(",
"self",
")",
":",
"pid",
",",
"master_fd",
"=",
"self",
".",
"original_os_forkpty",
"(",
")",
"if",
"not",
"pid",
":",
"_LOG",
"(",
"'Fork detected. Reinstalling Manhole.'",
")",
"self",
".",
"reinstall",
"(",
")",
"return",
"pid",
",",
"master_fd"
]
| Fork a new process with a new pseudo-terminal as controlling tty. | [
"Fork",
"a",
"new",
"process",
"with",
"a",
"new",
"pseudo",
"-",
"terminal",
"as",
"controlling",
"tty",
"."
]
| 6a519a1f25142b047e814c6d00f4ef404856a15d | https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L546-L552 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions_nrql.py | AlertConditionsNRQL.update | def update( # noqa: C901
self, alert_condition_nrql_id, policy_id, name=None, threshold_type=None, query=None,
since_value=None, terms=None, expected_groups=None, value_function=None,
runbook_url=None, ignore_overlap=None, enabled=True):
"""
Updates any of the optional parameters of the alert condition nrql
:type alert_condition_nrql_id: int
:param alert_condition_nrql_id: Alerts condition NRQL id to update
:type policy_id: int
:param policy_id: Alert policy id where target alert condition belongs to
:type condition_scope: str
:param condition_scope: The scope of the condition, can be instance or application
:type name: str
:param name: The name of the alert
:type threshold_type: str
:param threshold_type: The tthreshold_typeype of the condition, can be static or outlier
:type query: str
:param query: nrql query for the alerts
:type since_value: str
:param since_value: since value for the alert
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type expected_groups: int
:param expected_groups: expected groups setting for outlier alerts
:type value_function: str
:param type: value function for static alerts
:type runbook_url: str
:param runbook_url: The url of the runbook
:type ignore_overlap: bool
:param ignore_overlap: Whether to ignore overlaps for outlier alerts
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>`
if target alert condition is not included in target policy
:raises: This will raise a
:class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
if metric is set as user_defined but user_defined config is not passed
::
{
"nrql_condition": {
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
}
"""
conditions_nrql_dict = self.list(policy_id)
target_condition_nrql = None
for condition in conditions_nrql_dict['nrql_conditions']:
if int(condition['id']) == alert_condition_nrql_id:
target_condition_nrql = condition
break
if target_condition_nrql is None:
raise NoEntityException(
'Target alert condition nrql is not included in that policy.'
'policy_id: {}, alert_condition_nrql_id {}'.format(
policy_id,
alert_condition_nrql_id
)
)
data = {
'nrql_condition': {
'type': threshold_type or target_condition_nrql['type'],
'enabled': target_condition_nrql['enabled'],
'name': name or target_condition_nrql['name'],
'terms': terms or target_condition_nrql['terms'],
'nrql': {
'query': query or target_condition_nrql['nrql']['query'],
'since_value': since_value or target_condition_nrql['nrql']['since_value'],
}
}
}
if enabled is not None:
data['nrql_condition']['enabled'] = str(enabled).lower()
if runbook_url is not None:
data['nrql_condition']['runbook_url'] = runbook_url
elif 'runbook_url' in target_condition_nrql:
data['nrql_condition']['runbook_url'] = target_condition_nrql['runbook_url']
if expected_groups is not None:
data['nrql_condition']['expected_groups'] = expected_groups
elif 'expected_groups' in target_condition_nrql:
data['nrql_condition']['expected_groups'] = target_condition_nrql['expected_groups']
if ignore_overlap is not None:
data['nrql_condition']['ignore_overlap'] = ignore_overlap
elif 'ignore_overlap' in target_condition_nrql:
data['nrql_condition']['ignore_overlap'] = target_condition_nrql['ignore_overlap']
if value_function is not None:
data['nrql_condition']['value_function'] = value_function
elif 'value_function' in target_condition_nrql:
data['nrql_condition']['value_function'] = target_condition_nrql['value_function']
if data['nrql_condition']['type'] == 'static':
if 'value_function' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as static but no value_function config specified'
)
data['nrql_condition'].pop('expected_groups', None)
data['nrql_condition'].pop('ignore_overlap', None)
elif data['nrql_condition']['type'] == 'outlier':
if 'expected_groups' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as outlier but expected_groups config is not specified'
)
if 'ignore_overlap' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as outlier but ignore_overlap config is not specified'
)
data['nrql_condition'].pop('value_function', None)
return self._put(
url='{0}alerts_nrql_conditions/{1}.json'.format(self.URL, alert_condition_nrql_id),
headers=self.headers,
data=data
) | python | def update( # noqa: C901
self, alert_condition_nrql_id, policy_id, name=None, threshold_type=None, query=None,
since_value=None, terms=None, expected_groups=None, value_function=None,
runbook_url=None, ignore_overlap=None, enabled=True):
"""
Updates any of the optional parameters of the alert condition nrql
:type alert_condition_nrql_id: int
:param alert_condition_nrql_id: Alerts condition NRQL id to update
:type policy_id: int
:param policy_id: Alert policy id where target alert condition belongs to
:type condition_scope: str
:param condition_scope: The scope of the condition, can be instance or application
:type name: str
:param name: The name of the alert
:type threshold_type: str
:param threshold_type: The tthreshold_typeype of the condition, can be static or outlier
:type query: str
:param query: nrql query for the alerts
:type since_value: str
:param since_value: since value for the alert
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type expected_groups: int
:param expected_groups: expected groups setting for outlier alerts
:type value_function: str
:param type: value function for static alerts
:type runbook_url: str
:param runbook_url: The url of the runbook
:type ignore_overlap: bool
:param ignore_overlap: Whether to ignore overlaps for outlier alerts
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>`
if target alert condition is not included in target policy
:raises: This will raise a
:class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
if metric is set as user_defined but user_defined config is not passed
::
{
"nrql_condition": {
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
}
"""
conditions_nrql_dict = self.list(policy_id)
target_condition_nrql = None
for condition in conditions_nrql_dict['nrql_conditions']:
if int(condition['id']) == alert_condition_nrql_id:
target_condition_nrql = condition
break
if target_condition_nrql is None:
raise NoEntityException(
'Target alert condition nrql is not included in that policy.'
'policy_id: {}, alert_condition_nrql_id {}'.format(
policy_id,
alert_condition_nrql_id
)
)
data = {
'nrql_condition': {
'type': threshold_type or target_condition_nrql['type'],
'enabled': target_condition_nrql['enabled'],
'name': name or target_condition_nrql['name'],
'terms': terms or target_condition_nrql['terms'],
'nrql': {
'query': query or target_condition_nrql['nrql']['query'],
'since_value': since_value or target_condition_nrql['nrql']['since_value'],
}
}
}
if enabled is not None:
data['nrql_condition']['enabled'] = str(enabled).lower()
if runbook_url is not None:
data['nrql_condition']['runbook_url'] = runbook_url
elif 'runbook_url' in target_condition_nrql:
data['nrql_condition']['runbook_url'] = target_condition_nrql['runbook_url']
if expected_groups is not None:
data['nrql_condition']['expected_groups'] = expected_groups
elif 'expected_groups' in target_condition_nrql:
data['nrql_condition']['expected_groups'] = target_condition_nrql['expected_groups']
if ignore_overlap is not None:
data['nrql_condition']['ignore_overlap'] = ignore_overlap
elif 'ignore_overlap' in target_condition_nrql:
data['nrql_condition']['ignore_overlap'] = target_condition_nrql['ignore_overlap']
if value_function is not None:
data['nrql_condition']['value_function'] = value_function
elif 'value_function' in target_condition_nrql:
data['nrql_condition']['value_function'] = target_condition_nrql['value_function']
if data['nrql_condition']['type'] == 'static':
if 'value_function' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as static but no value_function config specified'
)
data['nrql_condition'].pop('expected_groups', None)
data['nrql_condition'].pop('ignore_overlap', None)
elif data['nrql_condition']['type'] == 'outlier':
if 'expected_groups' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as outlier but expected_groups config is not specified'
)
if 'ignore_overlap' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as outlier but ignore_overlap config is not specified'
)
data['nrql_condition'].pop('value_function', None)
return self._put(
url='{0}alerts_nrql_conditions/{1}.json'.format(self.URL, alert_condition_nrql_id),
headers=self.headers,
data=data
) | [
"def",
"update",
"(",
"# noqa: C901",
"self",
",",
"alert_condition_nrql_id",
",",
"policy_id",
",",
"name",
"=",
"None",
",",
"threshold_type",
"=",
"None",
",",
"query",
"=",
"None",
",",
"since_value",
"=",
"None",
",",
"terms",
"=",
"None",
",",
"expected_groups",
"=",
"None",
",",
"value_function",
"=",
"None",
",",
"runbook_url",
"=",
"None",
",",
"ignore_overlap",
"=",
"None",
",",
"enabled",
"=",
"True",
")",
":",
"conditions_nrql_dict",
"=",
"self",
".",
"list",
"(",
"policy_id",
")",
"target_condition_nrql",
"=",
"None",
"for",
"condition",
"in",
"conditions_nrql_dict",
"[",
"'nrql_conditions'",
"]",
":",
"if",
"int",
"(",
"condition",
"[",
"'id'",
"]",
")",
"==",
"alert_condition_nrql_id",
":",
"target_condition_nrql",
"=",
"condition",
"break",
"if",
"target_condition_nrql",
"is",
"None",
":",
"raise",
"NoEntityException",
"(",
"'Target alert condition nrql is not included in that policy.'",
"'policy_id: {}, alert_condition_nrql_id {}'",
".",
"format",
"(",
"policy_id",
",",
"alert_condition_nrql_id",
")",
")",
"data",
"=",
"{",
"'nrql_condition'",
":",
"{",
"'type'",
":",
"threshold_type",
"or",
"target_condition_nrql",
"[",
"'type'",
"]",
",",
"'enabled'",
":",
"target_condition_nrql",
"[",
"'enabled'",
"]",
",",
"'name'",
":",
"name",
"or",
"target_condition_nrql",
"[",
"'name'",
"]",
",",
"'terms'",
":",
"terms",
"or",
"target_condition_nrql",
"[",
"'terms'",
"]",
",",
"'nrql'",
":",
"{",
"'query'",
":",
"query",
"or",
"target_condition_nrql",
"[",
"'nrql'",
"]",
"[",
"'query'",
"]",
",",
"'since_value'",
":",
"since_value",
"or",
"target_condition_nrql",
"[",
"'nrql'",
"]",
"[",
"'since_value'",
"]",
",",
"}",
"}",
"}",
"if",
"enabled",
"is",
"not",
"None",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'enabled'",
"]",
"=",
"str",
"(",
"enabled",
")",
".",
"lower",
"(",
")",
"if",
"runbook_url",
"is",
"not",
"None",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'runbook_url'",
"]",
"=",
"runbook_url",
"elif",
"'runbook_url'",
"in",
"target_condition_nrql",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'runbook_url'",
"]",
"=",
"target_condition_nrql",
"[",
"'runbook_url'",
"]",
"if",
"expected_groups",
"is",
"not",
"None",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'expected_groups'",
"]",
"=",
"expected_groups",
"elif",
"'expected_groups'",
"in",
"target_condition_nrql",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'expected_groups'",
"]",
"=",
"target_condition_nrql",
"[",
"'expected_groups'",
"]",
"if",
"ignore_overlap",
"is",
"not",
"None",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'ignore_overlap'",
"]",
"=",
"ignore_overlap",
"elif",
"'ignore_overlap'",
"in",
"target_condition_nrql",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'ignore_overlap'",
"]",
"=",
"target_condition_nrql",
"[",
"'ignore_overlap'",
"]",
"if",
"value_function",
"is",
"not",
"None",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'value_function'",
"]",
"=",
"value_function",
"elif",
"'value_function'",
"in",
"target_condition_nrql",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'value_function'",
"]",
"=",
"target_condition_nrql",
"[",
"'value_function'",
"]",
"if",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'type'",
"]",
"==",
"'static'",
":",
"if",
"'value_function'",
"not",
"in",
"data",
"[",
"'nrql_condition'",
"]",
":",
"raise",
"ConfigurationException",
"(",
"'Alert is set as static but no value_function config specified'",
")",
"data",
"[",
"'nrql_condition'",
"]",
".",
"pop",
"(",
"'expected_groups'",
",",
"None",
")",
"data",
"[",
"'nrql_condition'",
"]",
".",
"pop",
"(",
"'ignore_overlap'",
",",
"None",
")",
"elif",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'type'",
"]",
"==",
"'outlier'",
":",
"if",
"'expected_groups'",
"not",
"in",
"data",
"[",
"'nrql_condition'",
"]",
":",
"raise",
"ConfigurationException",
"(",
"'Alert is set as outlier but expected_groups config is not specified'",
")",
"if",
"'ignore_overlap'",
"not",
"in",
"data",
"[",
"'nrql_condition'",
"]",
":",
"raise",
"ConfigurationException",
"(",
"'Alert is set as outlier but ignore_overlap config is not specified'",
")",
"data",
"[",
"'nrql_condition'",
"]",
".",
"pop",
"(",
"'value_function'",
",",
"None",
")",
"return",
"self",
".",
"_put",
"(",
"url",
"=",
"'{0}alerts_nrql_conditions/{1}.json'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"alert_condition_nrql_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"data",
")"
]
| Updates any of the optional parameters of the alert condition nrql
:type alert_condition_nrql_id: int
:param alert_condition_nrql_id: Alerts condition NRQL id to update
:type policy_id: int
:param policy_id: Alert policy id where target alert condition belongs to
:type condition_scope: str
:param condition_scope: The scope of the condition, can be instance or application
:type name: str
:param name: The name of the alert
:type threshold_type: str
:param threshold_type: The tthreshold_typeype of the condition, can be static or outlier
:type query: str
:param query: nrql query for the alerts
:type since_value: str
:param since_value: since value for the alert
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type expected_groups: int
:param expected_groups: expected groups setting for outlier alerts
:type value_function: str
:param type: value function for static alerts
:type runbook_url: str
:param runbook_url: The url of the runbook
:type ignore_overlap: bool
:param ignore_overlap: Whether to ignore overlaps for outlier alerts
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>`
if target alert condition is not included in target policy
:raises: This will raise a
:class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
if metric is set as user_defined but user_defined config is not passed
::
{
"nrql_condition": {
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
} | [
"Updates",
"any",
"of",
"the",
"optional",
"parameters",
"of",
"the",
"alert",
"condition",
"nrql"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_nrql.py#L67-L224 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions_nrql.py | AlertConditionsNRQL.create | def create(
self, policy_id, name, threshold_type, query, since_value, terms,
expected_groups=None, value_function=None, runbook_url=None,
ignore_overlap=None, enabled=True):
"""
Creates an alert condition nrql
:type policy_id: int
:param policy_id: Alert policy id where target alert condition nrql belongs to
:type name: str
:param name: The name of the alert
:type threshold_type: str
:param type: The threshold_type of the condition, can be static or outlier
:type query: str
:param query: nrql query for the alerts
:type since_value: str
:param since_value: since value for the alert
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type expected_groups: int
:param expected_groups: expected groups setting for outlier alerts
:type value_function: str
:param type: value function for static alerts
:type runbook_url: str
:param runbook_url: The url of the runbook
:type ignore_overlap: bool
:param ignore_overlap: Whether to ignore overlaps for outlier alerts
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>`
if target alert condition is not included in target policy
:raises: This will raise a
:class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
if metric is set as user_defined but user_defined config is not passed
::
{
"nrql_condition": {
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
}
"""
data = {
'nrql_condition': {
'type': threshold_type,
'name': name,
'enabled': enabled,
'terms': terms,
'nrql': {
'query': query,
'since_value': since_value
}
}
}
if runbook_url is not None:
data['nrql_condition']['runbook_url'] = runbook_url
if expected_groups is not None:
data['nrql_condition']['expected_groups'] = expected_groups
if ignore_overlap is not None:
data['nrql_condition']['ignore_overlap'] = ignore_overlap
if value_function is not None:
data['nrql_condition']['value_function'] = value_function
if data['nrql_condition']['type'] == 'static':
if 'value_function' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as static but no value_function config specified'
)
data['nrql_condition'].pop('expected_groups', None)
data['nrql_condition'].pop('ignore_overlap', None)
elif data['nrql_condition']['type'] == 'outlier':
if 'expected_groups' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as outlier but expected_groups config is not specified'
)
if 'ignore_overlap' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as outlier but ignore_overlap config is not specified'
)
data['nrql_condition'].pop('value_function', None)
return self._post(
url='{0}alerts_nrql_conditions/policies/{1}.json'.format(self.URL, policy_id),
headers=self.headers,
data=data
) | python | def create(
self, policy_id, name, threshold_type, query, since_value, terms,
expected_groups=None, value_function=None, runbook_url=None,
ignore_overlap=None, enabled=True):
"""
Creates an alert condition nrql
:type policy_id: int
:param policy_id: Alert policy id where target alert condition nrql belongs to
:type name: str
:param name: The name of the alert
:type threshold_type: str
:param type: The threshold_type of the condition, can be static or outlier
:type query: str
:param query: nrql query for the alerts
:type since_value: str
:param since_value: since value for the alert
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type expected_groups: int
:param expected_groups: expected groups setting for outlier alerts
:type value_function: str
:param type: value function for static alerts
:type runbook_url: str
:param runbook_url: The url of the runbook
:type ignore_overlap: bool
:param ignore_overlap: Whether to ignore overlaps for outlier alerts
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>`
if target alert condition is not included in target policy
:raises: This will raise a
:class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
if metric is set as user_defined but user_defined config is not passed
::
{
"nrql_condition": {
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
}
"""
data = {
'nrql_condition': {
'type': threshold_type,
'name': name,
'enabled': enabled,
'terms': terms,
'nrql': {
'query': query,
'since_value': since_value
}
}
}
if runbook_url is not None:
data['nrql_condition']['runbook_url'] = runbook_url
if expected_groups is not None:
data['nrql_condition']['expected_groups'] = expected_groups
if ignore_overlap is not None:
data['nrql_condition']['ignore_overlap'] = ignore_overlap
if value_function is not None:
data['nrql_condition']['value_function'] = value_function
if data['nrql_condition']['type'] == 'static':
if 'value_function' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as static but no value_function config specified'
)
data['nrql_condition'].pop('expected_groups', None)
data['nrql_condition'].pop('ignore_overlap', None)
elif data['nrql_condition']['type'] == 'outlier':
if 'expected_groups' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as outlier but expected_groups config is not specified'
)
if 'ignore_overlap' not in data['nrql_condition']:
raise ConfigurationException(
'Alert is set as outlier but ignore_overlap config is not specified'
)
data['nrql_condition'].pop('value_function', None)
return self._post(
url='{0}alerts_nrql_conditions/policies/{1}.json'.format(self.URL, policy_id),
headers=self.headers,
data=data
) | [
"def",
"create",
"(",
"self",
",",
"policy_id",
",",
"name",
",",
"threshold_type",
",",
"query",
",",
"since_value",
",",
"terms",
",",
"expected_groups",
"=",
"None",
",",
"value_function",
"=",
"None",
",",
"runbook_url",
"=",
"None",
",",
"ignore_overlap",
"=",
"None",
",",
"enabled",
"=",
"True",
")",
":",
"data",
"=",
"{",
"'nrql_condition'",
":",
"{",
"'type'",
":",
"threshold_type",
",",
"'name'",
":",
"name",
",",
"'enabled'",
":",
"enabled",
",",
"'terms'",
":",
"terms",
",",
"'nrql'",
":",
"{",
"'query'",
":",
"query",
",",
"'since_value'",
":",
"since_value",
"}",
"}",
"}",
"if",
"runbook_url",
"is",
"not",
"None",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'runbook_url'",
"]",
"=",
"runbook_url",
"if",
"expected_groups",
"is",
"not",
"None",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'expected_groups'",
"]",
"=",
"expected_groups",
"if",
"ignore_overlap",
"is",
"not",
"None",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'ignore_overlap'",
"]",
"=",
"ignore_overlap",
"if",
"value_function",
"is",
"not",
"None",
":",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'value_function'",
"]",
"=",
"value_function",
"if",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'type'",
"]",
"==",
"'static'",
":",
"if",
"'value_function'",
"not",
"in",
"data",
"[",
"'nrql_condition'",
"]",
":",
"raise",
"ConfigurationException",
"(",
"'Alert is set as static but no value_function config specified'",
")",
"data",
"[",
"'nrql_condition'",
"]",
".",
"pop",
"(",
"'expected_groups'",
",",
"None",
")",
"data",
"[",
"'nrql_condition'",
"]",
".",
"pop",
"(",
"'ignore_overlap'",
",",
"None",
")",
"elif",
"data",
"[",
"'nrql_condition'",
"]",
"[",
"'type'",
"]",
"==",
"'outlier'",
":",
"if",
"'expected_groups'",
"not",
"in",
"data",
"[",
"'nrql_condition'",
"]",
":",
"raise",
"ConfigurationException",
"(",
"'Alert is set as outlier but expected_groups config is not specified'",
")",
"if",
"'ignore_overlap'",
"not",
"in",
"data",
"[",
"'nrql_condition'",
"]",
":",
"raise",
"ConfigurationException",
"(",
"'Alert is set as outlier but ignore_overlap config is not specified'",
")",
"data",
"[",
"'nrql_condition'",
"]",
".",
"pop",
"(",
"'value_function'",
",",
"None",
")",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"'{0}alerts_nrql_conditions/policies/{1}.json'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"policy_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"data",
")"
]
| Creates an alert condition nrql
:type policy_id: int
:param policy_id: Alert policy id where target alert condition nrql belongs to
:type name: str
:param name: The name of the alert
:type threshold_type: str
:param type: The threshold_type of the condition, can be static or outlier
:type query: str
:param query: nrql query for the alerts
:type since_value: str
:param since_value: since value for the alert
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type expected_groups: int
:param expected_groups: expected groups setting for outlier alerts
:type value_function: str
:param type: value function for static alerts
:type runbook_url: str
:param runbook_url: The url of the runbook
:type ignore_overlap: bool
:param ignore_overlap: Whether to ignore overlaps for outlier alerts
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>`
if target alert condition is not included in target policy
:raises: This will raise a
:class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
if metric is set as user_defined but user_defined config is not passed
::
{
"nrql_condition": {
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
} | [
"Creates",
"an",
"alert",
"condition",
"nrql"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_nrql.py#L226-L350 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions_nrql.py | AlertConditionsNRQL.delete | def delete(self, alert_condition_nrql_id):
"""
This API endpoint allows you to delete an alert condition nrql
:type alert_condition_nrql_id: integer
:param alert_condition_nrql_id: Alert Condition ID
:rtype: dict
:return: The JSON response of the API
::
{
"nrql_condition": {
"type": "string",
"id": "integer",
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
}
"""
return self._delete(
url='{0}alerts_nrql_conditions/{1}.json'.format(self.URL, alert_condition_nrql_id),
headers=self.headers
) | python | def delete(self, alert_condition_nrql_id):
"""
This API endpoint allows you to delete an alert condition nrql
:type alert_condition_nrql_id: integer
:param alert_condition_nrql_id: Alert Condition ID
:rtype: dict
:return: The JSON response of the API
::
{
"nrql_condition": {
"type": "string",
"id": "integer",
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
}
"""
return self._delete(
url='{0}alerts_nrql_conditions/{1}.json'.format(self.URL, alert_condition_nrql_id),
headers=self.headers
) | [
"def",
"delete",
"(",
"self",
",",
"alert_condition_nrql_id",
")",
":",
"return",
"self",
".",
"_delete",
"(",
"url",
"=",
"'{0}alerts_nrql_conditions/{1}.json'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"alert_condition_nrql_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")"
]
| This API endpoint allows you to delete an alert condition nrql
:type alert_condition_nrql_id: integer
:param alert_condition_nrql_id: Alert Condition ID
:rtype: dict
:return: The JSON response of the API
::
{
"nrql_condition": {
"type": "string",
"id": "integer",
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
} | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"delete",
"an",
"alert",
"condition",
"nrql"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_nrql.py#L352-L394 | train |
ambitioninc/newrelic-api | newrelic_api/servers.py | Servers.list | def list(self, filter_name=None, filter_ids=None, filter_labels=None, page=None):
"""
This API endpoint returns a paginated list of the Servers
associated with your New Relic account. Servers can be filtered
by their name or by a list of server IDs.
:type filter_name: str
:param filter_name: Filter by server name
:type filter_ids: list of ints
:param filter_ids: Filter by server ids
:type filter_labels: dict of label type: value pairs
:param filter_labels: Filter by server labels
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"servers": [
{
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "next"
}
}
}
"""
label_param = ''
if filter_labels:
label_param = ';'.join(['{}:{}'.format(label, value) for label, value in filter_labels.items()])
filters = [
'filter[name]={0}'.format(filter_name) if filter_name else None,
'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None,
'filter[labels]={0}'.format(label_param) if filter_labels else None,
'page={0}'.format(page) if page else None
]
return self._get(
url='{0}servers.json'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | python | def list(self, filter_name=None, filter_ids=None, filter_labels=None, page=None):
"""
This API endpoint returns a paginated list of the Servers
associated with your New Relic account. Servers can be filtered
by their name or by a list of server IDs.
:type filter_name: str
:param filter_name: Filter by server name
:type filter_ids: list of ints
:param filter_ids: Filter by server ids
:type filter_labels: dict of label type: value pairs
:param filter_labels: Filter by server labels
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"servers": [
{
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "next"
}
}
}
"""
label_param = ''
if filter_labels:
label_param = ';'.join(['{}:{}'.format(label, value) for label, value in filter_labels.items()])
filters = [
'filter[name]={0}'.format(filter_name) if filter_name else None,
'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None,
'filter[labels]={0}'.format(label_param) if filter_labels else None,
'page={0}'.format(page) if page else None
]
return self._get(
url='{0}servers.json'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | [
"def",
"list",
"(",
"self",
",",
"filter_name",
"=",
"None",
",",
"filter_ids",
"=",
"None",
",",
"filter_labels",
"=",
"None",
",",
"page",
"=",
"None",
")",
":",
"label_param",
"=",
"''",
"if",
"filter_labels",
":",
"label_param",
"=",
"';'",
".",
"join",
"(",
"[",
"'{}:{}'",
".",
"format",
"(",
"label",
",",
"value",
")",
"for",
"label",
",",
"value",
"in",
"filter_labels",
".",
"items",
"(",
")",
"]",
")",
"filters",
"=",
"[",
"'filter[name]={0}'",
".",
"format",
"(",
"filter_name",
")",
"if",
"filter_name",
"else",
"None",
",",
"'filter[ids]={0}'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"app_id",
")",
"for",
"app_id",
"in",
"filter_ids",
"]",
")",
")",
"if",
"filter_ids",
"else",
"None",
",",
"'filter[labels]={0}'",
".",
"format",
"(",
"label_param",
")",
"if",
"filter_labels",
"else",
"None",
",",
"'page={0}'",
".",
"format",
"(",
"page",
")",
"if",
"page",
"else",
"None",
"]",
"return",
"self",
".",
"_get",
"(",
"url",
"=",
"'{0}servers.json'",
".",
"format",
"(",
"self",
".",
"URL",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"params",
"=",
"self",
".",
"build_param_string",
"(",
"filters",
")",
")"
]
| This API endpoint returns a paginated list of the Servers
associated with your New Relic account. Servers can be filtered
by their name or by a list of server IDs.
:type filter_name: str
:param filter_name: Filter by server name
:type filter_ids: list of ints
:param filter_ids: Filter by server ids
:type filter_labels: dict of label type: value pairs
:param filter_labels: Filter by server labels
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"servers": [
{
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "next"
}
}
} | [
"This",
"API",
"endpoint",
"returns",
"a",
"paginated",
"list",
"of",
"the",
"Servers",
"associated",
"with",
"your",
"New",
"Relic",
"account",
".",
"Servers",
"can",
"be",
"filtered",
"by",
"their",
"name",
"or",
"by",
"a",
"list",
"of",
"server",
"IDs",
"."
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/servers.py#L8-L82 | train |
ambitioninc/newrelic-api | newrelic_api/servers.py | Servers.update | def update(self, id, name=None):
"""
Updates any of the optional parameters of the server
:type id: int
:param id: Server ID
:type name: str
:param name: The name of the server
:rtype: dict
:return: The JSON response of the API
::
{
"server": {
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
}
"""
nr_data = self.show(id)['server']
data = {
'server': {
'name': name or nr_data['name'],
}
}
return self._put(
url='{0}servers/{1}.json'.format(self.URL, id),
headers=self.headers,
data=data
) | python | def update(self, id, name=None):
"""
Updates any of the optional parameters of the server
:type id: int
:param id: Server ID
:type name: str
:param name: The name of the server
:rtype: dict
:return: The JSON response of the API
::
{
"server": {
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
}
"""
nr_data = self.show(id)['server']
data = {
'server': {
'name': name or nr_data['name'],
}
}
return self._put(
url='{0}servers/{1}.json'.format(self.URL, id),
headers=self.headers,
data=data
) | [
"def",
"update",
"(",
"self",
",",
"id",
",",
"name",
"=",
"None",
")",
":",
"nr_data",
"=",
"self",
".",
"show",
"(",
"id",
")",
"[",
"'server'",
"]",
"data",
"=",
"{",
"'server'",
":",
"{",
"'name'",
":",
"name",
"or",
"nr_data",
"[",
"'name'",
"]",
",",
"}",
"}",
"return",
"self",
".",
"_put",
"(",
"url",
"=",
"'{0}servers/{1}.json'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"data",
")"
]
| Updates any of the optional parameters of the server
:type id: int
:param id: Server ID
:type name: str
:param name: The name of the server
:rtype: dict
:return: The JSON response of the API
::
{
"server": {
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
} | [
"Updates",
"any",
"of",
"the",
"optional",
"parameters",
"of",
"the",
"server"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/servers.py#L123-L172 | train |
ambitioninc/newrelic-api | newrelic_api/alert_policies.py | AlertPolicies.create | def create(self, name, incident_preference):
"""
This API endpoint allows you to create an alert policy
:type name: str
:param name: The name of the policy
:type incident_preference: str
:param incident_preference: Can be PER_POLICY, PER_CONDITION or
PER_CONDITION_AND_TARGET
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
}
"""
data = {
"policy": {
"name": name,
"incident_preference": incident_preference
}
}
return self._post(
url='{0}alerts_policies.json'.format(self.URL),
headers=self.headers,
data=data
) | python | def create(self, name, incident_preference):
"""
This API endpoint allows you to create an alert policy
:type name: str
:param name: The name of the policy
:type incident_preference: str
:param incident_preference: Can be PER_POLICY, PER_CONDITION or
PER_CONDITION_AND_TARGET
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
}
"""
data = {
"policy": {
"name": name,
"incident_preference": incident_preference
}
}
return self._post(
url='{0}alerts_policies.json'.format(self.URL),
headers=self.headers,
data=data
) | [
"def",
"create",
"(",
"self",
",",
"name",
",",
"incident_preference",
")",
":",
"data",
"=",
"{",
"\"policy\"",
":",
"{",
"\"name\"",
":",
"name",
",",
"\"incident_preference\"",
":",
"incident_preference",
"}",
"}",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"'{0}alerts_policies.json'",
".",
"format",
"(",
"self",
".",
"URL",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"data",
")"
]
| This API endpoint allows you to create an alert policy
:type name: str
:param name: The name of the policy
:type incident_preference: str
:param incident_preference: Can be PER_POLICY, PER_CONDITION or
PER_CONDITION_AND_TARGET
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
} | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"create",
"an",
"alert",
"policy"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_policies.py#L49-L88 | train |
ambitioninc/newrelic-api | newrelic_api/alert_policies.py | AlertPolicies.update | def update(self, id, name, incident_preference):
"""
This API endpoint allows you to update an alert policy
:type id: integer
:param id: The id of the policy
:type name: str
:param name: The name of the policy
:type incident_preference: str
:param incident_preference: Can be PER_POLICY, PER_CONDITION or
PER_CONDITION_AND_TARGET
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
}
"""
data = {
"policy": {
"name": name,
"incident_preference": incident_preference
}
}
return self._put(
url='{0}alerts_policies/{1}.json'.format(self.URL, id),
headers=self.headers,
data=data
) | python | def update(self, id, name, incident_preference):
"""
This API endpoint allows you to update an alert policy
:type id: integer
:param id: The id of the policy
:type name: str
:param name: The name of the policy
:type incident_preference: str
:param incident_preference: Can be PER_POLICY, PER_CONDITION or
PER_CONDITION_AND_TARGET
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
}
"""
data = {
"policy": {
"name": name,
"incident_preference": incident_preference
}
}
return self._put(
url='{0}alerts_policies/{1}.json'.format(self.URL, id),
headers=self.headers,
data=data
) | [
"def",
"update",
"(",
"self",
",",
"id",
",",
"name",
",",
"incident_preference",
")",
":",
"data",
"=",
"{",
"\"policy\"",
":",
"{",
"\"name\"",
":",
"name",
",",
"\"incident_preference\"",
":",
"incident_preference",
"}",
"}",
"return",
"self",
".",
"_put",
"(",
"url",
"=",
"'{0}alerts_policies/{1}.json'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"data",
")"
]
| This API endpoint allows you to update an alert policy
:type id: integer
:param id: The id of the policy
:type name: str
:param name: The name of the policy
:type incident_preference: str
:param incident_preference: Can be PER_POLICY, PER_CONDITION or
PER_CONDITION_AND_TARGET
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
} | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"update",
"an",
"alert",
"policy"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_policies.py#L90-L132 | train |
ambitioninc/newrelic-api | newrelic_api/alert_policies.py | AlertPolicies.delete | def delete(self, id):
"""
This API endpoint allows you to delete an alert policy
:type id: integer
:param id: The id of the policy
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
}
"""
return self._delete(
url='{0}alerts_policies/{1}.json'.format(self.URL, id),
headers=self.headers
) | python | def delete(self, id):
"""
This API endpoint allows you to delete an alert policy
:type id: integer
:param id: The id of the policy
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
}
"""
return self._delete(
url='{0}alerts_policies/{1}.json'.format(self.URL, id),
headers=self.headers
) | [
"def",
"delete",
"(",
"self",
",",
"id",
")",
":",
"return",
"self",
".",
"_delete",
"(",
"url",
"=",
"'{0}alerts_policies/{1}.json'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")"
]
| This API endpoint allows you to delete an alert policy
:type id: integer
:param id: The id of the policy
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
} | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"delete",
"an",
"alert",
"policy"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_policies.py#L134-L161 | train |
ambitioninc/newrelic-api | newrelic_api/alert_policies.py | AlertPolicies.associate_with_notification_channel | def associate_with_notification_channel(self, id, channel_id):
"""
This API endpoint allows you to associate an alert policy with an
notification channel
:type id: integer
:param id: The id of the policy
:type channel_id: integer
:param channel_id: The id of the notification channel
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"channel_ids": "list",
"id": "integer"
}
}
"""
return self._put(
url='{0}alerts_policy_channels.json?policy_id={1}&channel_ids={2}'.format(
self.URL,
id,
channel_id
),
headers=self.headers
) | python | def associate_with_notification_channel(self, id, channel_id):
"""
This API endpoint allows you to associate an alert policy with an
notification channel
:type id: integer
:param id: The id of the policy
:type channel_id: integer
:param channel_id: The id of the notification channel
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"channel_ids": "list",
"id": "integer"
}
}
"""
return self._put(
url='{0}alerts_policy_channels.json?policy_id={1}&channel_ids={2}'.format(
self.URL,
id,
channel_id
),
headers=self.headers
) | [
"def",
"associate_with_notification_channel",
"(",
"self",
",",
"id",
",",
"channel_id",
")",
":",
"return",
"self",
".",
"_put",
"(",
"url",
"=",
"'{0}alerts_policy_channels.json?policy_id={1}&channel_ids={2}'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"id",
",",
"channel_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")"
]
| This API endpoint allows you to associate an alert policy with an
notification channel
:type id: integer
:param id: The id of the policy
:type channel_id: integer
:param channel_id: The id of the notification channel
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"channel_ids": "list",
"id": "integer"
}
} | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"associate",
"an",
"alert",
"policy",
"with",
"an",
"notification",
"channel"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_policies.py#L163-L195 | train |
ambitioninc/newrelic-api | newrelic_api/alert_policies.py | AlertPolicies.dissociate_from_notification_channel | def dissociate_from_notification_channel(self, id, channel_id):
"""
This API endpoint allows you to dissociate an alert policy from an
notification channel
:type id: integer
:param id: The id of the policy
:type channel_id: integer
:param channel_id: The id of the notification channel
:rtype: dict
:return: The JSON response of the API
::
{
"channel":{
"configuration": "hash",
"type": "string",
"id": "integer",
"links":{
"policy_ids": "list"
},
"name": "string"
}
}
"""
return self._delete(
url='{0}alerts_policy_channels.json?policy_id={1}&channel_id={2}'.format(
self.URL,
id,
channel_id
),
headers=self.headers
) | python | def dissociate_from_notification_channel(self, id, channel_id):
"""
This API endpoint allows you to dissociate an alert policy from an
notification channel
:type id: integer
:param id: The id of the policy
:type channel_id: integer
:param channel_id: The id of the notification channel
:rtype: dict
:return: The JSON response of the API
::
{
"channel":{
"configuration": "hash",
"type": "string",
"id": "integer",
"links":{
"policy_ids": "list"
},
"name": "string"
}
}
"""
return self._delete(
url='{0}alerts_policy_channels.json?policy_id={1}&channel_id={2}'.format(
self.URL,
id,
channel_id
),
headers=self.headers
) | [
"def",
"dissociate_from_notification_channel",
"(",
"self",
",",
"id",
",",
"channel_id",
")",
":",
"return",
"self",
".",
"_delete",
"(",
"url",
"=",
"'{0}alerts_policy_channels.json?policy_id={1}&channel_id={2}'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"id",
",",
"channel_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")"
]
| This API endpoint allows you to dissociate an alert policy from an
notification channel
:type id: integer
:param id: The id of the policy
:type channel_id: integer
:param channel_id: The id of the notification channel
:rtype: dict
:return: The JSON response of the API
::
{
"channel":{
"configuration": "hash",
"type": "string",
"id": "integer",
"links":{
"policy_ids": "list"
},
"name": "string"
}
} | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"dissociate",
"an",
"alert",
"policy",
"from",
"an",
"notification",
"channel"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_policies.py#L197-L234 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions.py | AlertConditions.list | def list(self, policy_id, page=None):
"""
This API endpoint returns a paginated list of alert conditions associated with the
given policy_id.
This API endpoint returns a paginated list of the alert conditions
associated with your New Relic account. Alert conditions can be filtered
by their name, list of IDs, type (application, key_transaction, or
server) or whether or not policies are archived (defaults to filtering
archived policies).
:type policy_id: int
:param policy_id: Alert policy id
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"conditions": [
{
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
]
}
"""
filters = [
'policy_id={0}'.format(policy_id),
'page={0}'.format(page) if page else None
]
return self._get(
url='{0}alerts_conditions.json'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | python | def list(self, policy_id, page=None):
"""
This API endpoint returns a paginated list of alert conditions associated with the
given policy_id.
This API endpoint returns a paginated list of the alert conditions
associated with your New Relic account. Alert conditions can be filtered
by their name, list of IDs, type (application, key_transaction, or
server) or whether or not policies are archived (defaults to filtering
archived policies).
:type policy_id: int
:param policy_id: Alert policy id
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"conditions": [
{
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
]
}
"""
filters = [
'policy_id={0}'.format(policy_id),
'page={0}'.format(page) if page else None
]
return self._get(
url='{0}alerts_conditions.json'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | [
"def",
"list",
"(",
"self",
",",
"policy_id",
",",
"page",
"=",
"None",
")",
":",
"filters",
"=",
"[",
"'policy_id={0}'",
".",
"format",
"(",
"policy_id",
")",
",",
"'page={0}'",
".",
"format",
"(",
"page",
")",
"if",
"page",
"else",
"None",
"]",
"return",
"self",
".",
"_get",
"(",
"url",
"=",
"'{0}alerts_conditions.json'",
".",
"format",
"(",
"self",
".",
"URL",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"params",
"=",
"self",
".",
"build_param_string",
"(",
"filters",
")",
")"
]
| This API endpoint returns a paginated list of alert conditions associated with the
given policy_id.
This API endpoint returns a paginated list of the alert conditions
associated with your New Relic account. Alert conditions can be filtered
by their name, list of IDs, type (application, key_transaction, or
server) or whether or not policies are archived (defaults to filtering
archived policies).
:type policy_id: int
:param policy_id: Alert policy id
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"conditions": [
{
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
]
} | [
"This",
"API",
"endpoint",
"returns",
"a",
"paginated",
"list",
"of",
"alert",
"conditions",
"associated",
"with",
"the",
"given",
"policy_id",
"."
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions.py#L9-L72 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions.py | AlertConditions.update | def update(
self, alert_condition_id, policy_id,
type=None,
condition_scope=None,
name=None,
entities=None,
metric=None,
runbook_url=None,
terms=None,
user_defined=None,
enabled=None):
"""
Updates any of the optional parameters of the alert condition
:type alert_condition_id: int
:param alert_condition_id: Alerts condition id to update
:type policy_id: int
:param policy_id: Alert policy id where target alert condition belongs to
:type type: str
:param type: The type of the condition, can be apm_app_metric,
apm_kt_metric, servers_metric, browser_metric, mobile_metric
:type condition_scope: str
:param condition_scope: The scope of the condition, can be instance or application
:type name: str
:param name: The name of the server
:type entities: list[str]
:param name: entity ids to which the alert condition is applied
:type : str
:param metric: The target metric
:type : str
:param runbook_url: The url of the runbook
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type user_defined: hash
:param user_defined: hash containing threshold user_defined for the alert
required if metric is set to user_defined
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>`
if target alert condition is not included in target policy
:raises: This will raise a
:class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
if metric is set as user_defined but user_defined config is not passed
::
{
"condition": {
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
}
"""
conditions_dict = self.list(policy_id)
target_condition = None
for condition in conditions_dict['conditions']:
if int(condition['id']) == alert_condition_id:
target_condition = condition
break
if target_condition is None:
raise NoEntityException(
'Target alert condition is not included in that policy.'
'policy_id: {}, alert_condition_id {}'.format(policy_id, alert_condition_id)
)
data = {
'condition': {
'type': type or target_condition['type'],
'name': name or target_condition['name'],
'entities': entities or target_condition['entities'],
'condition_scope': condition_scope or target_condition['condition_scope'],
'terms': terms or target_condition['terms'],
'metric': metric or target_condition['metric'],
'runbook_url': runbook_url or target_condition['runbook_url'],
}
}
if enabled is not None:
data['condition']['enabled'] = str(enabled).lower()
if data['condition']['metric'] == 'user_defined':
if user_defined:
data['condition']['user_defined'] = user_defined
elif 'user_defined' in target_condition:
data['condition']['user_defined'] = target_condition['user_defined']
else:
raise ConfigurationException(
'Metric is set as user_defined but no user_defined config specified'
)
return self._put(
url='{0}alerts_conditions/{1}.json'.format(self.URL, alert_condition_id),
headers=self.headers,
data=data
) | python | def update(
self, alert_condition_id, policy_id,
type=None,
condition_scope=None,
name=None,
entities=None,
metric=None,
runbook_url=None,
terms=None,
user_defined=None,
enabled=None):
"""
Updates any of the optional parameters of the alert condition
:type alert_condition_id: int
:param alert_condition_id: Alerts condition id to update
:type policy_id: int
:param policy_id: Alert policy id where target alert condition belongs to
:type type: str
:param type: The type of the condition, can be apm_app_metric,
apm_kt_metric, servers_metric, browser_metric, mobile_metric
:type condition_scope: str
:param condition_scope: The scope of the condition, can be instance or application
:type name: str
:param name: The name of the server
:type entities: list[str]
:param name: entity ids to which the alert condition is applied
:type : str
:param metric: The target metric
:type : str
:param runbook_url: The url of the runbook
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type user_defined: hash
:param user_defined: hash containing threshold user_defined for the alert
required if metric is set to user_defined
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>`
if target alert condition is not included in target policy
:raises: This will raise a
:class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
if metric is set as user_defined but user_defined config is not passed
::
{
"condition": {
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
}
"""
conditions_dict = self.list(policy_id)
target_condition = None
for condition in conditions_dict['conditions']:
if int(condition['id']) == alert_condition_id:
target_condition = condition
break
if target_condition is None:
raise NoEntityException(
'Target alert condition is not included in that policy.'
'policy_id: {}, alert_condition_id {}'.format(policy_id, alert_condition_id)
)
data = {
'condition': {
'type': type or target_condition['type'],
'name': name or target_condition['name'],
'entities': entities or target_condition['entities'],
'condition_scope': condition_scope or target_condition['condition_scope'],
'terms': terms or target_condition['terms'],
'metric': metric or target_condition['metric'],
'runbook_url': runbook_url or target_condition['runbook_url'],
}
}
if enabled is not None:
data['condition']['enabled'] = str(enabled).lower()
if data['condition']['metric'] == 'user_defined':
if user_defined:
data['condition']['user_defined'] = user_defined
elif 'user_defined' in target_condition:
data['condition']['user_defined'] = target_condition['user_defined']
else:
raise ConfigurationException(
'Metric is set as user_defined but no user_defined config specified'
)
return self._put(
url='{0}alerts_conditions/{1}.json'.format(self.URL, alert_condition_id),
headers=self.headers,
data=data
) | [
"def",
"update",
"(",
"self",
",",
"alert_condition_id",
",",
"policy_id",
",",
"type",
"=",
"None",
",",
"condition_scope",
"=",
"None",
",",
"name",
"=",
"None",
",",
"entities",
"=",
"None",
",",
"metric",
"=",
"None",
",",
"runbook_url",
"=",
"None",
",",
"terms",
"=",
"None",
",",
"user_defined",
"=",
"None",
",",
"enabled",
"=",
"None",
")",
":",
"conditions_dict",
"=",
"self",
".",
"list",
"(",
"policy_id",
")",
"target_condition",
"=",
"None",
"for",
"condition",
"in",
"conditions_dict",
"[",
"'conditions'",
"]",
":",
"if",
"int",
"(",
"condition",
"[",
"'id'",
"]",
")",
"==",
"alert_condition_id",
":",
"target_condition",
"=",
"condition",
"break",
"if",
"target_condition",
"is",
"None",
":",
"raise",
"NoEntityException",
"(",
"'Target alert condition is not included in that policy.'",
"'policy_id: {}, alert_condition_id {}'",
".",
"format",
"(",
"policy_id",
",",
"alert_condition_id",
")",
")",
"data",
"=",
"{",
"'condition'",
":",
"{",
"'type'",
":",
"type",
"or",
"target_condition",
"[",
"'type'",
"]",
",",
"'name'",
":",
"name",
"or",
"target_condition",
"[",
"'name'",
"]",
",",
"'entities'",
":",
"entities",
"or",
"target_condition",
"[",
"'entities'",
"]",
",",
"'condition_scope'",
":",
"condition_scope",
"or",
"target_condition",
"[",
"'condition_scope'",
"]",
",",
"'terms'",
":",
"terms",
"or",
"target_condition",
"[",
"'terms'",
"]",
",",
"'metric'",
":",
"metric",
"or",
"target_condition",
"[",
"'metric'",
"]",
",",
"'runbook_url'",
":",
"runbook_url",
"or",
"target_condition",
"[",
"'runbook_url'",
"]",
",",
"}",
"}",
"if",
"enabled",
"is",
"not",
"None",
":",
"data",
"[",
"'condition'",
"]",
"[",
"'enabled'",
"]",
"=",
"str",
"(",
"enabled",
")",
".",
"lower",
"(",
")",
"if",
"data",
"[",
"'condition'",
"]",
"[",
"'metric'",
"]",
"==",
"'user_defined'",
":",
"if",
"user_defined",
":",
"data",
"[",
"'condition'",
"]",
"[",
"'user_defined'",
"]",
"=",
"user_defined",
"elif",
"'user_defined'",
"in",
"target_condition",
":",
"data",
"[",
"'condition'",
"]",
"[",
"'user_defined'",
"]",
"=",
"target_condition",
"[",
"'user_defined'",
"]",
"else",
":",
"raise",
"ConfigurationException",
"(",
"'Metric is set as user_defined but no user_defined config specified'",
")",
"return",
"self",
".",
"_put",
"(",
"url",
"=",
"'{0}alerts_conditions/{1}.json'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"alert_condition_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"data",
")"
]
| Updates any of the optional parameters of the alert condition
:type alert_condition_id: int
:param alert_condition_id: Alerts condition id to update
:type policy_id: int
:param policy_id: Alert policy id where target alert condition belongs to
:type type: str
:param type: The type of the condition, can be apm_app_metric,
apm_kt_metric, servers_metric, browser_metric, mobile_metric
:type condition_scope: str
:param condition_scope: The scope of the condition, can be instance or application
:type name: str
:param name: The name of the server
:type entities: list[str]
:param name: entity ids to which the alert condition is applied
:type : str
:param metric: The target metric
:type : str
:param runbook_url: The url of the runbook
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type user_defined: hash
:param user_defined: hash containing threshold user_defined for the alert
required if metric is set to user_defined
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>`
if target alert condition is not included in target policy
:raises: This will raise a
:class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
if metric is set as user_defined but user_defined config is not passed
::
{
"condition": {
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
} | [
"Updates",
"any",
"of",
"the",
"optional",
"parameters",
"of",
"the",
"alert",
"condition"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions.py#L74-L207 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions.py | AlertConditions.create | def create(
self, policy_id,
type,
condition_scope,
name,
entities,
metric,
terms,
runbook_url=None,
user_defined=None,
enabled=True):
"""
Creates an alert condition
:type policy_id: int
:param policy_id: Alert policy id where target alert condition belongs to
:type type: str
:param type: The type of the condition, can be apm_app_metric,
apm_kt_metric, servers_metric, browser_metric, mobile_metric
:type condition_scope: str
:param condition_scope: The scope of the condition, can be instance or application
:type name: str
:param name: The name of the server
:type entities: list[str]
:param name: entity ids to which the alert condition is applied
:type : str
:param metric: The target metric
:type : str
:param runbook_url: The url of the runbook
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type user_defined: hash
:param user_defined: hash containing threshold user_defined for the alert
required if metric is set to user_defined
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"condition": {
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
}
"""
data = {
'condition': {
'type': type,
'name': name,
'enabled': enabled,
'entities': entities,
'condition_scope': condition_scope,
'terms': terms,
'metric': metric,
'runbook_url': runbook_url,
}
}
if metric == 'user_defined':
if user_defined:
data['condition']['user_defined'] = user_defined
else:
raise ConfigurationException(
'Metric is set as user_defined but no user_defined config specified'
)
return self._post(
url='{0}alerts_conditions/policies/{1}.json'.format(self.URL, policy_id),
headers=self.headers,
data=data
) | python | def create(
self, policy_id,
type,
condition_scope,
name,
entities,
metric,
terms,
runbook_url=None,
user_defined=None,
enabled=True):
"""
Creates an alert condition
:type policy_id: int
:param policy_id: Alert policy id where target alert condition belongs to
:type type: str
:param type: The type of the condition, can be apm_app_metric,
apm_kt_metric, servers_metric, browser_metric, mobile_metric
:type condition_scope: str
:param condition_scope: The scope of the condition, can be instance or application
:type name: str
:param name: The name of the server
:type entities: list[str]
:param name: entity ids to which the alert condition is applied
:type : str
:param metric: The target metric
:type : str
:param runbook_url: The url of the runbook
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type user_defined: hash
:param user_defined: hash containing threshold user_defined for the alert
required if metric is set to user_defined
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"condition": {
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
}
"""
data = {
'condition': {
'type': type,
'name': name,
'enabled': enabled,
'entities': entities,
'condition_scope': condition_scope,
'terms': terms,
'metric': metric,
'runbook_url': runbook_url,
}
}
if metric == 'user_defined':
if user_defined:
data['condition']['user_defined'] = user_defined
else:
raise ConfigurationException(
'Metric is set as user_defined but no user_defined config specified'
)
return self._post(
url='{0}alerts_conditions/policies/{1}.json'.format(self.URL, policy_id),
headers=self.headers,
data=data
) | [
"def",
"create",
"(",
"self",
",",
"policy_id",
",",
"type",
",",
"condition_scope",
",",
"name",
",",
"entities",
",",
"metric",
",",
"terms",
",",
"runbook_url",
"=",
"None",
",",
"user_defined",
"=",
"None",
",",
"enabled",
"=",
"True",
")",
":",
"data",
"=",
"{",
"'condition'",
":",
"{",
"'type'",
":",
"type",
",",
"'name'",
":",
"name",
",",
"'enabled'",
":",
"enabled",
",",
"'entities'",
":",
"entities",
",",
"'condition_scope'",
":",
"condition_scope",
",",
"'terms'",
":",
"terms",
",",
"'metric'",
":",
"metric",
",",
"'runbook_url'",
":",
"runbook_url",
",",
"}",
"}",
"if",
"metric",
"==",
"'user_defined'",
":",
"if",
"user_defined",
":",
"data",
"[",
"'condition'",
"]",
"[",
"'user_defined'",
"]",
"=",
"user_defined",
"else",
":",
"raise",
"ConfigurationException",
"(",
"'Metric is set as user_defined but no user_defined config specified'",
")",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"'{0}alerts_conditions/policies/{1}.json'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"policy_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"data",
")"
]
| Creates an alert condition
:type policy_id: int
:param policy_id: Alert policy id where target alert condition belongs to
:type type: str
:param type: The type of the condition, can be apm_app_metric,
apm_kt_metric, servers_metric, browser_metric, mobile_metric
:type condition_scope: str
:param condition_scope: The scope of the condition, can be instance or application
:type name: str
:param name: The name of the server
:type entities: list[str]
:param name: entity ids to which the alert condition is applied
:type : str
:param metric: The target metric
:type : str
:param runbook_url: The url of the runbook
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type user_defined: hash
:param user_defined: hash containing threshold user_defined for the alert
required if metric is set to user_defined
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"condition": {
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
} | [
"Creates",
"an",
"alert",
"condition"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions.py#L209-L315 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions.py | AlertConditions.delete | def delete(self, alert_condition_id):
"""
This API endpoint allows you to delete an alert condition
:type alert_condition_id: integer
:param alert_condition_id: Alert Condition ID
:rtype: dict
:return: The JSON response of the API
::
{
"condition": {
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
}
"""
return self._delete(
url='{0}alerts_conditions/{1}.json'.format(self.URL, alert_condition_id),
headers=self.headers
) | python | def delete(self, alert_condition_id):
"""
This API endpoint allows you to delete an alert condition
:type alert_condition_id: integer
:param alert_condition_id: Alert Condition ID
:rtype: dict
:return: The JSON response of the API
::
{
"condition": {
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
}
"""
return self._delete(
url='{0}alerts_conditions/{1}.json'.format(self.URL, alert_condition_id),
headers=self.headers
) | [
"def",
"delete",
"(",
"self",
",",
"alert_condition_id",
")",
":",
"return",
"self",
".",
"_delete",
"(",
"url",
"=",
"'{0}alerts_conditions/{1}.json'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"alert_condition_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")"
]
| This API endpoint allows you to delete an alert condition
:type alert_condition_id: integer
:param alert_condition_id: Alert Condition ID
:rtype: dict
:return: The JSON response of the API
::
{
"condition": {
"id": "integer",
"type": "string",
"condition_scope": "string",
"name": "string",
"enabled": "boolean",
"entities": [
"integer"
],
"metric": "string",
"runbook_url": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"user_defined": {
"metric": "string",
"value_function": "string"
}
}
} | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"delete",
"an",
"alert",
"condition"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions.py#L317-L362 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions_infra.py | AlertConditionsInfra.list | def list(self, policy_id, limit=None, offset=None):
"""
This API endpoint returns a paginated list of alert conditions for infrastucture
metrics associated with the given policy_id.
:type policy_id: int
:param policy_id: Alert policy id
:type limit: string
:param limit: Max amount of results to return
:type offset: string
:param offset: Starting record to return
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"data": [
{
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
],
"meta": {
"limit": "integer",
"offset": "integer",
"total": "integer"
}
}
"""
filters = [
'policy_id={0}'.format(policy_id),
'limit={0}'.format(limit) if limit else '50',
'offset={0}'.format(offset) if offset else '0'
]
return self._get(
url='{0}alerts/conditions'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | python | def list(self, policy_id, limit=None, offset=None):
"""
This API endpoint returns a paginated list of alert conditions for infrastucture
metrics associated with the given policy_id.
:type policy_id: int
:param policy_id: Alert policy id
:type limit: string
:param limit: Max amount of results to return
:type offset: string
:param offset: Starting record to return
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"data": [
{
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
],
"meta": {
"limit": "integer",
"offset": "integer",
"total": "integer"
}
}
"""
filters = [
'policy_id={0}'.format(policy_id),
'limit={0}'.format(limit) if limit else '50',
'offset={0}'.format(offset) if offset else '0'
]
return self._get(
url='{0}alerts/conditions'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | [
"def",
"list",
"(",
"self",
",",
"policy_id",
",",
"limit",
"=",
"None",
",",
"offset",
"=",
"None",
")",
":",
"filters",
"=",
"[",
"'policy_id={0}'",
".",
"format",
"(",
"policy_id",
")",
",",
"'limit={0}'",
".",
"format",
"(",
"limit",
")",
"if",
"limit",
"else",
"'50'",
",",
"'offset={0}'",
".",
"format",
"(",
"offset",
")",
"if",
"offset",
"else",
"'0'",
"]",
"return",
"self",
".",
"_get",
"(",
"url",
"=",
"'{0}alerts/conditions'",
".",
"format",
"(",
"self",
".",
"URL",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"params",
"=",
"self",
".",
"build_param_string",
"(",
"filters",
")",
")"
]
| This API endpoint returns a paginated list of alert conditions for infrastucture
metrics associated with the given policy_id.
:type policy_id: int
:param policy_id: Alert policy id
:type limit: string
:param limit: Max amount of results to return
:type offset: string
:param offset: Starting record to return
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"data": [
{
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
],
"meta": {
"limit": "integer",
"offset": "integer",
"total": "integer"
}
} | [
"This",
"API",
"endpoint",
"returns",
"a",
"paginated",
"list",
"of",
"alert",
"conditions",
"for",
"infrastucture",
"metrics",
"associated",
"with",
"the",
"given",
"policy_id",
"."
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_infra.py#L13-L69 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions_infra.py | AlertConditionsInfra.show | def show(self, alert_condition_infra_id):
"""
This API endpoint returns an alert condition for infrastucture, identified by its
ID.
:type alert_condition_infra_id: int
:param alert_condition_infra_id: Alert Condition Infra ID
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
}
"""
return self._get(
url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id),
headers=self.headers,
) | python | def show(self, alert_condition_infra_id):
"""
This API endpoint returns an alert condition for infrastucture, identified by its
ID.
:type alert_condition_infra_id: int
:param alert_condition_infra_id: Alert Condition Infra ID
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
}
"""
return self._get(
url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id),
headers=self.headers,
) | [
"def",
"show",
"(",
"self",
",",
"alert_condition_infra_id",
")",
":",
"return",
"self",
".",
"_get",
"(",
"url",
"=",
"'{0}alerts/conditions/{1}'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"alert_condition_infra_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
")"
]
| This API endpoint returns an alert condition for infrastucture, identified by its
ID.
:type alert_condition_infra_id: int
:param alert_condition_infra_id: Alert Condition Infra ID
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
} | [
"This",
"API",
"endpoint",
"returns",
"an",
"alert",
"condition",
"for",
"infrastucture",
"identified",
"by",
"its",
"ID",
"."
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_infra.py#L71-L106 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions_infra.py | AlertConditionsInfra.create | def create(self, policy_id, name, condition_type, alert_condition_configuration, enabled=True):
"""
This API endpoint allows you to create an alert condition for infrastucture
:type policy_id: int
:param policy_id: Alert policy id
:type name: str
:param name: The name of the alert condition
:type condition_type: str
:param condition_type: The type of the alert condition can be
infra_process_running, infra_metric or infra_host_not_reporting
:type alert_condition_configuration: hash
:param alert_condition_configuration: hash containing config for the alert
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
}
"""
data = {
"data": alert_condition_configuration
}
data['data']['type'] = condition_type
data['data']['policy_id'] = policy_id
data['data']['name'] = name
data['data']['enabled'] = enabled
return self._post(
url='{0}alerts/conditions'.format(self.URL),
headers=self.headers,
data=data
) | python | def create(self, policy_id, name, condition_type, alert_condition_configuration, enabled=True):
"""
This API endpoint allows you to create an alert condition for infrastucture
:type policy_id: int
:param policy_id: Alert policy id
:type name: str
:param name: The name of the alert condition
:type condition_type: str
:param condition_type: The type of the alert condition can be
infra_process_running, infra_metric or infra_host_not_reporting
:type alert_condition_configuration: hash
:param alert_condition_configuration: hash containing config for the alert
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
}
"""
data = {
"data": alert_condition_configuration
}
data['data']['type'] = condition_type
data['data']['policy_id'] = policy_id
data['data']['name'] = name
data['data']['enabled'] = enabled
return self._post(
url='{0}alerts/conditions'.format(self.URL),
headers=self.headers,
data=data
) | [
"def",
"create",
"(",
"self",
",",
"policy_id",
",",
"name",
",",
"condition_type",
",",
"alert_condition_configuration",
",",
"enabled",
"=",
"True",
")",
":",
"data",
"=",
"{",
"\"data\"",
":",
"alert_condition_configuration",
"}",
"data",
"[",
"'data'",
"]",
"[",
"'type'",
"]",
"=",
"condition_type",
"data",
"[",
"'data'",
"]",
"[",
"'policy_id'",
"]",
"=",
"policy_id",
"data",
"[",
"'data'",
"]",
"[",
"'name'",
"]",
"=",
"name",
"data",
"[",
"'data'",
"]",
"[",
"'enabled'",
"]",
"=",
"enabled",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"'{0}alerts/conditions'",
".",
"format",
"(",
"self",
".",
"URL",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"data",
")"
]
| This API endpoint allows you to create an alert condition for infrastucture
:type policy_id: int
:param policy_id: Alert policy id
:type name: str
:param name: The name of the alert condition
:type condition_type: str
:param condition_type: The type of the alert condition can be
infra_process_running, infra_metric or infra_host_not_reporting
:type alert_condition_configuration: hash
:param alert_condition_configuration: hash containing config for the alert
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
} | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"create",
"an",
"alert",
"condition",
"for",
"infrastucture"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_infra.py#L108-L166 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions_infra.py | AlertConditionsInfra.update | def update(self, alert_condition_infra_id, policy_id,
name, condition_type, alert_condition_configuration, enabled=True):
"""
This API endpoint allows you to update an alert condition for infrastucture
:type alert_condition_infra_id: int
:param alert_condition_infra_id: Alert Condition Infra ID
:type policy_id: int
:param policy_id: Alert policy id
:type name: str
:param name: The name of the alert condition
:type condition_type: str
:param condition_type: The type of the alert condition can be
infra_process_running, infra_metric or infra_host_not_reporting
:type alert_condition_configuration: hash
:param alert_condition_configuration: hash containing config for the alert
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
}
"""
data = {
"data": alert_condition_configuration
}
data['data']['type'] = condition_type
data['data']['policy_id'] = policy_id
data['data']['name'] = name
data['data']['enabled'] = enabled
return self._put(
url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id),
headers=self.headers,
data=data
) | python | def update(self, alert_condition_infra_id, policy_id,
name, condition_type, alert_condition_configuration, enabled=True):
"""
This API endpoint allows you to update an alert condition for infrastucture
:type alert_condition_infra_id: int
:param alert_condition_infra_id: Alert Condition Infra ID
:type policy_id: int
:param policy_id: Alert policy id
:type name: str
:param name: The name of the alert condition
:type condition_type: str
:param condition_type: The type of the alert condition can be
infra_process_running, infra_metric or infra_host_not_reporting
:type alert_condition_configuration: hash
:param alert_condition_configuration: hash containing config for the alert
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
}
"""
data = {
"data": alert_condition_configuration
}
data['data']['type'] = condition_type
data['data']['policy_id'] = policy_id
data['data']['name'] = name
data['data']['enabled'] = enabled
return self._put(
url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id),
headers=self.headers,
data=data
) | [
"def",
"update",
"(",
"self",
",",
"alert_condition_infra_id",
",",
"policy_id",
",",
"name",
",",
"condition_type",
",",
"alert_condition_configuration",
",",
"enabled",
"=",
"True",
")",
":",
"data",
"=",
"{",
"\"data\"",
":",
"alert_condition_configuration",
"}",
"data",
"[",
"'data'",
"]",
"[",
"'type'",
"]",
"=",
"condition_type",
"data",
"[",
"'data'",
"]",
"[",
"'policy_id'",
"]",
"=",
"policy_id",
"data",
"[",
"'data'",
"]",
"[",
"'name'",
"]",
"=",
"name",
"data",
"[",
"'data'",
"]",
"[",
"'enabled'",
"]",
"=",
"enabled",
"return",
"self",
".",
"_put",
"(",
"url",
"=",
"'{0}alerts/conditions/{1}'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"alert_condition_infra_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"data",
")"
]
| This API endpoint allows you to update an alert condition for infrastucture
:type alert_condition_infra_id: int
:param alert_condition_infra_id: Alert Condition Infra ID
:type policy_id: int
:param policy_id: Alert policy id
:type name: str
:param name: The name of the alert condition
:type condition_type: str
:param condition_type: The type of the alert condition can be
infra_process_running, infra_metric or infra_host_not_reporting
:type alert_condition_configuration: hash
:param alert_condition_configuration: hash containing config for the alert
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
} | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"update",
"an",
"alert",
"condition",
"for",
"infrastucture"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_infra.py#L168-L230 | train |
ambitioninc/newrelic-api | newrelic_api/alert_conditions_infra.py | AlertConditionsInfra.delete | def delete(self, alert_condition_infra_id):
"""
This API endpoint allows you to delete an alert condition for infrastucture
:type alert_condition_infra_id: integer
:param alert_condition_infra_id: Alert Condition Infra ID
:rtype: dict
:return: The JSON response of the API
::
{}
"""
return self._delete(
url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id),
headers=self.headers
) | python | def delete(self, alert_condition_infra_id):
"""
This API endpoint allows you to delete an alert condition for infrastucture
:type alert_condition_infra_id: integer
:param alert_condition_infra_id: Alert Condition Infra ID
:rtype: dict
:return: The JSON response of the API
::
{}
"""
return self._delete(
url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id),
headers=self.headers
) | [
"def",
"delete",
"(",
"self",
",",
"alert_condition_infra_id",
")",
":",
"return",
"self",
".",
"_delete",
"(",
"url",
"=",
"'{0}alerts/conditions/{1}'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"alert_condition_infra_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")"
]
| This API endpoint allows you to delete an alert condition for infrastucture
:type alert_condition_infra_id: integer
:param alert_condition_infra_id: Alert Condition Infra ID
:rtype: dict
:return: The JSON response of the API
::
{} | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"delete",
"an",
"alert",
"condition",
"for",
"infrastucture"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_infra.py#L232-L251 | train |
ambitioninc/newrelic-api | newrelic_api/labels.py | Labels.create | def create(self, name, category, applications=None, servers=None):
"""
This API endpoint will create a new label with the provided name and
category
:type name: str
:param name: The name of the label
:type category: str
:param category: The Category
:type applications: list of int
:param applications: An optional list of application ID's
:type servers: list of int
:param servers: An optional list of server ID's
:rtype: dict
:return: The JSON response of the API
::
{
"label": {
"key": "string",
"category": "string",
"name": "string",
"links": {
"applications": [
"integer"
],
"servers": [
"integer"
]
}
}
}
"""
data = {
"label": {
"category": category,
"name": name,
"links": {
"applications": applications or [],
"servers": servers or []
}
}
}
return self._put(
url='{0}labels.json'.format(self.URL),
headers=self.headers,
data=data
) | python | def create(self, name, category, applications=None, servers=None):
"""
This API endpoint will create a new label with the provided name and
category
:type name: str
:param name: The name of the label
:type category: str
:param category: The Category
:type applications: list of int
:param applications: An optional list of application ID's
:type servers: list of int
:param servers: An optional list of server ID's
:rtype: dict
:return: The JSON response of the API
::
{
"label": {
"key": "string",
"category": "string",
"name": "string",
"links": {
"applications": [
"integer"
],
"servers": [
"integer"
]
}
}
}
"""
data = {
"label": {
"category": category,
"name": name,
"links": {
"applications": applications or [],
"servers": servers or []
}
}
}
return self._put(
url='{0}labels.json'.format(self.URL),
headers=self.headers,
data=data
) | [
"def",
"create",
"(",
"self",
",",
"name",
",",
"category",
",",
"applications",
"=",
"None",
",",
"servers",
"=",
"None",
")",
":",
"data",
"=",
"{",
"\"label\"",
":",
"{",
"\"category\"",
":",
"category",
",",
"\"name\"",
":",
"name",
",",
"\"links\"",
":",
"{",
"\"applications\"",
":",
"applications",
"or",
"[",
"]",
",",
"\"servers\"",
":",
"servers",
"or",
"[",
"]",
"}",
"}",
"}",
"return",
"self",
".",
"_put",
"(",
"url",
"=",
"'{0}labels.json'",
".",
"format",
"(",
"self",
".",
"URL",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"data",
")"
]
| This API endpoint will create a new label with the provided name and
category
:type name: str
:param name: The name of the label
:type category: str
:param category: The Category
:type applications: list of int
:param applications: An optional list of application ID's
:type servers: list of int
:param servers: An optional list of server ID's
:rtype: dict
:return: The JSON response of the API
::
{
"label": {
"key": "string",
"category": "string",
"name": "string",
"links": {
"applications": [
"integer"
],
"servers": [
"integer"
]
}
}
} | [
"This",
"API",
"endpoint",
"will",
"create",
"a",
"new",
"label",
"with",
"the",
"provided",
"name",
"and",
"category"
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/labels.py#L62-L117 | train |
ambitioninc/newrelic-api | newrelic_api/labels.py | Labels.delete | def delete(self, key):
"""
When applications are provided, this endpoint will remove those
applications from the label.
When no applications are provided, this endpoint will remove the label.
:type key: str
:param key: Label key. Example: 'Language:Java'
:rtype: dict
:return: The JSON response of the API
::
{
"label": {
"key": "string",
"category": "string",
"name": "string",
"links": {
"applications": [
"integer"
],
"servers": [
"integer"
]
}
}
}
"""
return self._delete(
url='{url}labels/labels/{key}.json'.format(
url=self.URL,
key=key),
headers=self.headers,
) | python | def delete(self, key):
"""
When applications are provided, this endpoint will remove those
applications from the label.
When no applications are provided, this endpoint will remove the label.
:type key: str
:param key: Label key. Example: 'Language:Java'
:rtype: dict
:return: The JSON response of the API
::
{
"label": {
"key": "string",
"category": "string",
"name": "string",
"links": {
"applications": [
"integer"
],
"servers": [
"integer"
]
}
}
}
"""
return self._delete(
url='{url}labels/labels/{key}.json'.format(
url=self.URL,
key=key),
headers=self.headers,
) | [
"def",
"delete",
"(",
"self",
",",
"key",
")",
":",
"return",
"self",
".",
"_delete",
"(",
"url",
"=",
"'{url}labels/labels/{key}.json'",
".",
"format",
"(",
"url",
"=",
"self",
".",
"URL",
",",
"key",
"=",
"key",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
")"
]
| When applications are provided, this endpoint will remove those
applications from the label.
When no applications are provided, this endpoint will remove the label.
:type key: str
:param key: Label key. Example: 'Language:Java'
:rtype: dict
:return: The JSON response of the API
::
{
"label": {
"key": "string",
"category": "string",
"name": "string",
"links": {
"applications": [
"integer"
],
"servers": [
"integer"
]
}
}
} | [
"When",
"applications",
"are",
"provided",
"this",
"endpoint",
"will",
"remove",
"those",
"applications",
"from",
"the",
"label",
"."
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/labels.py#L119-L156 | train |
ambitioninc/newrelic-api | newrelic_api/plugins.py | Plugins.list | def list(self, filter_guid=None, filter_ids=None, detailed=None, page=None):
"""
This API endpoint returns a paginated list of the plugins associated
with your New Relic account.
Plugins can be filtered by their name or by a list of IDs.
:type filter_guid: str
:param filter_guid: Filter by name
:type filter_ids: list of ints
:param filter_ids: Filter by user ids
:type detailed: bool
:param detailed: Include all data about a plugin
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"plugins": [
{
"id": "integer",
"name": "string",
"guid": "string",
"publisher": "string",
"details": {
"description": "integer",
"is_public": "string",
"created_at": "time",
"updated_at": "time",
"last_published_at": "time",
"has_unpublished_changes": "boolean",
"branding_image_url": "string",
"upgraded_at": "time",
"short_name": "string",
"publisher_about_url": "string",
"publisher_support_url": "string",
"download_url": "string",
"first_edited_at": "time",
"last_edited_at": "time",
"first_published_at": "time",
"published_version": "string"
},
"summary_metrics": [
{
"id": "integer",
"name": "string",
"metric": "string",
"value_function": "string",
"thresholds": {
"caution": "float",
"critical": "float"
},
"values": {
"raw": "float",
"formatted": "string"
}
}
]
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/plugins.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/plugins.json?page=2",
"rel": "next"
}
}
}
"""
filters = [
'filter[guid]={0}'.format(filter_guid) if filter_guid else None,
'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None,
'detailed={0}'.format(detailed) if detailed is not None else None,
'page={0}'.format(page) if page else None
]
return self._get(
url='{0}plugins.json'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | python | def list(self, filter_guid=None, filter_ids=None, detailed=None, page=None):
"""
This API endpoint returns a paginated list of the plugins associated
with your New Relic account.
Plugins can be filtered by their name or by a list of IDs.
:type filter_guid: str
:param filter_guid: Filter by name
:type filter_ids: list of ints
:param filter_ids: Filter by user ids
:type detailed: bool
:param detailed: Include all data about a plugin
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"plugins": [
{
"id": "integer",
"name": "string",
"guid": "string",
"publisher": "string",
"details": {
"description": "integer",
"is_public": "string",
"created_at": "time",
"updated_at": "time",
"last_published_at": "time",
"has_unpublished_changes": "boolean",
"branding_image_url": "string",
"upgraded_at": "time",
"short_name": "string",
"publisher_about_url": "string",
"publisher_support_url": "string",
"download_url": "string",
"first_edited_at": "time",
"last_edited_at": "time",
"first_published_at": "time",
"published_version": "string"
},
"summary_metrics": [
{
"id": "integer",
"name": "string",
"metric": "string",
"value_function": "string",
"thresholds": {
"caution": "float",
"critical": "float"
},
"values": {
"raw": "float",
"formatted": "string"
}
}
]
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/plugins.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/plugins.json?page=2",
"rel": "next"
}
}
}
"""
filters = [
'filter[guid]={0}'.format(filter_guid) if filter_guid else None,
'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None,
'detailed={0}'.format(detailed) if detailed is not None else None,
'page={0}'.format(page) if page else None
]
return self._get(
url='{0}plugins.json'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | [
"def",
"list",
"(",
"self",
",",
"filter_guid",
"=",
"None",
",",
"filter_ids",
"=",
"None",
",",
"detailed",
"=",
"None",
",",
"page",
"=",
"None",
")",
":",
"filters",
"=",
"[",
"'filter[guid]={0}'",
".",
"format",
"(",
"filter_guid",
")",
"if",
"filter_guid",
"else",
"None",
",",
"'filter[ids]={0}'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"app_id",
")",
"for",
"app_id",
"in",
"filter_ids",
"]",
")",
")",
"if",
"filter_ids",
"else",
"None",
",",
"'detailed={0}'",
".",
"format",
"(",
"detailed",
")",
"if",
"detailed",
"is",
"not",
"None",
"else",
"None",
",",
"'page={0}'",
".",
"format",
"(",
"page",
")",
"if",
"page",
"else",
"None",
"]",
"return",
"self",
".",
"_get",
"(",
"url",
"=",
"'{0}plugins.json'",
".",
"format",
"(",
"self",
".",
"URL",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"params",
"=",
"self",
".",
"build_param_string",
"(",
"filters",
")",
")"
]
| This API endpoint returns a paginated list of the plugins associated
with your New Relic account.
Plugins can be filtered by their name or by a list of IDs.
:type filter_guid: str
:param filter_guid: Filter by name
:type filter_ids: list of ints
:param filter_ids: Filter by user ids
:type detailed: bool
:param detailed: Include all data about a plugin
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"plugins": [
{
"id": "integer",
"name": "string",
"guid": "string",
"publisher": "string",
"details": {
"description": "integer",
"is_public": "string",
"created_at": "time",
"updated_at": "time",
"last_published_at": "time",
"has_unpublished_changes": "boolean",
"branding_image_url": "string",
"upgraded_at": "time",
"short_name": "string",
"publisher_about_url": "string",
"publisher_support_url": "string",
"download_url": "string",
"first_edited_at": "time",
"last_edited_at": "time",
"first_published_at": "time",
"published_version": "string"
},
"summary_metrics": [
{
"id": "integer",
"name": "string",
"metric": "string",
"value_function": "string",
"thresholds": {
"caution": "float",
"critical": "float"
},
"values": {
"raw": "float",
"formatted": "string"
}
}
]
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/plugins.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/plugins.json?page=2",
"rel": "next"
}
}
} | [
"This",
"API",
"endpoint",
"returns",
"a",
"paginated",
"list",
"of",
"the",
"plugins",
"associated",
"with",
"your",
"New",
"Relic",
"account",
"."
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/plugins.py#L8-L100 | train |
ambitioninc/newrelic-api | newrelic_api/application_instances.py | ApplicationInstances.list | def list(
self, application_id, filter_hostname=None, filter_ids=None,
page=None):
"""
This API endpoint returns a paginated list of instances associated with the
given application.
Application instances can be filtered by hostname, or the list of
application instance IDs.
:type application_id: int
:param application_id: Application ID
:type filter_hostname: str
:param filter_hostname: Filter by server hostname
:type filter_ids: list of ints
:param filter_ids: Filter by application instance ids
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"application_instances": [
{
"id": "integer",
"application_name": "string",
"host": "string",
"port": "integer",
"language": "integer",
"health_status": "string",
"application_summary": {
"response_time": "float",
"throughput": "float",
"error_rate": "float",
"apdex_score": "float"
},
"end_user_summary": {
"response_time": "float",
"throughput": "float",
"apdex_score": "float"
},
"links": {
"application": "integer",
"application_host": "integer",
"server": "integer"
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2",
"rel": "next"
}
}
}
"""
filters = [
'filter[hostname]={0}'.format(filter_hostname) if filter_hostname else None,
'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None,
'page={0}'.format(page) if page else None
]
return self._get(
url='{root}applications/{application_id}/instances.json'.format(
root=self.URL,
application_id=application_id
),
headers=self.headers,
params=self.build_param_string(filters)
) | python | def list(
self, application_id, filter_hostname=None, filter_ids=None,
page=None):
"""
This API endpoint returns a paginated list of instances associated with the
given application.
Application instances can be filtered by hostname, or the list of
application instance IDs.
:type application_id: int
:param application_id: Application ID
:type filter_hostname: str
:param filter_hostname: Filter by server hostname
:type filter_ids: list of ints
:param filter_ids: Filter by application instance ids
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"application_instances": [
{
"id": "integer",
"application_name": "string",
"host": "string",
"port": "integer",
"language": "integer",
"health_status": "string",
"application_summary": {
"response_time": "float",
"throughput": "float",
"error_rate": "float",
"apdex_score": "float"
},
"end_user_summary": {
"response_time": "float",
"throughput": "float",
"apdex_score": "float"
},
"links": {
"application": "integer",
"application_host": "integer",
"server": "integer"
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2",
"rel": "next"
}
}
}
"""
filters = [
'filter[hostname]={0}'.format(filter_hostname) if filter_hostname else None,
'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None,
'page={0}'.format(page) if page else None
]
return self._get(
url='{root}applications/{application_id}/instances.json'.format(
root=self.URL,
application_id=application_id
),
headers=self.headers,
params=self.build_param_string(filters)
) | [
"def",
"list",
"(",
"self",
",",
"application_id",
",",
"filter_hostname",
"=",
"None",
",",
"filter_ids",
"=",
"None",
",",
"page",
"=",
"None",
")",
":",
"filters",
"=",
"[",
"'filter[hostname]={0}'",
".",
"format",
"(",
"filter_hostname",
")",
"if",
"filter_hostname",
"else",
"None",
",",
"'filter[ids]={0}'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"app_id",
")",
"for",
"app_id",
"in",
"filter_ids",
"]",
")",
")",
"if",
"filter_ids",
"else",
"None",
",",
"'page={0}'",
".",
"format",
"(",
"page",
")",
"if",
"page",
"else",
"None",
"]",
"return",
"self",
".",
"_get",
"(",
"url",
"=",
"'{root}applications/{application_id}/instances.json'",
".",
"format",
"(",
"root",
"=",
"self",
".",
"URL",
",",
"application_id",
"=",
"application_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"params",
"=",
"self",
".",
"build_param_string",
"(",
"filters",
")",
")"
]
| This API endpoint returns a paginated list of instances associated with the
given application.
Application instances can be filtered by hostname, or the list of
application instance IDs.
:type application_id: int
:param application_id: Application ID
:type filter_hostname: str
:param filter_hostname: Filter by server hostname
:type filter_ids: list of ints
:param filter_ids: Filter by application instance ids
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"application_instances": [
{
"id": "integer",
"application_name": "string",
"host": "string",
"port": "integer",
"language": "integer",
"health_status": "string",
"application_summary": {
"response_time": "float",
"throughput": "float",
"error_rate": "float",
"apdex_score": "float"
},
"end_user_summary": {
"response_time": "float",
"throughput": "float",
"apdex_score": "float"
},
"links": {
"application": "integer",
"application_host": "integer",
"server": "integer"
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2",
"rel": "next"
}
}
} | [
"This",
"API",
"endpoint",
"returns",
"a",
"paginated",
"list",
"of",
"instances",
"associated",
"with",
"the",
"given",
"application",
"."
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/application_instances.py#L8-L88 | train |
ambitioninc/newrelic-api | newrelic_api/application_hosts.py | ApplicationHosts.show | def show(self, application_id, host_id):
"""
This API endpoint returns a single application host, identified by its
ID.
:type application_id: int
:param application_id: Application ID
:type host_id: int
:param host_id: Application host ID
:rtype: dict
:return: The JSON response of the API
::
{
"application_host": {
"id": "integer",
"application_name": "string",
"host": "string",
"language": "integer",
"health_status": "string",
"application_summary": {
"response_time": "float",
"throughput": "float",
"error_rate": "float",
"apdex_score": "float"
},
"end_user_summary": {
"response_time": "float",
"throughput": "float",
"apdex_score": "float"
},
"links": {
"application": "integer",
"application_instances": [
"integer"
],
"server": "integer"
}
}
}
"""
return self._get(
url='{root}applications/{application_id}/hosts/{host_id}.json'.format(
root=self.URL,
application_id=application_id,
host_id=host_id
),
headers=self.headers,
) | python | def show(self, application_id, host_id):
"""
This API endpoint returns a single application host, identified by its
ID.
:type application_id: int
:param application_id: Application ID
:type host_id: int
:param host_id: Application host ID
:rtype: dict
:return: The JSON response of the API
::
{
"application_host": {
"id": "integer",
"application_name": "string",
"host": "string",
"language": "integer",
"health_status": "string",
"application_summary": {
"response_time": "float",
"throughput": "float",
"error_rate": "float",
"apdex_score": "float"
},
"end_user_summary": {
"response_time": "float",
"throughput": "float",
"apdex_score": "float"
},
"links": {
"application": "integer",
"application_instances": [
"integer"
],
"server": "integer"
}
}
}
"""
return self._get(
url='{root}applications/{application_id}/hosts/{host_id}.json'.format(
root=self.URL,
application_id=application_id,
host_id=host_id
),
headers=self.headers,
) | [
"def",
"show",
"(",
"self",
",",
"application_id",
",",
"host_id",
")",
":",
"return",
"self",
".",
"_get",
"(",
"url",
"=",
"'{root}applications/{application_id}/hosts/{host_id}.json'",
".",
"format",
"(",
"root",
"=",
"self",
".",
"URL",
",",
"application_id",
"=",
"application_id",
",",
"host_id",
"=",
"host_id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
")"
]
| This API endpoint returns a single application host, identified by its
ID.
:type application_id: int
:param application_id: Application ID
:type host_id: int
:param host_id: Application host ID
:rtype: dict
:return: The JSON response of the API
::
{
"application_host": {
"id": "integer",
"application_name": "string",
"host": "string",
"language": "integer",
"health_status": "string",
"application_summary": {
"response_time": "float",
"throughput": "float",
"error_rate": "float",
"apdex_score": "float"
},
"end_user_summary": {
"response_time": "float",
"throughput": "float",
"apdex_score": "float"
},
"links": {
"application": "integer",
"application_instances": [
"integer"
],
"server": "integer"
}
}
} | [
"This",
"API",
"endpoint",
"returns",
"a",
"single",
"application",
"host",
"identified",
"by",
"its",
"ID",
"."
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/application_hosts.py#L91-L143 | train |
ambitioninc/newrelic-api | newrelic_api/components.py | Components.metric_data | def metric_data(
self, id, names, values=None, from_dt=None, to_dt=None,
summarize=False):
"""
This API endpoint returns a list of values for each of the requested
metrics. The list of available metrics can be returned using the Metric
Name API endpoint.
Metric data can be filtered by a number of parameters, including
multiple names and values, and by time range. Metric names and values
will be matched intelligently in the background.
You can also retrieve a summarized data point across the entire time
range selected by using the summarize parameter.
**Note** All times sent and received are formatted in UTC. The default
time range is the last 30 minutes.
:type id: int
:param id: Component ID
:type names: list of str
:param names: Retrieve specific metrics by name
:type values: list of str
:param values: Retrieve specific metric values
:type from_dt: datetime
:param from_dt: Retrieve metrics after this time
:type to_dt: datetime
:param to_dt: Retrieve metrics before this time
:type summarize: bool
:param summarize: Summarize the data
:rtype: dict
:return: The JSON response of the API
::
{
"metric_data": {
"from": "time",
"to": "time",
"metrics": [
{
"name": "string",
"timeslices": [
{
"from": "time",
"to": "time",
"values": "hash"
}
]
}
]
}
}
"""
params = [
'from={0}'.format(from_dt) if from_dt else None,
'to={0}'.format(to_dt) if to_dt else None,
'summarize=true' if summarize else None
]
params += ['names[]={0}'.format(name) for name in names]
if values:
params += ['values[]={0}'.format(value) for value in values]
return self._get(
url='{0}components/{1}/metrics/data.json'.format(self.URL, id),
headers=self.headers,
params=self.build_param_string(params)
) | python | def metric_data(
self, id, names, values=None, from_dt=None, to_dt=None,
summarize=False):
"""
This API endpoint returns a list of values for each of the requested
metrics. The list of available metrics can be returned using the Metric
Name API endpoint.
Metric data can be filtered by a number of parameters, including
multiple names and values, and by time range. Metric names and values
will be matched intelligently in the background.
You can also retrieve a summarized data point across the entire time
range selected by using the summarize parameter.
**Note** All times sent and received are formatted in UTC. The default
time range is the last 30 minutes.
:type id: int
:param id: Component ID
:type names: list of str
:param names: Retrieve specific metrics by name
:type values: list of str
:param values: Retrieve specific metric values
:type from_dt: datetime
:param from_dt: Retrieve metrics after this time
:type to_dt: datetime
:param to_dt: Retrieve metrics before this time
:type summarize: bool
:param summarize: Summarize the data
:rtype: dict
:return: The JSON response of the API
::
{
"metric_data": {
"from": "time",
"to": "time",
"metrics": [
{
"name": "string",
"timeslices": [
{
"from": "time",
"to": "time",
"values": "hash"
}
]
}
]
}
}
"""
params = [
'from={0}'.format(from_dt) if from_dt else None,
'to={0}'.format(to_dt) if to_dt else None,
'summarize=true' if summarize else None
]
params += ['names[]={0}'.format(name) for name in names]
if values:
params += ['values[]={0}'.format(value) for value in values]
return self._get(
url='{0}components/{1}/metrics/data.json'.format(self.URL, id),
headers=self.headers,
params=self.build_param_string(params)
) | [
"def",
"metric_data",
"(",
"self",
",",
"id",
",",
"names",
",",
"values",
"=",
"None",
",",
"from_dt",
"=",
"None",
",",
"to_dt",
"=",
"None",
",",
"summarize",
"=",
"False",
")",
":",
"params",
"=",
"[",
"'from={0}'",
".",
"format",
"(",
"from_dt",
")",
"if",
"from_dt",
"else",
"None",
",",
"'to={0}'",
".",
"format",
"(",
"to_dt",
")",
"if",
"to_dt",
"else",
"None",
",",
"'summarize=true'",
"if",
"summarize",
"else",
"None",
"]",
"params",
"+=",
"[",
"'names[]={0}'",
".",
"format",
"(",
"name",
")",
"for",
"name",
"in",
"names",
"]",
"if",
"values",
":",
"params",
"+=",
"[",
"'values[]={0}'",
".",
"format",
"(",
"value",
")",
"for",
"value",
"in",
"values",
"]",
"return",
"self",
".",
"_get",
"(",
"url",
"=",
"'{0}components/{1}/metrics/data.json'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"params",
"=",
"self",
".",
"build_param_string",
"(",
"params",
")",
")"
]
| This API endpoint returns a list of values for each of the requested
metrics. The list of available metrics can be returned using the Metric
Name API endpoint.
Metric data can be filtered by a number of parameters, including
multiple names and values, and by time range. Metric names and values
will be matched intelligently in the background.
You can also retrieve a summarized data point across the entire time
range selected by using the summarize parameter.
**Note** All times sent and received are formatted in UTC. The default
time range is the last 30 minutes.
:type id: int
:param id: Component ID
:type names: list of str
:param names: Retrieve specific metrics by name
:type values: list of str
:param values: Retrieve specific metric values
:type from_dt: datetime
:param from_dt: Retrieve metrics after this time
:type to_dt: datetime
:param to_dt: Retrieve metrics before this time
:type summarize: bool
:param summarize: Summarize the data
:rtype: dict
:return: The JSON response of the API
::
{
"metric_data": {
"from": "time",
"to": "time",
"metrics": [
{
"name": "string",
"timeslices": [
{
"from": "time",
"to": "time",
"values": "hash"
}
]
}
]
}
} | [
"This",
"API",
"endpoint",
"returns",
"a",
"list",
"of",
"values",
"for",
"each",
"of",
"the",
"requested",
"metrics",
".",
"The",
"list",
"of",
"available",
"metrics",
"can",
"be",
"returned",
"using",
"the",
"Metric",
"Name",
"API",
"endpoint",
"."
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/components.py#L176-L251 | train |
ambitioninc/newrelic-api | newrelic_api/dashboards.py | Dashboards.create | def create(self, dashboard_data):
"""
This API endpoint creates a dashboard and all defined widgets.
:type dashboard: dict
:param dashboard: Dashboard Dictionary
:rtype dict
:return: The JSON response of the API
::
{
"dashboard": {
"id": "integer",
"title": "string",
"description": "string",
"icon": "string",
"created_at": "time",
"updated_at": "time",
"visibility": "string",
"editable": "string",
"ui_url": "string",
"api_url": "string",
"owner_email": "string",
"metadata": {
"version": "integer"
},
"widgets": [
{
"visualization": "string",
"layout": {
"width": "integer",
"height": "integer",
"row": "integer",
"column": "integer"
},
"widget_id": "integer",
"account_id": "integer",
"data": [
"nrql": "string"
],
"presentation": {
"title": "string",
"notes": "string"
}
}
],
"filter": {
"event_types": ["string"],
"attributes": ["string"]
}
}
}
"""
return self._post(
url='{0}dashboards.json'.format(self.URL),
headers=self.headers,
data=dashboard_data,
) | python | def create(self, dashboard_data):
"""
This API endpoint creates a dashboard and all defined widgets.
:type dashboard: dict
:param dashboard: Dashboard Dictionary
:rtype dict
:return: The JSON response of the API
::
{
"dashboard": {
"id": "integer",
"title": "string",
"description": "string",
"icon": "string",
"created_at": "time",
"updated_at": "time",
"visibility": "string",
"editable": "string",
"ui_url": "string",
"api_url": "string",
"owner_email": "string",
"metadata": {
"version": "integer"
},
"widgets": [
{
"visualization": "string",
"layout": {
"width": "integer",
"height": "integer",
"row": "integer",
"column": "integer"
},
"widget_id": "integer",
"account_id": "integer",
"data": [
"nrql": "string"
],
"presentation": {
"title": "string",
"notes": "string"
}
}
],
"filter": {
"event_types": ["string"],
"attributes": ["string"]
}
}
}
"""
return self._post(
url='{0}dashboards.json'.format(self.URL),
headers=self.headers,
data=dashboard_data,
) | [
"def",
"create",
"(",
"self",
",",
"dashboard_data",
")",
":",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"'{0}dashboards.json'",
".",
"format",
"(",
"self",
".",
"URL",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"dashboard_data",
",",
")"
]
| This API endpoint creates a dashboard and all defined widgets.
:type dashboard: dict
:param dashboard: Dashboard Dictionary
:rtype dict
:return: The JSON response of the API
::
{
"dashboard": {
"id": "integer",
"title": "string",
"description": "string",
"icon": "string",
"created_at": "time",
"updated_at": "time",
"visibility": "string",
"editable": "string",
"ui_url": "string",
"api_url": "string",
"owner_email": "string",
"metadata": {
"version": "integer"
},
"widgets": [
{
"visualization": "string",
"layout": {
"width": "integer",
"height": "integer",
"row": "integer",
"column": "integer"
},
"widget_id": "integer",
"account_id": "integer",
"data": [
"nrql": "string"
],
"presentation": {
"title": "string",
"notes": "string"
}
}
],
"filter": {
"event_types": ["string"],
"attributes": ["string"]
}
}
} | [
"This",
"API",
"endpoint",
"creates",
"a",
"dashboard",
"and",
"all",
"defined",
"widgets",
"."
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/dashboards.py#L151-L209 | train |
ambitioninc/newrelic-api | newrelic_api/dashboards.py | Dashboards.update | def update(self, id, dashboard_data):
"""
This API endpoint updates a dashboard and all defined widgets.
:type id: int
:param id: Dashboard ID
:type dashboard: dict
:param dashboard: Dashboard Dictionary
:rtype dict
:return: The JSON response of the API
::
{
"dashboard": {
"id": "integer",
"title": "string",
"description": "string",
"icon": "string",
"created_at": "time",
"updated_at": "time",
"visibility": "string",
"editable": "string",
"ui_url": "string",
"api_url": "string",
"owner_email": "string",
"metadata": {
"version": "integer"
},
"widgets": [
{
"visualization": "string",
"layout": {
"width": "integer",
"height": "integer",
"row": "integer",
"column": "integer"
},
"widget_id": "integer",
"account_id": "integer",
"data": [
"nrql": "string"
],
"presentation": {
"title": "string",
"notes": "string"
}
}
],
"filter": {
"event_types": ["string"],
"attributes": ["string"]
}
}
}
"""
return self._put(
url='{0}dashboards/{1}.json'.format(self.URL, id),
headers=self.headers,
data=dashboard_data,
) | python | def update(self, id, dashboard_data):
"""
This API endpoint updates a dashboard and all defined widgets.
:type id: int
:param id: Dashboard ID
:type dashboard: dict
:param dashboard: Dashboard Dictionary
:rtype dict
:return: The JSON response of the API
::
{
"dashboard": {
"id": "integer",
"title": "string",
"description": "string",
"icon": "string",
"created_at": "time",
"updated_at": "time",
"visibility": "string",
"editable": "string",
"ui_url": "string",
"api_url": "string",
"owner_email": "string",
"metadata": {
"version": "integer"
},
"widgets": [
{
"visualization": "string",
"layout": {
"width": "integer",
"height": "integer",
"row": "integer",
"column": "integer"
},
"widget_id": "integer",
"account_id": "integer",
"data": [
"nrql": "string"
],
"presentation": {
"title": "string",
"notes": "string"
}
}
],
"filter": {
"event_types": ["string"],
"attributes": ["string"]
}
}
}
"""
return self._put(
url='{0}dashboards/{1}.json'.format(self.URL, id),
headers=self.headers,
data=dashboard_data,
) | [
"def",
"update",
"(",
"self",
",",
"id",
",",
"dashboard_data",
")",
":",
"return",
"self",
".",
"_put",
"(",
"url",
"=",
"'{0}dashboards/{1}.json'",
".",
"format",
"(",
"self",
".",
"URL",
",",
"id",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"dashboard_data",
",",
")"
]
| This API endpoint updates a dashboard and all defined widgets.
:type id: int
:param id: Dashboard ID
:type dashboard: dict
:param dashboard: Dashboard Dictionary
:rtype dict
:return: The JSON response of the API
::
{
"dashboard": {
"id": "integer",
"title": "string",
"description": "string",
"icon": "string",
"created_at": "time",
"updated_at": "time",
"visibility": "string",
"editable": "string",
"ui_url": "string",
"api_url": "string",
"owner_email": "string",
"metadata": {
"version": "integer"
},
"widgets": [
{
"visualization": "string",
"layout": {
"width": "integer",
"height": "integer",
"row": "integer",
"column": "integer"
},
"widget_id": "integer",
"account_id": "integer",
"data": [
"nrql": "string"
],
"presentation": {
"title": "string",
"notes": "string"
}
}
],
"filter": {
"event_types": ["string"],
"attributes": ["string"]
}
}
} | [
"This",
"API",
"endpoint",
"updates",
"a",
"dashboard",
"and",
"all",
"defined",
"widgets",
"."
]
| 07b4430aa6ae61e4704e2928a6e7a24c76f0f424 | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/dashboards.py#L211-L272 | train |
borntyping/python-dice | dice/grammar.py | operatorPrecedence | def operatorPrecedence(base, operators):
"""
This re-implements pyparsing's operatorPrecedence function.
It gets rid of a few annoying bugs, like always putting operators inside
a Group, and matching the whole grammar with Forward first (there may
actually be a reason for that, but I couldn't find it). It doesn't
support trinary expressions, but they should be easy to add if it turns
out I need them.
"""
# The full expression, used to provide sub-expressions
expression = Forward()
# The initial expression
last = base | Suppress('(') + expression + Suppress(')')
def parse_operator(expr, arity, association, action=None, extra=None):
return expr, arity, association, action, extra
for op in operators:
# Use a function to default action to None
expr, arity, association, action, extra = parse_operator(*op)
# Check that the arity is valid
if arity < 1 or arity > 2:
raise Exception("Arity must be unary (1) or binary (2)")
if association not in (opAssoc.LEFT, opAssoc.RIGHT):
raise Exception("Association must be LEFT or RIGHT")
# This will contain the expression
this = Forward()
# Create an expression based on the association and arity
if association is opAssoc.LEFT:
new_last = (last | extra) if extra else last
if arity == 1:
operator_expression = new_last + OneOrMore(expr)
elif arity == 2:
operator_expression = last + OneOrMore(expr + new_last)
elif association is opAssoc.RIGHT:
new_this = (this | extra) if extra else this
if arity == 1:
operator_expression = expr + new_this
# Currently no operator uses this, so marking it nocover for now
elif arity == 2: # nocover
operator_expression = last + OneOrMore(new_this) # nocover
# Set the parse action for the operator
if action is not None:
operator_expression.setParseAction(action)
this <<= (operator_expression | last)
last = this
# Set the full expression and return it
expression <<= last
return expression | python | def operatorPrecedence(base, operators):
"""
This re-implements pyparsing's operatorPrecedence function.
It gets rid of a few annoying bugs, like always putting operators inside
a Group, and matching the whole grammar with Forward first (there may
actually be a reason for that, but I couldn't find it). It doesn't
support trinary expressions, but they should be easy to add if it turns
out I need them.
"""
# The full expression, used to provide sub-expressions
expression = Forward()
# The initial expression
last = base | Suppress('(') + expression + Suppress(')')
def parse_operator(expr, arity, association, action=None, extra=None):
return expr, arity, association, action, extra
for op in operators:
# Use a function to default action to None
expr, arity, association, action, extra = parse_operator(*op)
# Check that the arity is valid
if arity < 1 or arity > 2:
raise Exception("Arity must be unary (1) or binary (2)")
if association not in (opAssoc.LEFT, opAssoc.RIGHT):
raise Exception("Association must be LEFT or RIGHT")
# This will contain the expression
this = Forward()
# Create an expression based on the association and arity
if association is opAssoc.LEFT:
new_last = (last | extra) if extra else last
if arity == 1:
operator_expression = new_last + OneOrMore(expr)
elif arity == 2:
operator_expression = last + OneOrMore(expr + new_last)
elif association is opAssoc.RIGHT:
new_this = (this | extra) if extra else this
if arity == 1:
operator_expression = expr + new_this
# Currently no operator uses this, so marking it nocover for now
elif arity == 2: # nocover
operator_expression = last + OneOrMore(new_this) # nocover
# Set the parse action for the operator
if action is not None:
operator_expression.setParseAction(action)
this <<= (operator_expression | last)
last = this
# Set the full expression and return it
expression <<= last
return expression | [
"def",
"operatorPrecedence",
"(",
"base",
",",
"operators",
")",
":",
"# The full expression, used to provide sub-expressions",
"expression",
"=",
"Forward",
"(",
")",
"# The initial expression",
"last",
"=",
"base",
"|",
"Suppress",
"(",
"'('",
")",
"+",
"expression",
"+",
"Suppress",
"(",
"')'",
")",
"def",
"parse_operator",
"(",
"expr",
",",
"arity",
",",
"association",
",",
"action",
"=",
"None",
",",
"extra",
"=",
"None",
")",
":",
"return",
"expr",
",",
"arity",
",",
"association",
",",
"action",
",",
"extra",
"for",
"op",
"in",
"operators",
":",
"# Use a function to default action to None",
"expr",
",",
"arity",
",",
"association",
",",
"action",
",",
"extra",
"=",
"parse_operator",
"(",
"*",
"op",
")",
"# Check that the arity is valid",
"if",
"arity",
"<",
"1",
"or",
"arity",
">",
"2",
":",
"raise",
"Exception",
"(",
"\"Arity must be unary (1) or binary (2)\"",
")",
"if",
"association",
"not",
"in",
"(",
"opAssoc",
".",
"LEFT",
",",
"opAssoc",
".",
"RIGHT",
")",
":",
"raise",
"Exception",
"(",
"\"Association must be LEFT or RIGHT\"",
")",
"# This will contain the expression",
"this",
"=",
"Forward",
"(",
")",
"# Create an expression based on the association and arity",
"if",
"association",
"is",
"opAssoc",
".",
"LEFT",
":",
"new_last",
"=",
"(",
"last",
"|",
"extra",
")",
"if",
"extra",
"else",
"last",
"if",
"arity",
"==",
"1",
":",
"operator_expression",
"=",
"new_last",
"+",
"OneOrMore",
"(",
"expr",
")",
"elif",
"arity",
"==",
"2",
":",
"operator_expression",
"=",
"last",
"+",
"OneOrMore",
"(",
"expr",
"+",
"new_last",
")",
"elif",
"association",
"is",
"opAssoc",
".",
"RIGHT",
":",
"new_this",
"=",
"(",
"this",
"|",
"extra",
")",
"if",
"extra",
"else",
"this",
"if",
"arity",
"==",
"1",
":",
"operator_expression",
"=",
"expr",
"+",
"new_this",
"# Currently no operator uses this, so marking it nocover for now",
"elif",
"arity",
"==",
"2",
":",
"# nocover",
"operator_expression",
"=",
"last",
"+",
"OneOrMore",
"(",
"new_this",
")",
"# nocover",
"# Set the parse action for the operator",
"if",
"action",
"is",
"not",
"None",
":",
"operator_expression",
".",
"setParseAction",
"(",
"action",
")",
"this",
"<<=",
"(",
"operator_expression",
"|",
"last",
")",
"last",
"=",
"this",
"# Set the full expression and return it",
"expression",
"<<=",
"last",
"return",
"expression"
]
| This re-implements pyparsing's operatorPrecedence function.
It gets rid of a few annoying bugs, like always putting operators inside
a Group, and matching the whole grammar with Forward first (there may
actually be a reason for that, but I couldn't find it). It doesn't
support trinary expressions, but they should be easy to add if it turns
out I need them. | [
"This",
"re",
"-",
"implements",
"pyparsing",
"s",
"operatorPrecedence",
"function",
"."
]
| 88398c77534ebec19f1f18478e475d0b7a5bc717 | https://github.com/borntyping/python-dice/blob/88398c77534ebec19f1f18478e475d0b7a5bc717/dice/grammar.py#L25-L83 | train |
borntyping/python-dice | dice/elements.py | Element.set_parse_attributes | def set_parse_attributes(self, string, location, tokens):
"Fluent API for setting parsed location"
self.string = string
self.location = location
self.tokens = tokens
return self | python | def set_parse_attributes(self, string, location, tokens):
"Fluent API for setting parsed location"
self.string = string
self.location = location
self.tokens = tokens
return self | [
"def",
"set_parse_attributes",
"(",
"self",
",",
"string",
",",
"location",
",",
"tokens",
")",
":",
"self",
".",
"string",
"=",
"string",
"self",
".",
"location",
"=",
"location",
"self",
".",
"tokens",
"=",
"tokens",
"return",
"self"
]
| Fluent API for setting parsed location | [
"Fluent",
"API",
"for",
"setting",
"parsed",
"location"
]
| 88398c77534ebec19f1f18478e475d0b7a5bc717 | https://github.com/borntyping/python-dice/blob/88398c77534ebec19f1f18478e475d0b7a5bc717/dice/elements.py#L26-L31 | train |
borntyping/python-dice | dice/elements.py | Element.evaluate_object | def evaluate_object(obj, cls=None, cache=False, **kwargs):
"""Evaluates elements, and coerces objects to a class if needed"""
old_obj = obj
if isinstance(obj, Element):
if cache:
obj = obj.evaluate_cached(**kwargs)
else:
obj = obj.evaluate(cache=cache, **kwargs)
if cls is not None and type(obj) != cls:
obj = cls(obj)
for attr in ('string', 'location', 'tokens'):
if hasattr(old_obj, attr):
setattr(obj, attr, getattr(old_obj, attr))
return obj | python | def evaluate_object(obj, cls=None, cache=False, **kwargs):
"""Evaluates elements, and coerces objects to a class if needed"""
old_obj = obj
if isinstance(obj, Element):
if cache:
obj = obj.evaluate_cached(**kwargs)
else:
obj = obj.evaluate(cache=cache, **kwargs)
if cls is not None and type(obj) != cls:
obj = cls(obj)
for attr in ('string', 'location', 'tokens'):
if hasattr(old_obj, attr):
setattr(obj, attr, getattr(old_obj, attr))
return obj | [
"def",
"evaluate_object",
"(",
"obj",
",",
"cls",
"=",
"None",
",",
"cache",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"old_obj",
"=",
"obj",
"if",
"isinstance",
"(",
"obj",
",",
"Element",
")",
":",
"if",
"cache",
":",
"obj",
"=",
"obj",
".",
"evaluate_cached",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"obj",
"=",
"obj",
".",
"evaluate",
"(",
"cache",
"=",
"cache",
",",
"*",
"*",
"kwargs",
")",
"if",
"cls",
"is",
"not",
"None",
"and",
"type",
"(",
"obj",
")",
"!=",
"cls",
":",
"obj",
"=",
"cls",
"(",
"obj",
")",
"for",
"attr",
"in",
"(",
"'string'",
",",
"'location'",
",",
"'tokens'",
")",
":",
"if",
"hasattr",
"(",
"old_obj",
",",
"attr",
")",
":",
"setattr",
"(",
"obj",
",",
"attr",
",",
"getattr",
"(",
"old_obj",
",",
"attr",
")",
")",
"return",
"obj"
]
| Evaluates elements, and coerces objects to a class if needed | [
"Evaluates",
"elements",
"and",
"coerces",
"objects",
"to",
"a",
"class",
"if",
"needed"
]
| 88398c77534ebec19f1f18478e475d0b7a5bc717 | https://github.com/borntyping/python-dice/blob/88398c77534ebec19f1f18478e475d0b7a5bc717/dice/elements.py#L44-L60 | train |
networks-lab/metaknowledge | metaknowledge/graphHelpers.py | readGraph | def readGraph(edgeList, nodeList = None, directed = False, idKey = 'ID', eSource = 'From', eDest = 'To'):
"""Reads the files given by _edgeList_ and _nodeList_ and creates a networkx graph for the files.
This is designed only for the files produced by metaknowledge and is meant to be the reverse of [writeGraph()](#metaknowledge.graphHelpers.writeGraph), if this does not produce the desired results the networkx builtin [networkx.read_edgelist()](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.readwrite.edgelist.read_edgelist.html) could be tried as it is aimed at a more general usage.
The read edge list format assumes the column named _eSource_ (default `'From'`) is the source node, then the column _eDest_ (default `'To'`) givens the destination and all other columns are attributes of the edges, e.g. weight.
The read node list format assumes the column _idKey_ (default `'ID'`) is the ID of the node for the edge list and the resulting network. All other columns are considered attributes of the node, e.g. count.
**Note**: If the names of the columns do not match those given to **readGraph()** a `KeyError` exception will be raised.
**Note**: If nodes appear in the edgelist but not the nodeList they will be created silently with no attributes.
# Parameters
_edgeList_ : `str`
> a string giving the path to the edge list file
_nodeList_ : `optional [str]`
> default `None`, a string giving the path to the node list file
_directed_ : `optional [bool]`
> default `False`, if `True` the produced network is directed from _eSource_ to _eDest_
_idKey_ : `optional [str]`
> default `'ID'`, the name of the ID column in the node list
_eSource_ : `optional [str]`
> default `'From'`, the name of the source column in the edge list
_eDest_ : `optional [str]`
> default `'To'`, the name of the destination column in the edge list
# Returns
`networkx Graph`
> the graph described by the input files
"""
progArgs = (0, "Starting to reading graphs")
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if directed:
grph = nx.DiGraph()
else:
grph = nx.Graph()
if nodeList:
PBar.updateVal(0, "Reading " + nodeList)
f = open(os.path.expanduser(os.path.abspath(nodeList)))
nFile = csv.DictReader(f)
for line in nFile:
vals = line
ndID = vals[idKey]
del vals[idKey]
if len(vals) > 0:
grph.add_node(ndID, **vals)
else:
grph.add_node(ndID)
f.close()
PBar.updateVal(.25, "Reading " + edgeList)
f = open(os.path.expanduser(os.path.abspath(edgeList)))
eFile = csv.DictReader(f)
for line in eFile:
vals = line
eFrom = vals[eSource]
eTo = vals[eDest]
del vals[eSource]
del vals[eDest]
if len(vals) > 0:
grph.add_edge(eFrom, eTo, **vals)
else:
grph.add_edge(eFrom, eTo)
PBar.finish("{} nodes and {} edges found".format(len(grph.nodes()), len(grph.edges())))
f.close()
return grph | python | def readGraph(edgeList, nodeList = None, directed = False, idKey = 'ID', eSource = 'From', eDest = 'To'):
"""Reads the files given by _edgeList_ and _nodeList_ and creates a networkx graph for the files.
This is designed only for the files produced by metaknowledge and is meant to be the reverse of [writeGraph()](#metaknowledge.graphHelpers.writeGraph), if this does not produce the desired results the networkx builtin [networkx.read_edgelist()](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.readwrite.edgelist.read_edgelist.html) could be tried as it is aimed at a more general usage.
The read edge list format assumes the column named _eSource_ (default `'From'`) is the source node, then the column _eDest_ (default `'To'`) givens the destination and all other columns are attributes of the edges, e.g. weight.
The read node list format assumes the column _idKey_ (default `'ID'`) is the ID of the node for the edge list and the resulting network. All other columns are considered attributes of the node, e.g. count.
**Note**: If the names of the columns do not match those given to **readGraph()** a `KeyError` exception will be raised.
**Note**: If nodes appear in the edgelist but not the nodeList they will be created silently with no attributes.
# Parameters
_edgeList_ : `str`
> a string giving the path to the edge list file
_nodeList_ : `optional [str]`
> default `None`, a string giving the path to the node list file
_directed_ : `optional [bool]`
> default `False`, if `True` the produced network is directed from _eSource_ to _eDest_
_idKey_ : `optional [str]`
> default `'ID'`, the name of the ID column in the node list
_eSource_ : `optional [str]`
> default `'From'`, the name of the source column in the edge list
_eDest_ : `optional [str]`
> default `'To'`, the name of the destination column in the edge list
# Returns
`networkx Graph`
> the graph described by the input files
"""
progArgs = (0, "Starting to reading graphs")
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if directed:
grph = nx.DiGraph()
else:
grph = nx.Graph()
if nodeList:
PBar.updateVal(0, "Reading " + nodeList)
f = open(os.path.expanduser(os.path.abspath(nodeList)))
nFile = csv.DictReader(f)
for line in nFile:
vals = line
ndID = vals[idKey]
del vals[idKey]
if len(vals) > 0:
grph.add_node(ndID, **vals)
else:
grph.add_node(ndID)
f.close()
PBar.updateVal(.25, "Reading " + edgeList)
f = open(os.path.expanduser(os.path.abspath(edgeList)))
eFile = csv.DictReader(f)
for line in eFile:
vals = line
eFrom = vals[eSource]
eTo = vals[eDest]
del vals[eSource]
del vals[eDest]
if len(vals) > 0:
grph.add_edge(eFrom, eTo, **vals)
else:
grph.add_edge(eFrom, eTo)
PBar.finish("{} nodes and {} edges found".format(len(grph.nodes()), len(grph.edges())))
f.close()
return grph | [
"def",
"readGraph",
"(",
"edgeList",
",",
"nodeList",
"=",
"None",
",",
"directed",
"=",
"False",
",",
"idKey",
"=",
"'ID'",
",",
"eSource",
"=",
"'From'",
",",
"eDest",
"=",
"'To'",
")",
":",
"progArgs",
"=",
"(",
"0",
",",
"\"Starting to reading graphs\"",
")",
"if",
"metaknowledge",
".",
"VERBOSE_MODE",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"False",
"}",
"else",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"True",
"}",
"with",
"_ProgressBar",
"(",
"*",
"progArgs",
",",
"*",
"*",
"progKwargs",
")",
"as",
"PBar",
":",
"if",
"directed",
":",
"grph",
"=",
"nx",
".",
"DiGraph",
"(",
")",
"else",
":",
"grph",
"=",
"nx",
".",
"Graph",
"(",
")",
"if",
"nodeList",
":",
"PBar",
".",
"updateVal",
"(",
"0",
",",
"\"Reading \"",
"+",
"nodeList",
")",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"nodeList",
")",
")",
")",
"nFile",
"=",
"csv",
".",
"DictReader",
"(",
"f",
")",
"for",
"line",
"in",
"nFile",
":",
"vals",
"=",
"line",
"ndID",
"=",
"vals",
"[",
"idKey",
"]",
"del",
"vals",
"[",
"idKey",
"]",
"if",
"len",
"(",
"vals",
")",
">",
"0",
":",
"grph",
".",
"add_node",
"(",
"ndID",
",",
"*",
"*",
"vals",
")",
"else",
":",
"grph",
".",
"add_node",
"(",
"ndID",
")",
"f",
".",
"close",
"(",
")",
"PBar",
".",
"updateVal",
"(",
".25",
",",
"\"Reading \"",
"+",
"edgeList",
")",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"edgeList",
")",
")",
")",
"eFile",
"=",
"csv",
".",
"DictReader",
"(",
"f",
")",
"for",
"line",
"in",
"eFile",
":",
"vals",
"=",
"line",
"eFrom",
"=",
"vals",
"[",
"eSource",
"]",
"eTo",
"=",
"vals",
"[",
"eDest",
"]",
"del",
"vals",
"[",
"eSource",
"]",
"del",
"vals",
"[",
"eDest",
"]",
"if",
"len",
"(",
"vals",
")",
">",
"0",
":",
"grph",
".",
"add_edge",
"(",
"eFrom",
",",
"eTo",
",",
"*",
"*",
"vals",
")",
"else",
":",
"grph",
".",
"add_edge",
"(",
"eFrom",
",",
"eTo",
")",
"PBar",
".",
"finish",
"(",
"\"{} nodes and {} edges found\"",
".",
"format",
"(",
"len",
"(",
"grph",
".",
"nodes",
"(",
")",
")",
",",
"len",
"(",
"grph",
".",
"edges",
"(",
")",
")",
")",
")",
"f",
".",
"close",
"(",
")",
"return",
"grph"
]
| Reads the files given by _edgeList_ and _nodeList_ and creates a networkx graph for the files.
This is designed only for the files produced by metaknowledge and is meant to be the reverse of [writeGraph()](#metaknowledge.graphHelpers.writeGraph), if this does not produce the desired results the networkx builtin [networkx.read_edgelist()](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.readwrite.edgelist.read_edgelist.html) could be tried as it is aimed at a more general usage.
The read edge list format assumes the column named _eSource_ (default `'From'`) is the source node, then the column _eDest_ (default `'To'`) givens the destination and all other columns are attributes of the edges, e.g. weight.
The read node list format assumes the column _idKey_ (default `'ID'`) is the ID of the node for the edge list and the resulting network. All other columns are considered attributes of the node, e.g. count.
**Note**: If the names of the columns do not match those given to **readGraph()** a `KeyError` exception will be raised.
**Note**: If nodes appear in the edgelist but not the nodeList they will be created silently with no attributes.
# Parameters
_edgeList_ : `str`
> a string giving the path to the edge list file
_nodeList_ : `optional [str]`
> default `None`, a string giving the path to the node list file
_directed_ : `optional [bool]`
> default `False`, if `True` the produced network is directed from _eSource_ to _eDest_
_idKey_ : `optional [str]`
> default `'ID'`, the name of the ID column in the node list
_eSource_ : `optional [str]`
> default `'From'`, the name of the source column in the edge list
_eDest_ : `optional [str]`
> default `'To'`, the name of the destination column in the edge list
# Returns
`networkx Graph`
> the graph described by the input files | [
"Reads",
"the",
"files",
"given",
"by",
"_edgeList_",
"and",
"_nodeList_",
"and",
"creates",
"a",
"networkx",
"graph",
"for",
"the",
"files",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L11-L94 | train |
networks-lab/metaknowledge | metaknowledge/graphHelpers.py | writeGraph | def writeGraph(grph, name, edgeInfo = True, typing = False, suffix = 'csv', overwrite = True, allSameAttribute = False):
"""Writes both the edge list and the node attribute list of _grph_ to files starting with _name_.
The output files start with _name_, the file type (edgeList, nodeAttributes) then if typing is True the type of graph (directed or undirected) then the suffix, the default is as follows:
>> name_fileType.suffix
Both files are csv's with comma delimiters and double quote quoting characters. The edge list has two columns for the source and destination of the edge, `'From'` and `'To'` respectively, then, if _edgeInfo_ is `True`, for each attribute of the node another column is created. The node list has one column call "ID" with the node ids used by networkx and all other columns are the node attributes.
To read back these files use [readGraph()](#metaknowledge.graphHelpers.readGraph) and to write only one type of lsit use [writeEdgeList()](#metaknowledge.graphHelpers.writeEdgeList) or [writeNodeAttributeFile()](#metaknowledge.graphHelpers.writeNodeAttributeFile).
**Warning**: this function will overwrite files, if they are in the way of the output, to prevent this set _overwrite_ to `False`
**Note**: If any nodes or edges are missing an attribute a `KeyError` will be raised.
# Parameters
_grph_ : `networkx Graph`
> A networkx graph of the network to be written.
_name_ : `str`
> The start of the file name to be written, can include a path.
_edgeInfo_ : `optional [bool]`
> Default `True`, if `True` the the attributes of each edge are written to the edge list.
_typing_ : `optional [bool]`
> Default `False`, if `True` the directed ness of the graph will be added to the file names.
_suffix_ : `optional [str]`
> Default `"csv"`, the suffix of the file.
_overwrite_ : `optional [bool]`
> Default `True`, if `True` files will be overwritten silently, otherwise an `OSError` exception will be raised.
"""
progArgs = (0, "Writing the graph to files starting with: {}".format(name))
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if typing:
if isinstance(grph, nx.classes.digraph.DiGraph) or isinstance(grph, nx.classes.multidigraph.MultiDiGraph):
grphType = "_directed"
else:
grphType = "_undirected"
else:
grphType = ''
nameCompts = os.path.split(os.path.expanduser(os.path.normpath(name)))
if nameCompts[0] == '' and nameCompts[1] == '':
edgeListName = "edgeList"+ grphType + '.' + suffix
nodesAtrName = "nodeAttributes"+ grphType + '.' + suffix
elif nameCompts[0] == '':
edgeListName = nameCompts[1] + "_edgeList"+ grphType + '.' + suffix
nodesAtrName = nameCompts[1] + "_nodeAttributes"+ grphType + '.' + suffix
elif nameCompts[1] == '':
edgeListName = os.path.join(nameCompts[0], "edgeList"+ grphType + '.' + suffix)
nodesAtrName = os.path.join(nameCompts[0], "nodeAttributes"+ grphType + '.' + suffix)
else:
edgeListName = os.path.join(nameCompts[0], nameCompts[1] + "_edgeList"+ grphType + '.' + suffix)
nodesAtrName = os.path.join(nameCompts[0], nameCompts[1] + "_nodeAttributes"+ grphType + '.' + suffix)
if not overwrite:
if os.path.isfile(edgeListName):
raise OSError(edgeListName+ " already exists")
if os.path.isfile(nodesAtrName):
raise OSError(nodesAtrName + " already exists")
writeEdgeList(grph, edgeListName, extraInfo = edgeInfo, allSameAttribute = allSameAttribute, _progBar = PBar)
writeNodeAttributeFile(grph, nodesAtrName, allSameAttribute = allSameAttribute, _progBar = PBar)
PBar.finish("{} nodes and {} edges written to file".format(len(grph.nodes()), len(grph.edges()))) | python | def writeGraph(grph, name, edgeInfo = True, typing = False, suffix = 'csv', overwrite = True, allSameAttribute = False):
"""Writes both the edge list and the node attribute list of _grph_ to files starting with _name_.
The output files start with _name_, the file type (edgeList, nodeAttributes) then if typing is True the type of graph (directed or undirected) then the suffix, the default is as follows:
>> name_fileType.suffix
Both files are csv's with comma delimiters and double quote quoting characters. The edge list has two columns for the source and destination of the edge, `'From'` and `'To'` respectively, then, if _edgeInfo_ is `True`, for each attribute of the node another column is created. The node list has one column call "ID" with the node ids used by networkx and all other columns are the node attributes.
To read back these files use [readGraph()](#metaknowledge.graphHelpers.readGraph) and to write only one type of lsit use [writeEdgeList()](#metaknowledge.graphHelpers.writeEdgeList) or [writeNodeAttributeFile()](#metaknowledge.graphHelpers.writeNodeAttributeFile).
**Warning**: this function will overwrite files, if they are in the way of the output, to prevent this set _overwrite_ to `False`
**Note**: If any nodes or edges are missing an attribute a `KeyError` will be raised.
# Parameters
_grph_ : `networkx Graph`
> A networkx graph of the network to be written.
_name_ : `str`
> The start of the file name to be written, can include a path.
_edgeInfo_ : `optional [bool]`
> Default `True`, if `True` the the attributes of each edge are written to the edge list.
_typing_ : `optional [bool]`
> Default `False`, if `True` the directed ness of the graph will be added to the file names.
_suffix_ : `optional [str]`
> Default `"csv"`, the suffix of the file.
_overwrite_ : `optional [bool]`
> Default `True`, if `True` files will be overwritten silently, otherwise an `OSError` exception will be raised.
"""
progArgs = (0, "Writing the graph to files starting with: {}".format(name))
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if typing:
if isinstance(grph, nx.classes.digraph.DiGraph) or isinstance(grph, nx.classes.multidigraph.MultiDiGraph):
grphType = "_directed"
else:
grphType = "_undirected"
else:
grphType = ''
nameCompts = os.path.split(os.path.expanduser(os.path.normpath(name)))
if nameCompts[0] == '' and nameCompts[1] == '':
edgeListName = "edgeList"+ grphType + '.' + suffix
nodesAtrName = "nodeAttributes"+ grphType + '.' + suffix
elif nameCompts[0] == '':
edgeListName = nameCompts[1] + "_edgeList"+ grphType + '.' + suffix
nodesAtrName = nameCompts[1] + "_nodeAttributes"+ grphType + '.' + suffix
elif nameCompts[1] == '':
edgeListName = os.path.join(nameCompts[0], "edgeList"+ grphType + '.' + suffix)
nodesAtrName = os.path.join(nameCompts[0], "nodeAttributes"+ grphType + '.' + suffix)
else:
edgeListName = os.path.join(nameCompts[0], nameCompts[1] + "_edgeList"+ grphType + '.' + suffix)
nodesAtrName = os.path.join(nameCompts[0], nameCompts[1] + "_nodeAttributes"+ grphType + '.' + suffix)
if not overwrite:
if os.path.isfile(edgeListName):
raise OSError(edgeListName+ " already exists")
if os.path.isfile(nodesAtrName):
raise OSError(nodesAtrName + " already exists")
writeEdgeList(grph, edgeListName, extraInfo = edgeInfo, allSameAttribute = allSameAttribute, _progBar = PBar)
writeNodeAttributeFile(grph, nodesAtrName, allSameAttribute = allSameAttribute, _progBar = PBar)
PBar.finish("{} nodes and {} edges written to file".format(len(grph.nodes()), len(grph.edges()))) | [
"def",
"writeGraph",
"(",
"grph",
",",
"name",
",",
"edgeInfo",
"=",
"True",
",",
"typing",
"=",
"False",
",",
"suffix",
"=",
"'csv'",
",",
"overwrite",
"=",
"True",
",",
"allSameAttribute",
"=",
"False",
")",
":",
"progArgs",
"=",
"(",
"0",
",",
"\"Writing the graph to files starting with: {}\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"metaknowledge",
".",
"VERBOSE_MODE",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"False",
"}",
"else",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"True",
"}",
"with",
"_ProgressBar",
"(",
"*",
"progArgs",
",",
"*",
"*",
"progKwargs",
")",
"as",
"PBar",
":",
"if",
"typing",
":",
"if",
"isinstance",
"(",
"grph",
",",
"nx",
".",
"classes",
".",
"digraph",
".",
"DiGraph",
")",
"or",
"isinstance",
"(",
"grph",
",",
"nx",
".",
"classes",
".",
"multidigraph",
".",
"MultiDiGraph",
")",
":",
"grphType",
"=",
"\"_directed\"",
"else",
":",
"grphType",
"=",
"\"_undirected\"",
"else",
":",
"grphType",
"=",
"''",
"nameCompts",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"name",
")",
")",
")",
"if",
"nameCompts",
"[",
"0",
"]",
"==",
"''",
"and",
"nameCompts",
"[",
"1",
"]",
"==",
"''",
":",
"edgeListName",
"=",
"\"edgeList\"",
"+",
"grphType",
"+",
"'.'",
"+",
"suffix",
"nodesAtrName",
"=",
"\"nodeAttributes\"",
"+",
"grphType",
"+",
"'.'",
"+",
"suffix",
"elif",
"nameCompts",
"[",
"0",
"]",
"==",
"''",
":",
"edgeListName",
"=",
"nameCompts",
"[",
"1",
"]",
"+",
"\"_edgeList\"",
"+",
"grphType",
"+",
"'.'",
"+",
"suffix",
"nodesAtrName",
"=",
"nameCompts",
"[",
"1",
"]",
"+",
"\"_nodeAttributes\"",
"+",
"grphType",
"+",
"'.'",
"+",
"suffix",
"elif",
"nameCompts",
"[",
"1",
"]",
"==",
"''",
":",
"edgeListName",
"=",
"os",
".",
"path",
".",
"join",
"(",
"nameCompts",
"[",
"0",
"]",
",",
"\"edgeList\"",
"+",
"grphType",
"+",
"'.'",
"+",
"suffix",
")",
"nodesAtrName",
"=",
"os",
".",
"path",
".",
"join",
"(",
"nameCompts",
"[",
"0",
"]",
",",
"\"nodeAttributes\"",
"+",
"grphType",
"+",
"'.'",
"+",
"suffix",
")",
"else",
":",
"edgeListName",
"=",
"os",
".",
"path",
".",
"join",
"(",
"nameCompts",
"[",
"0",
"]",
",",
"nameCompts",
"[",
"1",
"]",
"+",
"\"_edgeList\"",
"+",
"grphType",
"+",
"'.'",
"+",
"suffix",
")",
"nodesAtrName",
"=",
"os",
".",
"path",
".",
"join",
"(",
"nameCompts",
"[",
"0",
"]",
",",
"nameCompts",
"[",
"1",
"]",
"+",
"\"_nodeAttributes\"",
"+",
"grphType",
"+",
"'.'",
"+",
"suffix",
")",
"if",
"not",
"overwrite",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"edgeListName",
")",
":",
"raise",
"OSError",
"(",
"edgeListName",
"+",
"\" already exists\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"nodesAtrName",
")",
":",
"raise",
"OSError",
"(",
"nodesAtrName",
"+",
"\" already exists\"",
")",
"writeEdgeList",
"(",
"grph",
",",
"edgeListName",
",",
"extraInfo",
"=",
"edgeInfo",
",",
"allSameAttribute",
"=",
"allSameAttribute",
",",
"_progBar",
"=",
"PBar",
")",
"writeNodeAttributeFile",
"(",
"grph",
",",
"nodesAtrName",
",",
"allSameAttribute",
"=",
"allSameAttribute",
",",
"_progBar",
"=",
"PBar",
")",
"PBar",
".",
"finish",
"(",
"\"{} nodes and {} edges written to file\"",
".",
"format",
"(",
"len",
"(",
"grph",
".",
"nodes",
"(",
")",
")",
",",
"len",
"(",
"grph",
".",
"edges",
"(",
")",
")",
")",
")"
]
| Writes both the edge list and the node attribute list of _grph_ to files starting with _name_.
The output files start with _name_, the file type (edgeList, nodeAttributes) then if typing is True the type of graph (directed or undirected) then the suffix, the default is as follows:
>> name_fileType.suffix
Both files are csv's with comma delimiters and double quote quoting characters. The edge list has two columns for the source and destination of the edge, `'From'` and `'To'` respectively, then, if _edgeInfo_ is `True`, for each attribute of the node another column is created. The node list has one column call "ID" with the node ids used by networkx and all other columns are the node attributes.
To read back these files use [readGraph()](#metaknowledge.graphHelpers.readGraph) and to write only one type of lsit use [writeEdgeList()](#metaknowledge.graphHelpers.writeEdgeList) or [writeNodeAttributeFile()](#metaknowledge.graphHelpers.writeNodeAttributeFile).
**Warning**: this function will overwrite files, if they are in the way of the output, to prevent this set _overwrite_ to `False`
**Note**: If any nodes or edges are missing an attribute a `KeyError` will be raised.
# Parameters
_grph_ : `networkx Graph`
> A networkx graph of the network to be written.
_name_ : `str`
> The start of the file name to be written, can include a path.
_edgeInfo_ : `optional [bool]`
> Default `True`, if `True` the the attributes of each edge are written to the edge list.
_typing_ : `optional [bool]`
> Default `False`, if `True` the directed ness of the graph will be added to the file names.
_suffix_ : `optional [str]`
> Default `"csv"`, the suffix of the file.
_overwrite_ : `optional [bool]`
> Default `True`, if `True` files will be overwritten silently, otherwise an `OSError` exception will be raised. | [
"Writes",
"both",
"the",
"edge",
"list",
"and",
"the",
"node",
"attribute",
"list",
"of",
"_grph_",
"to",
"files",
"starting",
"with",
"_name_",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L96-L170 | train |
networks-lab/metaknowledge | metaknowledge/graphHelpers.py | getNodeDegrees | def getNodeDegrees(grph, weightString = "weight", strictMode = False, returnType = int, edgeType = 'bi'):
"""
Retunrs a dictionary of nodes to their degrees, the degree is determined by adding the weight of edge with the weight being the string weightString that gives the name of the attribute of each edge containng thier weight. The Weights are then converted to the type returnType. If weightString is give as False instead each edge is counted as 1.
edgeType, takes in one of three strings: 'bi', 'in', 'out'. 'bi' means both nodes on the edge count it, 'out' mans only the one the edge comes form counts it and 'in' means only the node the edge goes to counts it. 'bi' is the default. Use only on directional graphs as otherwise the selected nodes is random.
"""
ndsDict = {}
for nd in grph.nodes():
ndsDict[nd] = returnType(0)
for e in grph.edges(data = True):
if weightString:
try:
edgVal = returnType(e[2][weightString])
except KeyError:
if strictMode:
raise KeyError("The edge from " + str(e[0]) + " to " + str(e[1]) + " does not have the attribute: '" + str(weightString) + "'")
else:
edgVal = returnType(1)
else:
edgVal = returnType(1)
if edgeType == 'bi':
ndsDict[e[0]] += edgVal
ndsDict[e[1]] += edgVal
elif edgeType == 'in':
ndsDict[e[1]] += edgVal
elif edgeType == 'out':
ndsDict[e[0]] += edgVal
else:
raise ValueError("edgeType must be 'bi', 'in', or 'out'")
return ndsDict | python | def getNodeDegrees(grph, weightString = "weight", strictMode = False, returnType = int, edgeType = 'bi'):
"""
Retunrs a dictionary of nodes to their degrees, the degree is determined by adding the weight of edge with the weight being the string weightString that gives the name of the attribute of each edge containng thier weight. The Weights are then converted to the type returnType. If weightString is give as False instead each edge is counted as 1.
edgeType, takes in one of three strings: 'bi', 'in', 'out'. 'bi' means both nodes on the edge count it, 'out' mans only the one the edge comes form counts it and 'in' means only the node the edge goes to counts it. 'bi' is the default. Use only on directional graphs as otherwise the selected nodes is random.
"""
ndsDict = {}
for nd in grph.nodes():
ndsDict[nd] = returnType(0)
for e in grph.edges(data = True):
if weightString:
try:
edgVal = returnType(e[2][weightString])
except KeyError:
if strictMode:
raise KeyError("The edge from " + str(e[0]) + " to " + str(e[1]) + " does not have the attribute: '" + str(weightString) + "'")
else:
edgVal = returnType(1)
else:
edgVal = returnType(1)
if edgeType == 'bi':
ndsDict[e[0]] += edgVal
ndsDict[e[1]] += edgVal
elif edgeType == 'in':
ndsDict[e[1]] += edgVal
elif edgeType == 'out':
ndsDict[e[0]] += edgVal
else:
raise ValueError("edgeType must be 'bi', 'in', or 'out'")
return ndsDict | [
"def",
"getNodeDegrees",
"(",
"grph",
",",
"weightString",
"=",
"\"weight\"",
",",
"strictMode",
"=",
"False",
",",
"returnType",
"=",
"int",
",",
"edgeType",
"=",
"'bi'",
")",
":",
"ndsDict",
"=",
"{",
"}",
"for",
"nd",
"in",
"grph",
".",
"nodes",
"(",
")",
":",
"ndsDict",
"[",
"nd",
"]",
"=",
"returnType",
"(",
"0",
")",
"for",
"e",
"in",
"grph",
".",
"edges",
"(",
"data",
"=",
"True",
")",
":",
"if",
"weightString",
":",
"try",
":",
"edgVal",
"=",
"returnType",
"(",
"e",
"[",
"2",
"]",
"[",
"weightString",
"]",
")",
"except",
"KeyError",
":",
"if",
"strictMode",
":",
"raise",
"KeyError",
"(",
"\"The edge from \"",
"+",
"str",
"(",
"e",
"[",
"0",
"]",
")",
"+",
"\" to \"",
"+",
"str",
"(",
"e",
"[",
"1",
"]",
")",
"+",
"\" does not have the attribute: '\"",
"+",
"str",
"(",
"weightString",
")",
"+",
"\"'\"",
")",
"else",
":",
"edgVal",
"=",
"returnType",
"(",
"1",
")",
"else",
":",
"edgVal",
"=",
"returnType",
"(",
"1",
")",
"if",
"edgeType",
"==",
"'bi'",
":",
"ndsDict",
"[",
"e",
"[",
"0",
"]",
"]",
"+=",
"edgVal",
"ndsDict",
"[",
"e",
"[",
"1",
"]",
"]",
"+=",
"edgVal",
"elif",
"edgeType",
"==",
"'in'",
":",
"ndsDict",
"[",
"e",
"[",
"1",
"]",
"]",
"+=",
"edgVal",
"elif",
"edgeType",
"==",
"'out'",
":",
"ndsDict",
"[",
"e",
"[",
"0",
"]",
"]",
"+=",
"edgVal",
"else",
":",
"raise",
"ValueError",
"(",
"\"edgeType must be 'bi', 'in', or 'out'\"",
")",
"return",
"ndsDict"
]
| Retunrs a dictionary of nodes to their degrees, the degree is determined by adding the weight of edge with the weight being the string weightString that gives the name of the attribute of each edge containng thier weight. The Weights are then converted to the type returnType. If weightString is give as False instead each edge is counted as 1.
edgeType, takes in one of three strings: 'bi', 'in', 'out'. 'bi' means both nodes on the edge count it, 'out' mans only the one the edge comes form counts it and 'in' means only the node the edge goes to counts it. 'bi' is the default. Use only on directional graphs as otherwise the selected nodes is random. | [
"Retunrs",
"a",
"dictionary",
"of",
"nodes",
"to",
"their",
"degrees",
"the",
"degree",
"is",
"determined",
"by",
"adding",
"the",
"weight",
"of",
"edge",
"with",
"the",
"weight",
"being",
"the",
"string",
"weightString",
"that",
"gives",
"the",
"name",
"of",
"the",
"attribute",
"of",
"each",
"edge",
"containng",
"thier",
"weight",
".",
"The",
"Weights",
"are",
"then",
"converted",
"to",
"the",
"type",
"returnType",
".",
"If",
"weightString",
"is",
"give",
"as",
"False",
"instead",
"each",
"edge",
"is",
"counted",
"as",
"1",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L457-L486 | train |
networks-lab/metaknowledge | metaknowledge/graphHelpers.py | mergeGraphs | def mergeGraphs(targetGraph, addedGraph, incrementedNodeVal = 'count', incrementedEdgeVal = 'weight'):
"""A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method.
**mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten.
# Parameters
_targetGraph_ : `networkx Graph`
> the graph to be modified, it has precedence.
_addedGraph_ : `networkx Graph`
> the graph that is unmodified, it is added and does **not** have precedence.
_incrementedNodeVal_ : `optional [str]`
> default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
_incrementedEdgeVal_ : `optional [str]`
> default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
"""
for addedNode, attribs in addedGraph.nodes(data = True):
if incrementedNodeVal:
try:
targetGraph.node[addedNode][incrementedNodeVal] += attribs[incrementedNodeVal]
except KeyError:
targetGraph.add_node(addedNode, **attribs)
else:
if not targetGraph.has_node(addedNode):
targetGraph.add_node(addedNode, **attribs)
for edgeNode1, edgeNode2, attribs in addedGraph.edges(data = True):
if incrementedEdgeVal:
try:
targetGraph.edges[edgeNode1, edgeNode2][incrementedEdgeVal] += attribs[incrementedEdgeVal]
except KeyError:
targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
else:
if not targetGraph.Graph.has_edge(edgeNode1, edgeNode2):
targetGraph.add_edge(edgeNode1, edgeNode2, **attribs) | python | def mergeGraphs(targetGraph, addedGraph, incrementedNodeVal = 'count', incrementedEdgeVal = 'weight'):
"""A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method.
**mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten.
# Parameters
_targetGraph_ : `networkx Graph`
> the graph to be modified, it has precedence.
_addedGraph_ : `networkx Graph`
> the graph that is unmodified, it is added and does **not** have precedence.
_incrementedNodeVal_ : `optional [str]`
> default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
_incrementedEdgeVal_ : `optional [str]`
> default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
"""
for addedNode, attribs in addedGraph.nodes(data = True):
if incrementedNodeVal:
try:
targetGraph.node[addedNode][incrementedNodeVal] += attribs[incrementedNodeVal]
except KeyError:
targetGraph.add_node(addedNode, **attribs)
else:
if not targetGraph.has_node(addedNode):
targetGraph.add_node(addedNode, **attribs)
for edgeNode1, edgeNode2, attribs in addedGraph.edges(data = True):
if incrementedEdgeVal:
try:
targetGraph.edges[edgeNode1, edgeNode2][incrementedEdgeVal] += attribs[incrementedEdgeVal]
except KeyError:
targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
else:
if not targetGraph.Graph.has_edge(edgeNode1, edgeNode2):
targetGraph.add_edge(edgeNode1, edgeNode2, **attribs) | [
"def",
"mergeGraphs",
"(",
"targetGraph",
",",
"addedGraph",
",",
"incrementedNodeVal",
"=",
"'count'",
",",
"incrementedEdgeVal",
"=",
"'weight'",
")",
":",
"for",
"addedNode",
",",
"attribs",
"in",
"addedGraph",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
":",
"if",
"incrementedNodeVal",
":",
"try",
":",
"targetGraph",
".",
"node",
"[",
"addedNode",
"]",
"[",
"incrementedNodeVal",
"]",
"+=",
"attribs",
"[",
"incrementedNodeVal",
"]",
"except",
"KeyError",
":",
"targetGraph",
".",
"add_node",
"(",
"addedNode",
",",
"*",
"*",
"attribs",
")",
"else",
":",
"if",
"not",
"targetGraph",
".",
"has_node",
"(",
"addedNode",
")",
":",
"targetGraph",
".",
"add_node",
"(",
"addedNode",
",",
"*",
"*",
"attribs",
")",
"for",
"edgeNode1",
",",
"edgeNode2",
",",
"attribs",
"in",
"addedGraph",
".",
"edges",
"(",
"data",
"=",
"True",
")",
":",
"if",
"incrementedEdgeVal",
":",
"try",
":",
"targetGraph",
".",
"edges",
"[",
"edgeNode1",
",",
"edgeNode2",
"]",
"[",
"incrementedEdgeVal",
"]",
"+=",
"attribs",
"[",
"incrementedEdgeVal",
"]",
"except",
"KeyError",
":",
"targetGraph",
".",
"add_edge",
"(",
"edgeNode1",
",",
"edgeNode2",
",",
"*",
"*",
"attribs",
")",
"else",
":",
"if",
"not",
"targetGraph",
".",
"Graph",
".",
"has_edge",
"(",
"edgeNode1",
",",
"edgeNode2",
")",
":",
"targetGraph",
".",
"add_edge",
"(",
"edgeNode1",
",",
"edgeNode2",
",",
"*",
"*",
"attribs",
")"
]
| A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method.
**mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten.
# Parameters
_targetGraph_ : `networkx Graph`
> the graph to be modified, it has precedence.
_addedGraph_ : `networkx Graph`
> the graph that is unmodified, it is added and does **not** have precedence.
_incrementedNodeVal_ : `optional [str]`
> default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
_incrementedEdgeVal_ : `optional [str]`
> default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value. | [
"A",
"quick",
"way",
"of",
"merging",
"graphs",
"this",
"is",
"meant",
"to",
"be",
"quick",
"and",
"is",
"only",
"intended",
"for",
"graphs",
"generated",
"by",
"metaknowledge",
".",
"This",
"does",
"not",
"check",
"anything",
"and",
"as",
"such",
"may",
"cause",
"unexpected",
"results",
"if",
"the",
"source",
"and",
"target",
"were",
"not",
"generated",
"by",
"the",
"same",
"method",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L691-L732 | train |
networks-lab/metaknowledge | metaknowledge/medline/tagProcessing/tagFunctions.py | AD | def AD(val):
"""Affiliation
Undoing what the parser does then splitting at the semicolons and dropping newlines extra fitlering is required beacuse some AD's end with a semicolon"""
retDict = {}
for v in val:
split = v.split(' : ')
retDict[split[0]] = [s for s in' : '.join(split[1:]).replace('\n', '').split(';') if s != '']
return retDict | python | def AD(val):
"""Affiliation
Undoing what the parser does then splitting at the semicolons and dropping newlines extra fitlering is required beacuse some AD's end with a semicolon"""
retDict = {}
for v in val:
split = v.split(' : ')
retDict[split[0]] = [s for s in' : '.join(split[1:]).replace('\n', '').split(';') if s != '']
return retDict | [
"def",
"AD",
"(",
"val",
")",
":",
"retDict",
"=",
"{",
"}",
"for",
"v",
"in",
"val",
":",
"split",
"=",
"v",
".",
"split",
"(",
"' : '",
")",
"retDict",
"[",
"split",
"[",
"0",
"]",
"]",
"=",
"[",
"s",
"for",
"s",
"in",
"' : '",
".",
"join",
"(",
"split",
"[",
"1",
":",
"]",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
".",
"split",
"(",
"';'",
")",
"if",
"s",
"!=",
"''",
"]",
"return",
"retDict"
]
| Affiliation
Undoing what the parser does then splitting at the semicolons and dropping newlines extra fitlering is required beacuse some AD's end with a semicolon | [
"Affiliation",
"Undoing",
"what",
"the",
"parser",
"does",
"then",
"splitting",
"at",
"the",
"semicolons",
"and",
"dropping",
"newlines",
"extra",
"fitlering",
"is",
"required",
"beacuse",
"some",
"AD",
"s",
"end",
"with",
"a",
"semicolon"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/medline/tagProcessing/tagFunctions.py#L218-L225 | train |
networks-lab/metaknowledge | metaknowledge/medline/tagProcessing/tagFunctions.py | AUID | def AUID(val):
"""AuthorIdentifier
one line only just need to undo the parser's effects"""
retDict = {}
for v in val:
split = v.split(' : ')
retDict[split[0]] = ' : '.join(split[1:])
return retDict | python | def AUID(val):
"""AuthorIdentifier
one line only just need to undo the parser's effects"""
retDict = {}
for v in val:
split = v.split(' : ')
retDict[split[0]] = ' : '.join(split[1:])
return retDict | [
"def",
"AUID",
"(",
"val",
")",
":",
"retDict",
"=",
"{",
"}",
"for",
"v",
"in",
"val",
":",
"split",
"=",
"v",
".",
"split",
"(",
"' : '",
")",
"retDict",
"[",
"split",
"[",
"0",
"]",
"]",
"=",
"' : '",
".",
"join",
"(",
"split",
"[",
"1",
":",
"]",
")",
"return",
"retDict"
]
| AuthorIdentifier
one line only just need to undo the parser's effects | [
"AuthorIdentifier",
"one",
"line",
"only",
"just",
"need",
"to",
"undo",
"the",
"parser",
"s",
"effects"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/medline/tagProcessing/tagFunctions.py#L322-L329 | train |
networks-lab/metaknowledge | metaknowledge/constants.py | isInteractive | def isInteractive():
"""
A basic check of if the program is running in interactive mode
"""
if sys.stdout.isatty() and os.name != 'nt':
#Hopefully everything but ms supports '\r'
try:
import threading
except ImportError:
return False
else:
return True
else:
return False | python | def isInteractive():
"""
A basic check of if the program is running in interactive mode
"""
if sys.stdout.isatty() and os.name != 'nt':
#Hopefully everything but ms supports '\r'
try:
import threading
except ImportError:
return False
else:
return True
else:
return False | [
"def",
"isInteractive",
"(",
")",
":",
"if",
"sys",
".",
"stdout",
".",
"isatty",
"(",
")",
"and",
"os",
".",
"name",
"!=",
"'nt'",
":",
"#Hopefully everything but ms supports '\\r'",
"try",
":",
"import",
"threading",
"except",
"ImportError",
":",
"return",
"False",
"else",
":",
"return",
"True",
"else",
":",
"return",
"False"
]
| A basic check of if the program is running in interactive mode | [
"A",
"basic",
"check",
"of",
"if",
"the",
"program",
"is",
"running",
"in",
"interactive",
"mode"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/constants.py#L27-L40 | train |
networks-lab/metaknowledge | metaknowledge/grants/nsercGrant.py | NSERCGrant.getInstitutions | def getInstitutions(self, tags = None, seperator = ";", _getTag = False):
"""Returns a list with the names of the institution. The optional arguments are ignored
# Returns
`list [str]`
> A list with 1 entry the name of the institution
"""
if tags is None:
tags = []
elif isinstance(tags, str):
tags = [tags]
for k in self.keys():
if 'institution' in k.lower() and k not in tags:
tags.append(k)
return super().getInvestigators(tags = tags, seperator = seperator, _getTag = _getTag) | python | def getInstitutions(self, tags = None, seperator = ";", _getTag = False):
"""Returns a list with the names of the institution. The optional arguments are ignored
# Returns
`list [str]`
> A list with 1 entry the name of the institution
"""
if tags is None:
tags = []
elif isinstance(tags, str):
tags = [tags]
for k in self.keys():
if 'institution' in k.lower() and k not in tags:
tags.append(k)
return super().getInvestigators(tags = tags, seperator = seperator, _getTag = _getTag) | [
"def",
"getInstitutions",
"(",
"self",
",",
"tags",
"=",
"None",
",",
"seperator",
"=",
"\";\"",
",",
"_getTag",
"=",
"False",
")",
":",
"if",
"tags",
"is",
"None",
":",
"tags",
"=",
"[",
"]",
"elif",
"isinstance",
"(",
"tags",
",",
"str",
")",
":",
"tags",
"=",
"[",
"tags",
"]",
"for",
"k",
"in",
"self",
".",
"keys",
"(",
")",
":",
"if",
"'institution'",
"in",
"k",
".",
"lower",
"(",
")",
"and",
"k",
"not",
"in",
"tags",
":",
"tags",
".",
"append",
"(",
"k",
")",
"return",
"super",
"(",
")",
".",
"getInvestigators",
"(",
"tags",
"=",
"tags",
",",
"seperator",
"=",
"seperator",
",",
"_getTag",
"=",
"_getTag",
")"
]
| Returns a list with the names of the institution. The optional arguments are ignored
# Returns
`list [str]`
> A list with 1 entry the name of the institution | [
"Returns",
"a",
"list",
"with",
"the",
"names",
"of",
"the",
"institution",
".",
"The",
"optional",
"arguments",
"are",
"ignored"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/grants/nsercGrant.py#L46-L62 | train |
networks-lab/metaknowledge | metaknowledge/medline/recordMedline.py | MedlineRecord.writeRecord | def writeRecord(self, f):
"""This is nearly identical to the original the FAU tag is the only tag not writen in the same place, doing so would require changing the parser and lots of extra logic.
"""
if self.bad:
raise BadPubmedRecord("This record cannot be converted to a file as the input was malformed.\nThe original line number (if any) is: {} and the original file is: '{}'".format(self._sourceLine, self._sourceFile))
else:
authTags = {}
for tag in authorBasedTags:
for val in self._fieldDict.get(tag, []):
split = val.split(' : ')
try:
authTags[split[0]].append("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)),' : '.join(split[1:]).replace('\n', '\n ')))
except KeyError:
authTags[split[0]] = ["{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)),' : '.join(split[1:]).replace('\n', '\n '))]
for tag, value in self._fieldDict.items():
if tag in authorBasedTags:
continue
else:
for v in value:
f.write("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)), v.replace('\n', '\n ')))
if tag == 'AU':
for authVal in authTags.get(v,[]):
f.write(authVal) | python | def writeRecord(self, f):
"""This is nearly identical to the original the FAU tag is the only tag not writen in the same place, doing so would require changing the parser and lots of extra logic.
"""
if self.bad:
raise BadPubmedRecord("This record cannot be converted to a file as the input was malformed.\nThe original line number (if any) is: {} and the original file is: '{}'".format(self._sourceLine, self._sourceFile))
else:
authTags = {}
for tag in authorBasedTags:
for val in self._fieldDict.get(tag, []):
split = val.split(' : ')
try:
authTags[split[0]].append("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)),' : '.join(split[1:]).replace('\n', '\n ')))
except KeyError:
authTags[split[0]] = ["{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)),' : '.join(split[1:]).replace('\n', '\n '))]
for tag, value in self._fieldDict.items():
if tag in authorBasedTags:
continue
else:
for v in value:
f.write("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)), v.replace('\n', '\n ')))
if tag == 'AU':
for authVal in authTags.get(v,[]):
f.write(authVal) | [
"def",
"writeRecord",
"(",
"self",
",",
"f",
")",
":",
"if",
"self",
".",
"bad",
":",
"raise",
"BadPubmedRecord",
"(",
"\"This record cannot be converted to a file as the input was malformed.\\nThe original line number (if any) is: {} and the original file is: '{}'\"",
".",
"format",
"(",
"self",
".",
"_sourceLine",
",",
"self",
".",
"_sourceFile",
")",
")",
"else",
":",
"authTags",
"=",
"{",
"}",
"for",
"tag",
"in",
"authorBasedTags",
":",
"for",
"val",
"in",
"self",
".",
"_fieldDict",
".",
"get",
"(",
"tag",
",",
"[",
"]",
")",
":",
"split",
"=",
"val",
".",
"split",
"(",
"' : '",
")",
"try",
":",
"authTags",
"[",
"split",
"[",
"0",
"]",
"]",
".",
"append",
"(",
"\"{0}{1}- {2}\\n\"",
".",
"format",
"(",
"tag",
",",
"' '",
"*",
"(",
"4",
"-",
"len",
"(",
"tag",
")",
")",
",",
"' : '",
".",
"join",
"(",
"split",
"[",
"1",
":",
"]",
")",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n '",
")",
")",
")",
"except",
"KeyError",
":",
"authTags",
"[",
"split",
"[",
"0",
"]",
"]",
"=",
"[",
"\"{0}{1}- {2}\\n\"",
".",
"format",
"(",
"tag",
",",
"' '",
"*",
"(",
"4",
"-",
"len",
"(",
"tag",
")",
")",
",",
"' : '",
".",
"join",
"(",
"split",
"[",
"1",
":",
"]",
")",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n '",
")",
")",
"]",
"for",
"tag",
",",
"value",
"in",
"self",
".",
"_fieldDict",
".",
"items",
"(",
")",
":",
"if",
"tag",
"in",
"authorBasedTags",
":",
"continue",
"else",
":",
"for",
"v",
"in",
"value",
":",
"f",
".",
"write",
"(",
"\"{0}{1}- {2}\\n\"",
".",
"format",
"(",
"tag",
",",
"' '",
"*",
"(",
"4",
"-",
"len",
"(",
"tag",
")",
")",
",",
"v",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n '",
")",
")",
")",
"if",
"tag",
"==",
"'AU'",
":",
"for",
"authVal",
"in",
"authTags",
".",
"get",
"(",
"v",
",",
"[",
"]",
")",
":",
"f",
".",
"write",
"(",
"authVal",
")"
]
| This is nearly identical to the original the FAU tag is the only tag not writen in the same place, doing so would require changing the parser and lots of extra logic. | [
"This",
"is",
"nearly",
"identical",
"to",
"the",
"original",
"the",
"FAU",
"tag",
"is",
"the",
"only",
"tag",
"not",
"writen",
"in",
"the",
"same",
"place",
"doing",
"so",
"would",
"require",
"changing",
"the",
"parser",
"and",
"lots",
"of",
"extra",
"logic",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/medline/recordMedline.py#L66-L88 | train |
networks-lab/metaknowledge | metaknowledge/contour/plotting.py | quickVisual | def quickVisual(G, showLabel = False):
"""Just makes a simple _matplotlib_ figure and displays it, with each node coloured by its type. You can add labels with _showLabel_. This looks a bit nicer than the one provided my _networkx_'s defaults.
# Parameters
_showLabel_ : `optional [bool]`
> Default `False`, if `True` labels will be added to the nodes giving their IDs.
"""
colours = "brcmykwg"
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
ndTypes = []
ndColours = []
layout = nx.spring_layout(G, k = 4 / math.sqrt(len(G.nodes())))
for nd in G.nodes(data = True):
if 'type' in nd[1]:
if nd[1]['type'] not in ndTypes:
ndTypes.append(nd[1]['type'])
ndColours.append(colours[ndTypes.index(nd[1]['type']) % len(colours)])
elif len(ndColours) > 1:
raise RuntimeError("Some nodes do not have a type")
if len(ndColours) < 1:
nx.draw_networkx_nodes(G, pos = layout, node_color = colours[0], node_shape = '8', node_size = 100, ax = ax)
else:
nx.draw_networkx_nodes(G, pos = layout, node_color = ndColours, node_shape = '8', node_size = 100, ax = ax)
nx.draw_networkx_edges(G, pos = layout, width = .7, ax = ax)
if showLabel:
nx.draw_networkx_labels(G, pos = layout, font_size = 8, ax = ax)
plt.axis('off')
f.set_facecolor('w') | python | def quickVisual(G, showLabel = False):
"""Just makes a simple _matplotlib_ figure and displays it, with each node coloured by its type. You can add labels with _showLabel_. This looks a bit nicer than the one provided my _networkx_'s defaults.
# Parameters
_showLabel_ : `optional [bool]`
> Default `False`, if `True` labels will be added to the nodes giving their IDs.
"""
colours = "brcmykwg"
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
ndTypes = []
ndColours = []
layout = nx.spring_layout(G, k = 4 / math.sqrt(len(G.nodes())))
for nd in G.nodes(data = True):
if 'type' in nd[1]:
if nd[1]['type'] not in ndTypes:
ndTypes.append(nd[1]['type'])
ndColours.append(colours[ndTypes.index(nd[1]['type']) % len(colours)])
elif len(ndColours) > 1:
raise RuntimeError("Some nodes do not have a type")
if len(ndColours) < 1:
nx.draw_networkx_nodes(G, pos = layout, node_color = colours[0], node_shape = '8', node_size = 100, ax = ax)
else:
nx.draw_networkx_nodes(G, pos = layout, node_color = ndColours, node_shape = '8', node_size = 100, ax = ax)
nx.draw_networkx_edges(G, pos = layout, width = .7, ax = ax)
if showLabel:
nx.draw_networkx_labels(G, pos = layout, font_size = 8, ax = ax)
plt.axis('off')
f.set_facecolor('w') | [
"def",
"quickVisual",
"(",
"G",
",",
"showLabel",
"=",
"False",
")",
":",
"colours",
"=",
"\"brcmykwg\"",
"f",
"=",
"plt",
".",
"figure",
"(",
"1",
")",
"ax",
"=",
"f",
".",
"add_subplot",
"(",
"1",
",",
"1",
",",
"1",
")",
"ndTypes",
"=",
"[",
"]",
"ndColours",
"=",
"[",
"]",
"layout",
"=",
"nx",
".",
"spring_layout",
"(",
"G",
",",
"k",
"=",
"4",
"/",
"math",
".",
"sqrt",
"(",
"len",
"(",
"G",
".",
"nodes",
"(",
")",
")",
")",
")",
"for",
"nd",
"in",
"G",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
":",
"if",
"'type'",
"in",
"nd",
"[",
"1",
"]",
":",
"if",
"nd",
"[",
"1",
"]",
"[",
"'type'",
"]",
"not",
"in",
"ndTypes",
":",
"ndTypes",
".",
"append",
"(",
"nd",
"[",
"1",
"]",
"[",
"'type'",
"]",
")",
"ndColours",
".",
"append",
"(",
"colours",
"[",
"ndTypes",
".",
"index",
"(",
"nd",
"[",
"1",
"]",
"[",
"'type'",
"]",
")",
"%",
"len",
"(",
"colours",
")",
"]",
")",
"elif",
"len",
"(",
"ndColours",
")",
">",
"1",
":",
"raise",
"RuntimeError",
"(",
"\"Some nodes do not have a type\"",
")",
"if",
"len",
"(",
"ndColours",
")",
"<",
"1",
":",
"nx",
".",
"draw_networkx_nodes",
"(",
"G",
",",
"pos",
"=",
"layout",
",",
"node_color",
"=",
"colours",
"[",
"0",
"]",
",",
"node_shape",
"=",
"'8'",
",",
"node_size",
"=",
"100",
",",
"ax",
"=",
"ax",
")",
"else",
":",
"nx",
".",
"draw_networkx_nodes",
"(",
"G",
",",
"pos",
"=",
"layout",
",",
"node_color",
"=",
"ndColours",
",",
"node_shape",
"=",
"'8'",
",",
"node_size",
"=",
"100",
",",
"ax",
"=",
"ax",
")",
"nx",
".",
"draw_networkx_edges",
"(",
"G",
",",
"pos",
"=",
"layout",
",",
"width",
"=",
".7",
",",
"ax",
"=",
"ax",
")",
"if",
"showLabel",
":",
"nx",
".",
"draw_networkx_labels",
"(",
"G",
",",
"pos",
"=",
"layout",
",",
"font_size",
"=",
"8",
",",
"ax",
"=",
"ax",
")",
"plt",
".",
"axis",
"(",
"'off'",
")",
"f",
".",
"set_facecolor",
"(",
"'w'",
")"
]
| Just makes a simple _matplotlib_ figure and displays it, with each node coloured by its type. You can add labels with _showLabel_. This looks a bit nicer than the one provided my _networkx_'s defaults.
# Parameters
_showLabel_ : `optional [bool]`
> Default `False`, if `True` labels will be added to the nodes giving their IDs. | [
"Just",
"makes",
"a",
"simple",
"_matplotlib_",
"figure",
"and",
"displays",
"it",
"with",
"each",
"node",
"coloured",
"by",
"its",
"type",
".",
"You",
"can",
"add",
"labels",
"with",
"_showLabel_",
".",
"This",
"looks",
"a",
"bit",
"nicer",
"than",
"the",
"one",
"provided",
"my",
"_networkx_",
"s",
"defaults",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/contour/plotting.py#L8-L38 | train |
networks-lab/metaknowledge | metaknowledge/contour/plotting.py | graphDensityContourPlot | def graphDensityContourPlot(G, iters = 50, layout = None, layoutScaleFactor = 1, overlay = False, nodeSize = 10, axisSamples = 100, blurringFactor = .1, contours = 15, graphType = 'coloured'):
"""Creates a 3D plot giving the density of nodes on a 2D plane, as a surface in 3D.
Most of the options are for tweaking the final appearance. _layout_ and _layoutScaleFactor_ allow a pre-layout graph to be provided. If a layout is not provided the [networkx.spring_layout()](https://networkx.github.io/documentation/latest/reference/generated/networkx.drawing.layout.spring_layout.html) is used after _iters_ iterations. Then, once the graph has been laid out a grid of _axisSamples_ cells by _axisSamples_ cells is overlaid and the number of nodes in each cell is determined, a gaussian blur is then applied with a sigma of _blurringFactor_. This then forms a surface in 3 dimensions, which is then plotted.
If you find the resultant image looks too banded raise the the _contours_ number to ~50.
# Parameters
_G_ : `networkx Graph`
> The graph to be plotted
_iters_ : `optional [int]`
> Default `50`, the number of iterations for the spring layout if _layout_ is not provided.
_layout_ : `optional [networkx layout dictionary]`
> Default `None`, if provided will be used as a layout of the graph, the maximum distance from the origin along any axis must also given as _layoutScaleFactor_, which is by default `1`.
_layoutScaleFactor_ : `optional [double]`
> Default `1`, The maximum distance from the origin allowed along any axis given by _layout_, i.e. the layout must fit in a square centered at the origin with side lengths 2 * _layoutScaleFactor_
_overlay_ : `optional [bool]`
> Default `False`, if `True` the 2D graph will be plotted on the X-Y plane at Z = 0.
_nodeSize_ : `optional [double]`
> Default `10`, the size of the nodes dawn in the overlay
_axisSamples_ : `optional [int]`
> Default 100, the number of cells used along each axis for sampling. A larger number will mean a lower average density.
_blurringFactor_ : `optional [double]`
> Default `0.1`, the sigma value used for smoothing the surface density. The higher this number the smoother the surface.
_contours_ : `optional [int]`
> Default 15, the number of different heights drawn. If this number is low the resultant image will look very banded. It is recommended this be raised above `50` if you want your images to look good, **Warning** this will make them much slower to generate and interact with.
_graphType_ : `optional [str]`
> Default `'coloured'`, if `'coloured'` the image will have a destiny based colourization applied, the only other option is `'solid'` which removes the colourization.
"""
from mpl_toolkits.mplot3d import Axes3D
if not isinstance(G, nx.classes.digraph.DiGraph) and not isinstance(G, nx.classes.graph.Graph):
raise TypeError("{} is not a valid input.".format(type(G)))
if layout is None:
layout = nx.spring_layout(G, scale = axisSamples - 1, iterations = iters)
grid = np.zeros( [axisSamples, axisSamples],dtype=np.float32)
for v in layout.values():
x, y = tuple(int(x) for x in v.round(0))
grid[y][x] += 1
elif isinstance(layout, dict):
layout = layout.copy()
grid = np.zeros([axisSamples, axisSamples],dtype=np.float32)
multFactor = (axisSamples - 1) / layoutScaleFactor
for k in layout.keys():
tmpPos = layout[k] * multFactor
layout[k] = tmpPos
x, y = tuple(int(x) for x in tmpPos.round(0))
grid[y][x] += 1
else:
raise TypeError("{} is not a valid input.".format(type(layout)))
fig = plt.figure()
#axis = fig.add_subplot(111)
axis = fig.gca(projection='3d')
if overlay:
nx.draw_networkx(G, pos = layout, ax = axis, node_size = nodeSize, with_labels = False, edgelist = [])
grid = ndi.gaussian_filter(grid, (blurringFactor * axisSamples, blurringFactor * axisSamples))
X = Y = np.arange(0, axisSamples, 1)
X, Y = np.meshgrid(X, Y)
if graphType == "solid":
CS = axis.plot_surface(X,Y, grid)
else:
CS = axis.contourf(X, Y, grid, contours)
axis.set_xlabel('X')
axis.set_ylabel('Y')
axis.set_zlabel('Node Density') | python | def graphDensityContourPlot(G, iters = 50, layout = None, layoutScaleFactor = 1, overlay = False, nodeSize = 10, axisSamples = 100, blurringFactor = .1, contours = 15, graphType = 'coloured'):
"""Creates a 3D plot giving the density of nodes on a 2D plane, as a surface in 3D.
Most of the options are for tweaking the final appearance. _layout_ and _layoutScaleFactor_ allow a pre-layout graph to be provided. If a layout is not provided the [networkx.spring_layout()](https://networkx.github.io/documentation/latest/reference/generated/networkx.drawing.layout.spring_layout.html) is used after _iters_ iterations. Then, once the graph has been laid out a grid of _axisSamples_ cells by _axisSamples_ cells is overlaid and the number of nodes in each cell is determined, a gaussian blur is then applied with a sigma of _blurringFactor_. This then forms a surface in 3 dimensions, which is then plotted.
If you find the resultant image looks too banded raise the the _contours_ number to ~50.
# Parameters
_G_ : `networkx Graph`
> The graph to be plotted
_iters_ : `optional [int]`
> Default `50`, the number of iterations for the spring layout if _layout_ is not provided.
_layout_ : `optional [networkx layout dictionary]`
> Default `None`, if provided will be used as a layout of the graph, the maximum distance from the origin along any axis must also given as _layoutScaleFactor_, which is by default `1`.
_layoutScaleFactor_ : `optional [double]`
> Default `1`, The maximum distance from the origin allowed along any axis given by _layout_, i.e. the layout must fit in a square centered at the origin with side lengths 2 * _layoutScaleFactor_
_overlay_ : `optional [bool]`
> Default `False`, if `True` the 2D graph will be plotted on the X-Y plane at Z = 0.
_nodeSize_ : `optional [double]`
> Default `10`, the size of the nodes dawn in the overlay
_axisSamples_ : `optional [int]`
> Default 100, the number of cells used along each axis for sampling. A larger number will mean a lower average density.
_blurringFactor_ : `optional [double]`
> Default `0.1`, the sigma value used for smoothing the surface density. The higher this number the smoother the surface.
_contours_ : `optional [int]`
> Default 15, the number of different heights drawn. If this number is low the resultant image will look very banded. It is recommended this be raised above `50` if you want your images to look good, **Warning** this will make them much slower to generate and interact with.
_graphType_ : `optional [str]`
> Default `'coloured'`, if `'coloured'` the image will have a destiny based colourization applied, the only other option is `'solid'` which removes the colourization.
"""
from mpl_toolkits.mplot3d import Axes3D
if not isinstance(G, nx.classes.digraph.DiGraph) and not isinstance(G, nx.classes.graph.Graph):
raise TypeError("{} is not a valid input.".format(type(G)))
if layout is None:
layout = nx.spring_layout(G, scale = axisSamples - 1, iterations = iters)
grid = np.zeros( [axisSamples, axisSamples],dtype=np.float32)
for v in layout.values():
x, y = tuple(int(x) for x in v.round(0))
grid[y][x] += 1
elif isinstance(layout, dict):
layout = layout.copy()
grid = np.zeros([axisSamples, axisSamples],dtype=np.float32)
multFactor = (axisSamples - 1) / layoutScaleFactor
for k in layout.keys():
tmpPos = layout[k] * multFactor
layout[k] = tmpPos
x, y = tuple(int(x) for x in tmpPos.round(0))
grid[y][x] += 1
else:
raise TypeError("{} is not a valid input.".format(type(layout)))
fig = plt.figure()
#axis = fig.add_subplot(111)
axis = fig.gca(projection='3d')
if overlay:
nx.draw_networkx(G, pos = layout, ax = axis, node_size = nodeSize, with_labels = False, edgelist = [])
grid = ndi.gaussian_filter(grid, (blurringFactor * axisSamples, blurringFactor * axisSamples))
X = Y = np.arange(0, axisSamples, 1)
X, Y = np.meshgrid(X, Y)
if graphType == "solid":
CS = axis.plot_surface(X,Y, grid)
else:
CS = axis.contourf(X, Y, grid, contours)
axis.set_xlabel('X')
axis.set_ylabel('Y')
axis.set_zlabel('Node Density') | [
"def",
"graphDensityContourPlot",
"(",
"G",
",",
"iters",
"=",
"50",
",",
"layout",
"=",
"None",
",",
"layoutScaleFactor",
"=",
"1",
",",
"overlay",
"=",
"False",
",",
"nodeSize",
"=",
"10",
",",
"axisSamples",
"=",
"100",
",",
"blurringFactor",
"=",
".1",
",",
"contours",
"=",
"15",
",",
"graphType",
"=",
"'coloured'",
")",
":",
"from",
"mpl_toolkits",
".",
"mplot3d",
"import",
"Axes3D",
"if",
"not",
"isinstance",
"(",
"G",
",",
"nx",
".",
"classes",
".",
"digraph",
".",
"DiGraph",
")",
"and",
"not",
"isinstance",
"(",
"G",
",",
"nx",
".",
"classes",
".",
"graph",
".",
"Graph",
")",
":",
"raise",
"TypeError",
"(",
"\"{} is not a valid input.\"",
".",
"format",
"(",
"type",
"(",
"G",
")",
")",
")",
"if",
"layout",
"is",
"None",
":",
"layout",
"=",
"nx",
".",
"spring_layout",
"(",
"G",
",",
"scale",
"=",
"axisSamples",
"-",
"1",
",",
"iterations",
"=",
"iters",
")",
"grid",
"=",
"np",
".",
"zeros",
"(",
"[",
"axisSamples",
",",
"axisSamples",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"v",
"in",
"layout",
".",
"values",
"(",
")",
":",
"x",
",",
"y",
"=",
"tuple",
"(",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"v",
".",
"round",
"(",
"0",
")",
")",
"grid",
"[",
"y",
"]",
"[",
"x",
"]",
"+=",
"1",
"elif",
"isinstance",
"(",
"layout",
",",
"dict",
")",
":",
"layout",
"=",
"layout",
".",
"copy",
"(",
")",
"grid",
"=",
"np",
".",
"zeros",
"(",
"[",
"axisSamples",
",",
"axisSamples",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"multFactor",
"=",
"(",
"axisSamples",
"-",
"1",
")",
"/",
"layoutScaleFactor",
"for",
"k",
"in",
"layout",
".",
"keys",
"(",
")",
":",
"tmpPos",
"=",
"layout",
"[",
"k",
"]",
"*",
"multFactor",
"layout",
"[",
"k",
"]",
"=",
"tmpPos",
"x",
",",
"y",
"=",
"tuple",
"(",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"tmpPos",
".",
"round",
"(",
"0",
")",
")",
"grid",
"[",
"y",
"]",
"[",
"x",
"]",
"+=",
"1",
"else",
":",
"raise",
"TypeError",
"(",
"\"{} is not a valid input.\"",
".",
"format",
"(",
"type",
"(",
"layout",
")",
")",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"#axis = fig.add_subplot(111)",
"axis",
"=",
"fig",
".",
"gca",
"(",
"projection",
"=",
"'3d'",
")",
"if",
"overlay",
":",
"nx",
".",
"draw_networkx",
"(",
"G",
",",
"pos",
"=",
"layout",
",",
"ax",
"=",
"axis",
",",
"node_size",
"=",
"nodeSize",
",",
"with_labels",
"=",
"False",
",",
"edgelist",
"=",
"[",
"]",
")",
"grid",
"=",
"ndi",
".",
"gaussian_filter",
"(",
"grid",
",",
"(",
"blurringFactor",
"*",
"axisSamples",
",",
"blurringFactor",
"*",
"axisSamples",
")",
")",
"X",
"=",
"Y",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"axisSamples",
",",
"1",
")",
"X",
",",
"Y",
"=",
"np",
".",
"meshgrid",
"(",
"X",
",",
"Y",
")",
"if",
"graphType",
"==",
"\"solid\"",
":",
"CS",
"=",
"axis",
".",
"plot_surface",
"(",
"X",
",",
"Y",
",",
"grid",
")",
"else",
":",
"CS",
"=",
"axis",
".",
"contourf",
"(",
"X",
",",
"Y",
",",
"grid",
",",
"contours",
")",
"axis",
".",
"set_xlabel",
"(",
"'X'",
")",
"axis",
".",
"set_ylabel",
"(",
"'Y'",
")",
"axis",
".",
"set_zlabel",
"(",
"'Node Density'",
")"
]
| Creates a 3D plot giving the density of nodes on a 2D plane, as a surface in 3D.
Most of the options are for tweaking the final appearance. _layout_ and _layoutScaleFactor_ allow a pre-layout graph to be provided. If a layout is not provided the [networkx.spring_layout()](https://networkx.github.io/documentation/latest/reference/generated/networkx.drawing.layout.spring_layout.html) is used after _iters_ iterations. Then, once the graph has been laid out a grid of _axisSamples_ cells by _axisSamples_ cells is overlaid and the number of nodes in each cell is determined, a gaussian blur is then applied with a sigma of _blurringFactor_. This then forms a surface in 3 dimensions, which is then plotted.
If you find the resultant image looks too banded raise the the _contours_ number to ~50.
# Parameters
_G_ : `networkx Graph`
> The graph to be plotted
_iters_ : `optional [int]`
> Default `50`, the number of iterations for the spring layout if _layout_ is not provided.
_layout_ : `optional [networkx layout dictionary]`
> Default `None`, if provided will be used as a layout of the graph, the maximum distance from the origin along any axis must also given as _layoutScaleFactor_, which is by default `1`.
_layoutScaleFactor_ : `optional [double]`
> Default `1`, The maximum distance from the origin allowed along any axis given by _layout_, i.e. the layout must fit in a square centered at the origin with side lengths 2 * _layoutScaleFactor_
_overlay_ : `optional [bool]`
> Default `False`, if `True` the 2D graph will be plotted on the X-Y plane at Z = 0.
_nodeSize_ : `optional [double]`
> Default `10`, the size of the nodes dawn in the overlay
_axisSamples_ : `optional [int]`
> Default 100, the number of cells used along each axis for sampling. A larger number will mean a lower average density.
_blurringFactor_ : `optional [double]`
> Default `0.1`, the sigma value used for smoothing the surface density. The higher this number the smoother the surface.
_contours_ : `optional [int]`
> Default 15, the number of different heights drawn. If this number is low the resultant image will look very banded. It is recommended this be raised above `50` if you want your images to look good, **Warning** this will make them much slower to generate and interact with.
_graphType_ : `optional [str]`
> Default `'coloured'`, if `'coloured'` the image will have a destiny based colourization applied, the only other option is `'solid'` which removes the colourization. | [
"Creates",
"a",
"3D",
"plot",
"giving",
"the",
"density",
"of",
"nodes",
"on",
"a",
"2D",
"plane",
"as",
"a",
"surface",
"in",
"3D",
"."
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/contour/plotting.py#L40-L125 | train |
networks-lab/metaknowledge | metaknowledge/WOS/tagProcessing/helpFuncs.py | makeBiDirectional | def makeBiDirectional(d):
"""
Helper for generating tagNameConverter
Makes dict that maps from key to value and back
"""
dTmp = d.copy()
for k in d:
dTmp[d[k]] = k
return dTmp | python | def makeBiDirectional(d):
"""
Helper for generating tagNameConverter
Makes dict that maps from key to value and back
"""
dTmp = d.copy()
for k in d:
dTmp[d[k]] = k
return dTmp | [
"def",
"makeBiDirectional",
"(",
"d",
")",
":",
"dTmp",
"=",
"d",
".",
"copy",
"(",
")",
"for",
"k",
"in",
"d",
":",
"dTmp",
"[",
"d",
"[",
"k",
"]",
"]",
"=",
"k",
"return",
"dTmp"
]
| Helper for generating tagNameConverter
Makes dict that maps from key to value and back | [
"Helper",
"for",
"generating",
"tagNameConverter",
"Makes",
"dict",
"that",
"maps",
"from",
"key",
"to",
"value",
"and",
"back"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/WOS/tagProcessing/helpFuncs.py#L27-L35 | train |
networks-lab/metaknowledge | metaknowledge/WOS/tagProcessing/helpFuncs.py | reverseDict | def reverseDict(d):
"""
Helper for generating fullToTag
Makes dict of value to key
"""
retD = {}
for k in d:
retD[d[k]] = k
return retD | python | def reverseDict(d):
"""
Helper for generating fullToTag
Makes dict of value to key
"""
retD = {}
for k in d:
retD[d[k]] = k
return retD | [
"def",
"reverseDict",
"(",
"d",
")",
":",
"retD",
"=",
"{",
"}",
"for",
"k",
"in",
"d",
":",
"retD",
"[",
"d",
"[",
"k",
"]",
"]",
"=",
"k",
"return",
"retD"
]
| Helper for generating fullToTag
Makes dict of value to key | [
"Helper",
"for",
"generating",
"fullToTag",
"Makes",
"dict",
"of",
"value",
"to",
"key"
]
| 8162bf95e66bb6f9916081338e6e2a6132faff75 | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/WOS/tagProcessing/helpFuncs.py#L37-L45 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.