repo (stringlengths 7-55) | path (stringlengths 4-127) | func_name (stringlengths 1-88) | original_string (stringlengths 75-19.8k) | language (stringclasses 1) | code (stringlengths 75-19.8k) | code_tokens (list) | docstring (stringlengths 3-17.3k) | docstring_tokens (list) | sha (stringlengths 40-40) | url (stringlengths 87-242) | partition (stringclasses 1) |
---|---|---|---|---|---|---|---|---|---|---|---|
openvax/varlens | varlens/read_evidence/pileup.py | Pileup.filter | def filter(self, filters):
'''
Apply filters to the pileup elements, and return a new Pileup with the
filtered elements removed.
Parameters
----------
filters : list of PileupElement -> bool callables
A PileupElement is retained if all filters return True when
called on it.
'''
new_elements = [
e for e in self.elements
if all(function(e) for function in filters)]
return Pileup(self.locus, new_elements) | python | def filter(self, filters):
'''
Apply filters to the pileup elements, and return a new Pileup with the
filtered elements removed.
Parameters
----------
filters : list of PileupElement -> bool callables
A PileupElement is retained if all filters return True when
called on it.
'''
new_elements = [
e for e in self.elements
if all(function(e) for function in filters)]
return Pileup(self.locus, new_elements) | [
"def",
"filter",
"(",
"self",
",",
"filters",
")",
":",
"new_elements",
"=",
"[",
"e",
"for",
"e",
"in",
"self",
".",
"elements",
"if",
"all",
"(",
"function",
"(",
"e",
")",
"for",
"function",
"in",
"filters",
")",
"]",
"return",
"Pileup",
"(",
"self",
".",
"locus",
",",
"new_elements",
")"
] | Apply filters to the pileup elements, and return a new Pileup with the
filtered elements removed.
Parameters
----------
filters : list of PileupElement -> bool callables
A PileupElement is retained if all filters return True when
called on it. | [
"Apply",
"filters",
"to",
"the",
"pileup",
"elements",
"and",
"return",
"a",
"new",
"Pileup",
"with",
"the",
"filtered",
"elements",
"removed",
"."
] | 715d3ede5893757b2fcba4117515621bca7b1e5d | https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup.py#L72-L86 | train |
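Below is a self-contained sketch of the `Pileup.filter` contract, using minimal stand-in classes rather than the real `varlens` types (the `base_quality` and `is_reverse` attributes are invented for illustration): an element survives only if every filter callable returns `True`.

```python
from collections import namedtuple

# Stand-ins for varlens' Pileup/PileupElement, only to exercise filter();
# the real classes carry far more state.
Element = namedtuple("Element", ["base_quality", "is_reverse"])

class TinyPileup:
    def __init__(self, locus, elements):
        self.locus = locus
        self.elements = elements

    def filter(self, filters):
        # Same logic as Pileup.filter above: keep an element only if
        # every callable in `filters` returns True for it.
        kept = [e for e in self.elements if all(f(e) for f in filters)]
        return TinyPileup(self.locus, kept)

pileup = TinyPileup("chr1:100", [Element(30, False), Element(10, True)])
filtered = pileup.filter([
    lambda e: e.base_quality >= 20,
    lambda e: not e.is_reverse,
])
assert len(filtered.elements) == 1  # only the high-quality forward read
```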
BernardFW/bernard | src/bernard/analytics/base.py | new_task | def new_task(func):
"""
Runs the decorated function in a new task
"""
@wraps(func)
async def wrapper(self, *args, **kwargs):
loop = get_event_loop()
loop.create_task(func(self, *args, **kwargs))
return wrapper | python | def new_task(func):
"""
Runs the decorated function in a new task
"""
@wraps(func)
async def wrapper(self, *args, **kwargs):
loop = get_event_loop()
loop.create_task(func(self, *args, **kwargs))
return wrapper | [
"def",
"new_task",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"async",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"loop",
"=",
"get_event_loop",
"(",
")",
"loop",
".",
"create_task",
"(",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"wrapper"
] | Runs the decorated function in a new task | [
"Runs",
"the",
"decorated",
"function",
"in",
"a",
"new",
"task"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/analytics/base.py#L65-L74 | train |
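The `new_task` decorator above is fire-and-forget: awaiting the wrapped coroutine schedules the real work as a background task and returns immediately. A runnable sketch (the `Tracker` class is hypothetical):

```python
import asyncio
from asyncio import get_event_loop
from functools import wraps

def new_task(func):
    # Same shape as the decorator above: schedule func as a background
    # task instead of awaiting it inline.
    @wraps(func)
    async def wrapper(self, *args, **kwargs):
        loop = get_event_loop()
        loop.create_task(func(self, *args, **kwargs))
    return wrapper

class Tracker:
    @new_task
    async def send_event(self, name):
        await asyncio.sleep(0.1)  # pretend network call
        print("sent", name)

async def main():
    await Tracker().send_event("page_view")  # returns at once
    await asyncio.sleep(0.2)                 # give the task time to finish

asyncio.run(main())
```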
BernardFW/bernard | src/bernard/analytics/base.py | providers | async def providers():
"""
Iterates over all instances of analytics provider found in configuration
"""
for provider in settings.ANALYTICS_PROVIDERS:
cls: BaseAnalytics = import_class(provider['class'])
yield await cls.instance(*provider['args']) | python | async def providers():
"""
Iterates over all instances of analytics provider found in configuration
"""
for provider in settings.ANALYTICS_PROVIDERS:
cls: BaseAnalytics = import_class(provider['class'])
yield await cls.instance(*provider['args']) | [
"async",
"def",
"providers",
"(",
")",
":",
"for",
"provider",
"in",
"settings",
".",
"ANALYTICS_PROVIDERS",
":",
"cls",
":",
"BaseAnalytics",
"=",
"import_class",
"(",
"provider",
"[",
"'class'",
"]",
")",
"yield",
"await",
"cls",
".",
"instance",
"(",
"*",
"provider",
"[",
"'args'",
"]",
")"
] | Iterates over all instances of analytics provider found in configuration | [
"Iterates",
"over",
"all",
"instances",
"of",
"analytics",
"provider",
"found",
"in",
"configuration"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/analytics/base.py#L77-L84 | train |
BernardFW/bernard | src/bernard/analytics/base.py | BaseAnalytics.page_view | async def page_view(self,
url: str,
title: str,
user_id: str,
user_lang: str='') -> None:
"""
Track the view of a page
"""
raise NotImplementedError | python | async def page_view(self,
url: str,
title: str,
user_id: str,
user_lang: str='') -> None:
"""
Track the view of a page
"""
raise NotImplementedError | [
"async",
"def",
"page_view",
"(",
"self",
",",
"url",
":",
"str",
",",
"title",
":",
"str",
",",
"user_id",
":",
"str",
",",
"user_lang",
":",
"str",
"=",
"''",
")",
"->",
"None",
":",
"raise",
"NotImplementedError"
] | Track the view of a page | [
"Track",
"the",
"view",
"of",
"a",
"page"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/analytics/base.py#L37-L46 | train |
BernardFW/bernard | src/bernard/analytics/base.py | BaseAnalytics.hash_user_id | def hash_user_id(self, user_id: str) -> str:
"""
As per the law, anonymize user identifier before sending it.
"""
h = sha256()
h.update(user_id.encode())
return h.hexdigest() | python | def hash_user_id(self, user_id: str) -> str:
"""
As per the law, anonymize user identifier before sending it.
"""
h = sha256()
h.update(user_id.encode())
return h.hexdigest() | [
"def",
"hash_user_id",
"(",
"self",
",",
"user_id",
":",
"str",
")",
"->",
"str",
":",
"h",
"=",
"sha256",
"(",
")",
"h",
".",
"update",
"(",
"user_id",
".",
"encode",
"(",
")",
")",
"return",
"h",
".",
"hexdigest",
"(",
")"
] | As per the law, anonymize user identifier before sending it. | [
"As",
"per",
"the",
"law",
"anonymize",
"user",
"identifier",
"before",
"sending",
"it",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/analytics/base.py#L48-L55 | train |
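`hash_user_id` is a plain one-way SHA-256 digest, so identifiers stay correlatable across events without ever transmitting the raw value. A standalone equivalent:

```python
from hashlib import sha256

def hash_user_id(user_id: str) -> str:
    # Same logic as BaseAnalytics.hash_user_id: a one-way digest that is
    # stable for a given input but does not reveal the original id.
    h = sha256()
    h.update(user_id.encode())
    return h.hexdigest()

digest = hash_user_id("user-42")
assert len(digest) == 64                   # 256 bits rendered as hex
assert digest == hash_user_id("user-42")   # deterministic
assert digest != hash_user_id("user-43")   # distinct users stay distinct
```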
inveniosoftware-contrib/invenio-workflows | invenio_workflows/models.py | Workflow.delete | def delete(cls, uuid):
"""Delete a workflow."""
to_delete = Workflow.query.get(uuid)
db.session.delete(to_delete) | python | def delete(cls, uuid):
"""Delete a workflow."""
to_delete = Workflow.query.get(uuid)
db.session.delete(to_delete) | [
"def",
"delete",
"(",
"cls",
",",
"uuid",
")",
":",
"to_delete",
"=",
"Workflow",
".",
"query",
".",
"get",
"(",
"uuid",
")",
"db",
".",
"session",
".",
"delete",
"(",
"to_delete",
")"
] | Delete a workflow. | [
"Delete",
"a",
"workflow",
"."
] | 9c09fd29509a3db975ac2aba337e6760d8cfd3c2 | https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/models.py#L96-L99 | train |
inveniosoftware-contrib/invenio-workflows | invenio_workflows/worker_engine.py | run_worker | def run_worker(wname, data, engine_uuid_hex=None, **kwargs):
"""Run a workflow by name with list of data objects.
The list of data can also contain WorkflowObjects.
``**kwargs`` can be used to pass custom arguments to the engine/object.
:param wname: name of workflow to run.
:type wname: str
:param data: objects to run through the workflow.
:type data: list
:param engine_uuid_hex: hex string of the uuid of the engine to use, if
not passed will create a new one.
:type engine_uuid_hex: str
:return: WorkflowEngine instance
"""
if 'stop_on_halt' not in kwargs:
kwargs['stop_on_halt'] = False
if engine_uuid_hex:
engine_uuid = uuid.UUID(hex=engine_uuid_hex)
engine = WorkflowEngine.from_uuid(uuid=engine_uuid, **kwargs)
else:
engine = WorkflowEngine.with_name(wname, **kwargs)
engine.save()
objects = get_workflow_object_instances(data, engine)
db.session.commit()
engine.process(objects, **kwargs)
return engine | python | def run_worker(wname, data, engine_uuid_hex=None, **kwargs):
"""Run a workflow by name with list of data objects.
The list of data can also contain WorkflowObjects.
``**kwargs`` can be used to pass custom arguments to the engine/object.
:param wname: name of workflow to run.
:type wname: str
:param data: objects to run through the workflow.
:type data: list
:param engine_uuid_hex: hex string of the uuid of the engine to use, if
not passed will create a new one.
:type engine_uuid_hex: str
:return: WorkflowEngine instance
"""
if 'stop_on_halt' not in kwargs:
kwargs['stop_on_halt'] = False
if engine_uuid_hex:
engine_uuid = uuid.UUID(hex=engine_uuid_hex)
engine = WorkflowEngine.from_uuid(uuid=engine_uuid, **kwargs)
else:
engine = WorkflowEngine.with_name(wname, **kwargs)
engine.save()
objects = get_workflow_object_instances(data, engine)
db.session.commit()
engine.process(objects, **kwargs)
return engine | [
"def",
"run_worker",
"(",
"wname",
",",
"data",
",",
"engine_uuid_hex",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'stop_on_halt'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'stop_on_halt'",
"]",
"=",
"False",
"if",
"engine_uuid_hex",
":",
"engine_uuid",
"=",
"uuid",
".",
"UUID",
"(",
"hex",
"=",
"engine_uuid_hex",
")",
"engine",
"=",
"WorkflowEngine",
".",
"from_uuid",
"(",
"uuid",
"=",
"engine_uuid",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"engine",
"=",
"WorkflowEngine",
".",
"with_name",
"(",
"wname",
",",
"*",
"*",
"kwargs",
")",
"engine",
".",
"save",
"(",
")",
"objects",
"=",
"get_workflow_object_instances",
"(",
"data",
",",
"engine",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"engine",
".",
"process",
"(",
"objects",
",",
"*",
"*",
"kwargs",
")",
"return",
"engine"
] | Run a workflow by name with list of data objects.
The list of data can also contain WorkflowObjects.
``**kwargs`` can be used to pass custom arguments to the engine/object.
:param wname: name of workflow to run.
:type wname: str
:param data: objects to run through the workflow.
:type data: list
:param engine_uuid_hex: hex string of the uuid of the engine to use, if
not passed will create a new one.
:type engine_uuid_hex: str
:return: WorkflowEngine instance | [
"Run",
"a",
"workflow",
"by",
"name",
"with",
"list",
"of",
"data",
"objects",
"."
] | 9c09fd29509a3db975ac2aba337e6760d8cfd3c2 | https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/worker_engine.py#L30-L62 | train |
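A hedged sketch of how `run_worker` might be invoked; the workflow name and payload dicts below are invented, and the workflow must already be registered with invenio-workflows:

```python
# Hypothetical invocation -- 'my_workflow' and the payload dicts are
# placeholders, not part of the invenio-workflows API.
engine = run_worker(
    'my_workflow',
    [{'title': 'first record'}, {'title': 'second record'}],
    stop_on_halt=True,  # overrides the False default injected above
)

# The engine's uuid can later be fed back through engine_uuid_hex= to
# attach more objects to the same workflow run.
print(engine.uuid)
```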
inveniosoftware-contrib/invenio-workflows | invenio_workflows/worker_engine.py | restart_worker | def restart_worker(uuid, **kwargs):
"""Restart workflow from beginning with given engine UUID and any data.
``**kwargs`` can be used to pass custom arguments to the engine/object such
as ``data``. If ``data`` is not specified then it will load all
initial data for the data objects.
Data can be specified as list of objects or single id of
WorkflowObjects.
:param uuid: workflow id (uuid) of the ``WorkflowEngine`` to be restarted
:type uuid: str
:return: ``WorkflowEngine`` instance
"""
if 'stop_on_halt' not in kwargs:
kwargs['stop_on_halt'] = False
engine = WorkflowEngine.from_uuid(uuid=uuid, **kwargs)
if "data" not in kwargs:
objects = workflow_object_class.query(id_workflow=uuid)
else:
data = kwargs.pop("data")
if not isinstance(data, (list, tuple)):
data = [data]
objects = get_workflow_object_instances(data, engine)
db.session.commit()
engine.process(objects, **kwargs)
return engine | python | def restart_worker(uuid, **kwargs):
"""Restart workflow from beginning with given engine UUID and any data.
``**kwargs`` can be used to pass custom arguments to the engine/object such
as ``data``. If ``data`` is not specified then it will load all
initial data for the data objects.
Data can be specified as list of objects or single id of
WorkflowObjects.
:param uuid: workflow id (uuid) of the ``WorkflowEngine`` to be restarted
:type uuid: str
:return: ``WorkflowEngine`` instance
"""
if 'stop_on_halt' not in kwargs:
kwargs['stop_on_halt'] = False
engine = WorkflowEngine.from_uuid(uuid=uuid, **kwargs)
if "data" not in kwargs:
objects = workflow_object_class.query(id_workflow=uuid)
else:
data = kwargs.pop("data")
if not isinstance(data, (list, tuple)):
data = [data]
objects = get_workflow_object_instances(data, engine)
db.session.commit()
engine.process(objects, **kwargs)
return engine | [
"def",
"restart_worker",
"(",
"uuid",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'stop_on_halt'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'stop_on_halt'",
"]",
"=",
"False",
"engine",
"=",
"WorkflowEngine",
".",
"from_uuid",
"(",
"uuid",
"=",
"uuid",
",",
"*",
"*",
"kwargs",
")",
"if",
"\"data\"",
"not",
"in",
"kwargs",
":",
"objects",
"=",
"workflow_object_class",
".",
"query",
"(",
"id_workflow",
"=",
"uuid",
")",
"else",
":",
"data",
"=",
"kwargs",
".",
"pop",
"(",
"\"data\"",
")",
"if",
"not",
"isinstance",
"(",
"data",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"data",
"=",
"[",
"data",
"]",
"objects",
"=",
"get_workflow_object_instances",
"(",
"data",
",",
"engine",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"engine",
".",
"process",
"(",
"objects",
",",
"*",
"*",
"kwargs",
")",
"return",
"engine"
] | Restart workflow from beginning with given engine UUID and any data.
``**kwargs`` can be used to pass custom arguments to the engine/object such
as ``data``. If ``data`` is not specified then it will load all
initial data for the data objects.
Data can be specified as list of objects or single id of
WorkflowObjects.
:param uuid: workflow id (uuid) of the ``WorkflowEngine`` to be restarted
:type uuid: str
:return: ``WorkflowEngine`` instance | [
"Restart",
"workflow",
"from",
"beginning",
"with",
"given",
"engine",
"UUID",
"and",
"any",
"data",
"."
] | 9c09fd29509a3db975ac2aba337e6760d8cfd3c2 | https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/worker_engine.py#L65-L95 | train |
inveniosoftware-contrib/invenio-workflows | invenio_workflows/worker_engine.py | get_workflow_object_instances | def get_workflow_object_instances(data, engine):
"""Analyze data and create corresponding WorkflowObjects.
Wrap each item in the given list of data objects into WorkflowObject
instances - creating appropriate status of objects in the database and
returning a list of these objects.
This process is necessary to save an initial status of the data before
running it (and potentially changing it) in the workflow.
This function also takes into account if given data objects are already
WorkflowObject instances.
:param data: list of data objects to wrap
:type data: list
:param engine: instance of WorkflowEngine
:type engine: py:class:`.engine.WorkflowEngine`
:return: list of WorkflowObject
"""
workflow_objects = []
data_type = engine.get_default_data_type()
for data_object in data:
if isinstance(
data_object, workflow_object_class._get_current_object()
):
if not data_object.data_type:
data_object.data_type = data_type
if data_object.id:
data_object.log.debug("Existing workflow object found for "
"this object.")
if data_object.status == data_object.known_statuses.COMPLETED:
data_object.status = data_object.known_statuses.INITIAL
workflow_objects.append(data_object)
else:
# Data is not already a WorkflowObject, we then
# add the running object to run through the workflow.
current_obj = create_data_object_from_data(
data_object,
engine,
data_type
)
workflow_objects.append(current_obj)
return workflow_objects | python | def get_workflow_object_instances(data, engine):
"""Analyze data and create corresponding WorkflowObjects.
Wrap each item in the given list of data objects into WorkflowObject
instances - creating appropriate status of objects in the database and
returning a list of these objects.
This process is necessary to save an initial status of the data before
running it (and potentially changing it) in the workflow.
This function also takes into account if given data objects are already
WorkflowObject instances.
:param data: list of data objects to wrap
:type data: list
:param engine: instance of WorkflowEngine
:type engine: py:class:`.engine.WorkflowEngine`
:return: list of WorkflowObject
"""
workflow_objects = []
data_type = engine.get_default_data_type()
for data_object in data:
if isinstance(
data_object, workflow_object_class._get_current_object()
):
if not data_object.data_type:
data_object.data_type = data_type
if data_object.id:
data_object.log.debug("Existing workflow object found for "
"this object.")
if data_object.status == data_object.known_statuses.COMPLETED:
data_object.status = data_object.known_statuses.INITIAL
workflow_objects.append(data_object)
else:
# Data is not already a WorkflowObject, we then
# add the running object to run through the workflow.
current_obj = create_data_object_from_data(
data_object,
engine,
data_type
)
workflow_objects.append(current_obj)
return workflow_objects | [
"def",
"get_workflow_object_instances",
"(",
"data",
",",
"engine",
")",
":",
"workflow_objects",
"=",
"[",
"]",
"data_type",
"=",
"engine",
".",
"get_default_data_type",
"(",
")",
"for",
"data_object",
"in",
"data",
":",
"if",
"isinstance",
"(",
"data_object",
",",
"workflow_object_class",
".",
"_get_current_object",
"(",
")",
")",
":",
"if",
"not",
"data_object",
".",
"data_type",
":",
"data_object",
".",
"data_type",
"=",
"data_type",
"if",
"data_object",
".",
"id",
":",
"data_object",
".",
"log",
".",
"debug",
"(",
"\"Existing workflow object found for \"",
"\"this object.\"",
")",
"if",
"data_object",
".",
"status",
"==",
"data_object",
".",
"known_statuses",
".",
"COMPLETED",
":",
"data_object",
".",
"status",
"=",
"data_object",
".",
"known_statuses",
".",
"INITIAL",
"workflow_objects",
".",
"append",
"(",
"data_object",
")",
"else",
":",
"# Data is not already a WorkflowObject, we then",
"# add the running object to run through the workflow.",
"current_obj",
"=",
"create_data_object_from_data",
"(",
"data_object",
",",
"engine",
",",
"data_type",
")",
"workflow_objects",
".",
"append",
"(",
"current_obj",
")",
"return",
"workflow_objects"
] | Analyze data and create corresponding WorkflowObjects.
Wrap each item in the given list of data objects into WorkflowObject
instances - creating appropriate status of objects in the database and
returning a list of these objects.
This process is necessary to save an initial status of the data before
running it (and potentially changing it) in the workflow.
This function also takes into account if given data objects are already
WorkflowObject instances.
:param data: list of data objects to wrap
:type data: list
:param engine: instance of WorkflowEngine
:type engine: py:class:`.engine.WorkflowEngine`
:return: list of WorkflowObject | [
"Analyze",
"data",
"and",
"create",
"corresponding",
"WorkflowObjects",
"."
] | 9c09fd29509a3db975ac2aba337e6760d8cfd3c2 | https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/worker_engine.py#L136-L184 | train |
inveniosoftware-contrib/invenio-workflows | invenio_workflows/worker_engine.py | create_data_object_from_data | def create_data_object_from_data(data_object, engine, data_type):
"""Create a new WorkflowObject from given data and return it.
Returns a data object wrapped around data_object given.
:param data_object: object containing the data
:type data_object: object
:param engine: Instance of Workflow that is currently running.
:type engine: py:class:`.engine.WorkflowEngine`
:param data_type: type of the data given as taken from workflow definition.
:type data_type: str
:returns: new WorkflowObject
"""
# Data is not already a WorkflowObject, we first
# create an initial object for each data object.
return workflow_object_class.create(
data=data_object,
id_workflow=engine.uuid,
status=workflow_object_class.known_statuses.INITIAL,
data_type=data_type,
) | python | def create_data_object_from_data(data_object, engine, data_type):
"""Create a new WorkflowObject from given data and return it.
Returns a data object wrapped around data_object given.
:param data_object: object containing the data
:type data_object: object
:param engine: Instance of Workflow that is currently running.
:type engine: py:class:`.engine.WorkflowEngine`
:param data_type: type of the data given as taken from workflow definition.
:type data_type: str
:returns: new WorkflowObject
"""
# Data is not already a WorkflowObject, we first
# create an initial object for each data object.
return workflow_object_class.create(
data=data_object,
id_workflow=engine.uuid,
status=workflow_object_class.known_statuses.INITIAL,
data_type=data_type,
) | [
"def",
"create_data_object_from_data",
"(",
"data_object",
",",
"engine",
",",
"data_type",
")",
":",
"# Data is not already a WorkflowObject, we first",
"# create an initial object for each data object.",
"return",
"workflow_object_class",
".",
"create",
"(",
"data",
"=",
"data_object",
",",
"id_workflow",
"=",
"engine",
".",
"uuid",
",",
"status",
"=",
"workflow_object_class",
".",
"known_statuses",
".",
"INITIAL",
",",
"data_type",
"=",
"data_type",
",",
")"
] | Create a new WorkflowObject from given data and return it.
Returns a data object wrapped around data_object given.
:param data_object: object containing the data
:type data_object: object
:param engine: Instance of Workflow that is currently running.
:type engine: py:class:`.engine.WorkflowEngine`
:param data_type: type of the data given as taken from workflow definition.
:type data_type: str
:returns: new WorkflowObject | [
"Create",
"a",
"new",
"WorkflowObject",
"from",
"given",
"data",
"and",
"return",
"it",
"."
] | 9c09fd29509a3db975ac2aba337e6760d8cfd3c2 | https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/worker_engine.py#L187-L210 | train |
cloudmesh-cmd3/cmd3 | cmd3/plugins/rst.py | rst._print_rst | def _print_rst(self, what):
"""
prints the rst page of the command what
:param what: the command
:type what: string
"""
print
print "Command - %s::" % what
exec ("h = self.do_%s.__doc__" % what)
# noinspection PyUnboundLocalVariable
h = textwrap.dedent(h).replace("::\n\n", "")
h = textwrap.dedent(h).replace("\n", "\n ")
print h | python | def _print_rst(self, what):
"""
prints the rst page of the command what
:param what: the command
:type what: string
"""
print
print "Command - %s::" % what
exec ("h = self.do_%s.__doc__" % what)
# noinspection PyUnboundLocalVariable
h = textwrap.dedent(h).replace("::\n\n", "")
h = textwrap.dedent(h).replace("\n", "\n ")
print h | [
"def",
"_print_rst",
"(",
"self",
",",
"what",
")",
":",
"print",
"print",
"\"Command - %s::\"",
"%",
"what",
"exec",
"(",
"\"h = self.do_%s.__doc__\"",
"%",
"what",
")",
"# noinspection PyUnboundLocalVariable",
"h",
"=",
"textwrap",
".",
"dedent",
"(",
"h",
")",
".",
"replace",
"(",
"\"::\\n\\n\"",
",",
"\"\"",
")",
"h",
"=",
"textwrap",
".",
"dedent",
"(",
"h",
")",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\\n \"",
")",
"print",
"h"
] | prints the rst page of the command what
:param what: the command
:type what: string | [
"prints",
"the",
"rst",
"page",
"of",
"the",
"command",
"what"
] | 92e33c96032fd3921f159198a0e57917c4dc34ed | https://github.com/cloudmesh-cmd3/cmd3/blob/92e33c96032fd3921f159198a0e57917c4dc34ed/cmd3/plugins/rst.py#L11-L27 | train |
garenchan/policy | policy/enforcer.py | Rules.load_json | def load_json(cls, data, default_rule=None, raise_error=False):
"""Allow loading of JSON rule data."""
rules = {k: _parser.parse_rule(v, raise_error)
for k, v in json.loads(data).items()}
return cls(rules, default_rule) | python | def load_json(cls, data, default_rule=None, raise_error=False):
"""Allow loading of JSON rule data."""
rules = {k: _parser.parse_rule(v, raise_error)
for k, v in json.loads(data).items()}
return cls(rules, default_rule) | [
"def",
"load_json",
"(",
"cls",
",",
"data",
",",
"default_rule",
"=",
"None",
",",
"raise_error",
"=",
"False",
")",
":",
"rules",
"=",
"{",
"k",
":",
"_parser",
".",
"parse_rule",
"(",
"v",
",",
"raise_error",
")",
"for",
"k",
",",
"v",
"in",
"json",
".",
"loads",
"(",
"data",
")",
".",
"items",
"(",
")",
"}",
"return",
"cls",
"(",
"rules",
",",
"default_rule",
")"
] | Allow loading of JSON rule data. | [
"Allow",
"loading",
"of",
"JSON",
"rule",
"data",
"."
] | 7709ae5f371146f8c90380d0877a5e59d731f644 | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L30-L36 | train |
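A sketch of feeding `Rules.load_json` an oslo.policy-style document; the rule grammar shown (`role:`, `rule:`) follows that convention and is an assumption about this library's parser, as is treating the returned `Rules` object like a dict of parsed checks:

```python
import json

# Hypothetical policy document; each value is handed to
# _parser.parse_rule and may reference other rules by name.
policy = json.dumps({
    "admin_required": "role:admin",
    "owner_only": "user_id:%(user_id)s",
    "admin_or_owner": "rule:admin_required or rule:owner_only",
})

rules = Rules.load_json(policy, default_rule="admin_required")
print(sorted(rules))  # ['admin_or_owner', 'admin_required', 'owner_only']
```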
garenchan/policy | policy/enforcer.py | Rules.from_dict | def from_dict(cls, rules_dict: dict, default_rule=None, raise_error=False):
"""Allow loading of rule data from a dictionary."""
# Parse the rules stored in the dictionary
rules = {k: _parser.parse_rule(v, raise_error)
for k, v in rules_dict.items()}
return cls(rules, default_rule) | python | def from_dict(cls, rules_dict: dict, default_rule=None, raise_error=False):
"""Allow loading of rule data from a dictionary."""
# Parse the rules stored in the dictionary
rules = {k: _parser.parse_rule(v, raise_error)
for k, v in rules_dict.items()}
return cls(rules, default_rule) | [
"def",
"from_dict",
"(",
"cls",
",",
"rules_dict",
":",
"dict",
",",
"default_rule",
"=",
"None",
",",
"raise_error",
"=",
"False",
")",
":",
"# Parse the rules stored in the dictionary",
"rules",
"=",
"{",
"k",
":",
"_parser",
".",
"parse_rule",
"(",
"v",
",",
"raise_error",
")",
"for",
"k",
",",
"v",
"in",
"rules_dict",
".",
"items",
"(",
")",
"}",
"return",
"cls",
"(",
"rules",
",",
"default_rule",
")"
] | Allow loading of rule data from a dictionary. | [
"Allow",
"loading",
"of",
"rule",
"data",
"from",
"a",
"dictionary",
"."
] | 7709ae5f371146f8c90380d0877a5e59d731f644 | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L39-L46 | train |
garenchan/policy | policy/enforcer.py | Enforcer._set_rules | def _set_rules(self, rules: dict, overwrite=True):
"""Created a new Rules object based on the provided dict of rules."""
if not isinstance(rules, dict):
raise TypeError('rules must be an instance of dict or Rules,'
'got %r instead' % type(rules))
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules) | python | def _set_rules(self, rules: dict, overwrite=True):
"""Created a new Rules object based on the provided dict of rules."""
if not isinstance(rules, dict):
raise TypeError('rules must be an instance of dict or Rules,'
'got %r instead' % type(rules))
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules) | [
"def",
"_set_rules",
"(",
"self",
",",
"rules",
":",
"dict",
",",
"overwrite",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"rules",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'rules must be an instance of dict or Rules,'",
"'got %r instead'",
"%",
"type",
"(",
"rules",
")",
")",
"if",
"overwrite",
":",
"self",
".",
"rules",
"=",
"Rules",
"(",
"rules",
",",
"self",
".",
"default_rule",
")",
"else",
":",
"self",
".",
"rules",
".",
"update",
"(",
"rules",
")"
] | Create a new Rules object based on the provided dict of rules. | [
"Created",
"a",
"new",
"Rules",
"object",
"based",
"on",
"the",
"provided",
"dict",
"of",
"rules",
"."
] | 7709ae5f371146f8c90380d0877a5e59d731f644 | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L103-L113 | train |
garenchan/policy | policy/enforcer.py | Enforcer.load_rules | def load_rules(self, force_reload=False, overwrite=True):
"""Load rules from policy file or cache."""
# double-checked locking
if self.load_once and self._policy_loaded:
return
with self._load_lock:
if self.load_once and self._policy_loaded:
return
reloaded, data = _cache.read_file(
self.policy_file, force_reload=force_reload)
self._policy_loaded = True
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule, self.raise_error)
self._set_rules(rules, overwrite=overwrite)
LOG.debug('Reload policy file: %s', self.policy_file) | python | def load_rules(self, force_reload=False, overwrite=True):
"""Load rules from policy file or cache."""
# double-checked locking
if self.load_once and self._policy_loaded:
return
with self._load_lock:
if self.load_once and self._policy_loaded:
return
reloaded, data = _cache.read_file(
self.policy_file, force_reload=force_reload)
self._policy_loaded = True
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule, self.raise_error)
self._set_rules(rules, overwrite=overwrite)
LOG.debug('Reload policy file: %s', self.policy_file) | [
"def",
"load_rules",
"(",
"self",
",",
"force_reload",
"=",
"False",
",",
"overwrite",
"=",
"True",
")",
":",
"# double-checked locking",
"if",
"self",
".",
"load_once",
"and",
"self",
".",
"_policy_loaded",
":",
"return",
"with",
"self",
".",
"_load_lock",
":",
"if",
"self",
".",
"load_once",
"and",
"self",
".",
"_policy_loaded",
":",
"return",
"reloaded",
",",
"data",
"=",
"_cache",
".",
"read_file",
"(",
"self",
".",
"policy_file",
",",
"force_reload",
"=",
"force_reload",
")",
"self",
".",
"_policy_loaded",
"=",
"True",
"if",
"reloaded",
"or",
"not",
"self",
".",
"rules",
":",
"rules",
"=",
"Rules",
".",
"load_json",
"(",
"data",
",",
"self",
".",
"default_rule",
",",
"self",
".",
"raise_error",
")",
"self",
".",
"_set_rules",
"(",
"rules",
",",
"overwrite",
"=",
"overwrite",
")",
"LOG",
".",
"debug",
"(",
"'Reload policy file: %s'",
",",
"self",
".",
"policy_file",
")"
] | Load rules from policy file or cache. | [
"Load",
"rules",
"from",
"policy",
"file",
"or",
"cache",
"."
] | 7709ae5f371146f8c90380d0877a5e59d731f644 | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L115-L131 | train |
garenchan/policy | policy/enforcer.py | Enforcer.enforce | def enforce(self, rule, target, creds, exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials."""
self.load_rules()
if isinstance(rule, checks.BaseCheck):
result = rule(target, creds, self, rule)
elif not self.rules:
# No rules means we're going to fail closed.
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self, rule)
except KeyError:
LOG.debug('Rule [%s] does not exist', rule)
# If the rule doesn't exist, fail closed
result = False
if self.raise_error and not result:
if exc:
raise exc(*args, **kwargs)
else:
raise PolicyNotAuthorized(rule, target, creds)
return result | python | def enforce(self, rule, target, creds, exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials."""
self.load_rules()
if isinstance(rule, checks.BaseCheck):
result = rule(target, creds, self, rule)
elif not self.rules:
# No rules means we're going to fail closed.
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self, rule)
except KeyError:
LOG.debug('Rule [%s] does not exist', rule)
# If the rule doesn't exist, fail closed
result = False
if self.raise_error and not result:
if exc:
raise exc(*args, **kwargs)
else:
raise PolicyNotAuthorized(rule, target, creds)
return result | [
"def",
"enforce",
"(",
"self",
",",
"rule",
",",
"target",
",",
"creds",
",",
"exc",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"load_rules",
"(",
")",
"if",
"isinstance",
"(",
"rule",
",",
"checks",
".",
"BaseCheck",
")",
":",
"result",
"=",
"rule",
"(",
"target",
",",
"creds",
",",
"self",
",",
"rule",
")",
"elif",
"not",
"self",
".",
"rules",
":",
"# No rules means we're going to fail closed.",
"result",
"=",
"False",
"else",
":",
"try",
":",
"# Evaluate the rule",
"result",
"=",
"self",
".",
"rules",
"[",
"rule",
"]",
"(",
"target",
",",
"creds",
",",
"self",
",",
"rule",
")",
"except",
"KeyError",
":",
"LOG",
".",
"debug",
"(",
"'Rule [%s] does not exist'",
",",
"rule",
")",
"# If the rule doesn't exist, fail closed",
"result",
"=",
"False",
"if",
"self",
".",
"raise_error",
"and",
"not",
"result",
":",
"if",
"exc",
":",
"raise",
"exc",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"PolicyNotAuthorized",
"(",
"rule",
",",
"target",
",",
"creds",
")",
"return",
"result"
] | Checks authorization of a rule against the target and credentials. | [
"Checks",
"authorization",
"of",
"a",
"rule",
"against",
"the",
"target",
"and",
"credentials",
"."
] | 7709ae5f371146f8c90380d0877a5e59d731f644 | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L133-L158 | train |
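Putting `load_rules` and `enforce` together, a hedged usage sketch; the constructor arguments and the credential/target layout are assumptions inferred from the code above, not a confirmed API:

```python
# Hypothetical setup; assumes Enforcer accepts a policy file path and
# that target/creds are plain dicts, as the checks above suggest.
enforcer = Enforcer(policy_file='policy.json')

target = {'user_id': 'u-123'}                      # the resource
creds = {'roles': ['member'], 'user_id': 'u-123'}  # the requester

# Returns False (or raises, when raise_error is set) if the rule is
# missing or evaluates false -- i.e. the enforcer fails closed.
allowed = enforcer.enforce('admin_or_owner', target, creds)
print(allowed)
```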
seung-lab/EMAnnotationSchemas | emannotationschemas/utils.py | get_flattened_bsp_keys_from_schema | def get_flattened_bsp_keys_from_schema(schema):
""" Returns the flattened keys of BoundSpatialPoints in a schema
:param schema: schema
:return: list
"""
keys = []
for key in schema.declared_fields.keys():
field = schema.declared_fields[key]
if isinstance(field, mm.fields.Nested) and \
isinstance(field.schema, BoundSpatialPoint):
keys.append("{}.{}".format(key, "position"))
return keys | python | def get_flattened_bsp_keys_from_schema(schema):
""" Returns the flattened keys of BoundSpatialPoints in a schema
:param schema: schema
:return: list
"""
keys = []
for key in schema.declared_fields.keys():
field = schema.declared_fields[key]
if isinstance(field, mm.fields.Nested) and \
isinstance(field.schema, BoundSpatialPoint):
keys.append("{}.{}".format(key, "position"))
return keys | [
"def",
"get_flattened_bsp_keys_from_schema",
"(",
"schema",
")",
":",
"keys",
"=",
"[",
"]",
"for",
"key",
"in",
"schema",
".",
"declared_fields",
".",
"keys",
"(",
")",
":",
"field",
"=",
"schema",
".",
"declared_fields",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"field",
",",
"mm",
".",
"fields",
".",
"Nested",
")",
"and",
"isinstance",
"(",
"field",
".",
"schema",
",",
"BoundSpatialPoint",
")",
":",
"keys",
".",
"append",
"(",
"\"{}.{}\"",
".",
"format",
"(",
"key",
",",
"\"position\"",
")",
")",
"return",
"keys"
] | Returns the flattened keys of BoundSpatialPoints in a schema
:param schema: schema
:return: list | [
"Returns",
"the",
"flattened",
"keys",
"of",
"BoundSpatialPoints",
"in",
"a",
"schema"
] | ca81eff0f449bd7eb0392e0982db8f3636446a9e | https://github.com/seung-lab/EMAnnotationSchemas/blob/ca81eff0f449bd7eb0392e0982db8f3636446a9e/emannotationschemas/utils.py#L14-L29 | train |
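To make the flattening concrete, here is a self-contained re-creation with stand-in marshmallow schemas (the `Synapse` schema and its field names are invented; the real `BoundSpatialPoint` lives in emannotationschemas):

```python
import marshmallow as mm

class BoundSpatialPoint(mm.Schema):
    # Stand-in for emannotationschemas' BoundSpatialPoint.
    position = mm.fields.List(mm.fields.Integer())

class Synapse(mm.Schema):
    pre_pt = mm.fields.Nested(BoundSpatialPoint)
    size = mm.fields.Float()

def flattened_bsp_keys(schema):
    # Same walk as get_flattened_bsp_keys_from_schema, run against the
    # stand-in class: every nested BoundSpatialPoint field contributes
    # '<field>.position'.
    return ["{}.position".format(key)
            for key, field in schema.declared_fields.items()
            if isinstance(field, mm.fields.Nested)
            and isinstance(field.schema, BoundSpatialPoint)]

print(flattened_bsp_keys(Synapse()))  # ['pre_pt.position']
```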
BernardFW/bernard | src/bernard/engine/triggers.py | SharedTrigger.lock | def lock(self) -> asyncio.Lock:
"""
Return and generate if required the lock for this request.
"""
if self.lock_key not in self.request.custom_content:
self.request.custom_content[self.lock_key] = asyncio.Lock()
return self.request.custom_content[self.lock_key] | python | def lock(self) -> asyncio.Lock:
"""
Return and generate if required the lock for this request.
"""
if self.lock_key not in self.request.custom_content:
self.request.custom_content[self.lock_key] = asyncio.Lock()
return self.request.custom_content[self.lock_key] | [
"def",
"lock",
"(",
"self",
")",
"->",
"asyncio",
".",
"Lock",
":",
"if",
"self",
".",
"lock_key",
"not",
"in",
"self",
".",
"request",
".",
"custom_content",
":",
"self",
".",
"request",
".",
"custom_content",
"[",
"self",
".",
"lock_key",
"]",
"=",
"asyncio",
".",
"Lock",
"(",
")",
"return",
"self",
".",
"request",
".",
"custom_content",
"[",
"self",
".",
"lock_key",
"]"
] | Return and generate if required the lock for this request. | [
"Return",
"and",
"generate",
"if",
"required",
"the",
"lock",
"for",
"this",
"request",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/triggers.py#L94-L102 | train |
BernardFW/bernard | src/bernard/engine/triggers.py | SharedTrigger.get_value | async def get_value(self):
"""
Get the value from the API. Make sure to use a lock in order not to
fetch the value twice at the same time.
"""
cc = self.request.custom_content
async with self.lock:
if self.content_key not in cc:
cc[self.content_key] = await self.call_api()
return cc[self.content_key] | python | async def get_value(self):
"""
Get the value from the API. Make sure to use a lock in order not to
fetch the value twice at the same time.
"""
cc = self.request.custom_content
async with self.lock:
if self.content_key not in cc:
cc[self.content_key] = await self.call_api()
return cc[self.content_key] | [
"async",
"def",
"get_value",
"(",
"self",
")",
":",
"cc",
"=",
"self",
".",
"request",
".",
"custom_content",
"async",
"with",
"self",
".",
"lock",
":",
"if",
"self",
".",
"content_key",
"not",
"in",
"cc",
":",
"cc",
"[",
"self",
".",
"content_key",
"]",
"=",
"await",
"self",
".",
"call_api",
"(",
")",
"return",
"cc",
"[",
"self",
".",
"content_key",
"]"
] | Get the value from the API. Make sure to use a lock in order not to
fetch the value twice at the same time. | [
"Get",
"the",
"value",
"from",
"the",
"API",
".",
"Make",
"sure",
"to",
"use",
"a",
"lock",
"in",
"order",
"not",
"to",
"fetch",
"the",
"value",
"twice",
"at",
"the",
"same",
"time",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/triggers.py#L119-L131 | train |
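The `lock`/`get_value` pair implements a per-request cache with single-flight semantics: concurrent awaiters share one API call instead of each issuing their own. A self-contained demonstration of the same pattern:

```python
import asyncio

class Shared:
    # Minimal re-creation of the SharedTrigger caching pattern; `content`
    # plays the role of request.custom_content.
    def __init__(self):
        self.content = {}
        self.lock = asyncio.Lock()
        self.api_calls = 0

    async def call_api(self):
        self.api_calls += 1
        await asyncio.sleep(0.05)  # pretend network latency
        return {"value": 42}

    async def get_value(self):
        # Under the lock, only the first caller actually hits the API;
        # the rest wait and then read the cached result.
        async with self.lock:
            if "key" not in self.content:
                self.content["key"] = await self.call_api()
        return self.content["key"]

async def main():
    shared = Shared()
    results = await asyncio.gather(*(shared.get_value() for _ in range(5)))
    print(shared.api_calls, results[0])  # 1 {'value': 42}

asyncio.run(main())
```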
BernardFW/bernard | src/bernard/engine/triggers.py | Text.rank | async def rank(self) -> Optional[float]:
"""
If there is a text layer inside the request, try to find a matching
text in the specified intent.
"""
if not self.request.has_layer(l.RawText):
return
tl = self.request.get_layer(l.RawText)
matcher = Matcher([
tuple(Trigram(y) for y in x)
for x in await self.intent.strings(self.request)
])
return matcher % Trigram(tl.text) | python | async def rank(self) -> Optional[float]:
"""
If there is a text layer inside the request, try to find a matching
text in the specified intent.
"""
if not self.request.has_layer(l.RawText):
return
tl = self.request.get_layer(l.RawText)
matcher = Matcher([
tuple(Trigram(y) for y in x)
for x in await self.intent.strings(self.request)
])
return matcher % Trigram(tl.text) | [
"async",
"def",
"rank",
"(",
"self",
")",
"->",
"Optional",
"[",
"float",
"]",
":",
"if",
"not",
"self",
".",
"request",
".",
"has_layer",
"(",
"l",
".",
"RawText",
")",
":",
"return",
"tl",
"=",
"self",
".",
"request",
".",
"get_layer",
"(",
"l",
".",
"RawText",
")",
"matcher",
"=",
"Matcher",
"(",
"[",
"tuple",
"(",
"Trigram",
"(",
"y",
")",
"for",
"y",
"in",
"x",
")",
"for",
"x",
"in",
"await",
"self",
".",
"intent",
".",
"strings",
"(",
"self",
".",
"request",
")",
"]",
")",
"return",
"matcher",
"%",
"Trigram",
"(",
"tl",
".",
"text",
")"
] | If there is a text layer inside the request, try to find a matching
text in the specified intent. | [
"If",
"there",
"is",
"a",
"text",
"layer",
"inside",
"the",
"request",
"try",
"to",
"find",
"a",
"matching",
"text",
"in",
"the",
"specified",
"intent",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/triggers.py#L163-L178 | train |
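A hedged sketch of the trigram scoring that `Text.rank` relies on; the `bernard.trigram` import path is assumed from the names used above:

```python
# Assumed import path -- Matcher and Trigram are used but not imported
# in the excerpt above.
from bernard.trigram import Matcher, Trigram

# One intent with two alternative phrasings, mirroring how rank() builds
# its matcher from intent.strings().
matcher = Matcher([
    (Trigram("what's the weather"), Trigram("weather forecast")),
])

# `%` yields a similarity score; rank() returns it as the trigger's
# confidence, so a close-but-misspelled input still scores high.
score = matcher % Trigram("whats the wether")
print(score)
```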
BernardFW/bernard | src/bernard/engine/triggers.py | Choice._rank_qr | def _rank_qr(self, choices):
"""
Look for the QuickReply layer's slug in the available choices.
"""
from bernard.platforms.facebook import layers as fbl
try:
qr = self.request.get_layer(fbl.QuickReply)
self.chosen = choices[qr.slug]
self.slug = qr.slug
if self.when is None or self.when == qr.slug:
return 1.0
except KeyError:
pass | python | def _rank_qr(self, choices):
"""
Look for the QuickReply layer's slug in the available choices.
"""
from bernard.platforms.facebook import layers as fbl
try:
qr = self.request.get_layer(fbl.QuickReply)
self.chosen = choices[qr.slug]
self.slug = qr.slug
if self.when is None or self.when == qr.slug:
return 1.0
except KeyError:
pass | [
"def",
"_rank_qr",
"(",
"self",
",",
"choices",
")",
":",
"from",
"bernard",
".",
"platforms",
".",
"facebook",
"import",
"layers",
"as",
"fbl",
"try",
":",
"qr",
"=",
"self",
".",
"request",
".",
"get_layer",
"(",
"fbl",
".",
"QuickReply",
")",
"self",
".",
"chosen",
"=",
"choices",
"[",
"qr",
".",
"slug",
"]",
"self",
".",
"slug",
"=",
"qr",
".",
"slug",
"if",
"self",
".",
"when",
"is",
"None",
"or",
"self",
".",
"when",
"==",
"qr",
".",
"slug",
":",
"return",
"1.0",
"except",
"KeyError",
":",
"pass"
] | Look for the QuickReply layer's slug in the available choices. | [
"Look",
"for",
"the",
"QuickReply",
"layer",
"s",
"slug",
"into",
"available",
"choices",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/triggers.py#L200-L214 | train |
BernardFW/bernard | src/bernard/engine/triggers.py | Choice._rank_text | async def _rank_text(self, choices):
"""
Try to match the TextLayer with choice's intents.
"""
tl = self.request.get_layer(l.RawText)
best = 0.0
for slug, params in choices.items():
strings = []
if params['intent']:
intent = getattr(intents, params['intent'])
strings += await intent.strings(self.request)
if params['text']:
strings.append((params['text'],))
matcher = Matcher([tuple(Trigram(y) for y in x) for x in strings])
score = matcher % Trigram(await render(tl.text, self.request))
if score > best:
self.chosen = params
self.slug = slug
best = score
if self.when is None or self.slug == self.when:
return best | python | async def _rank_text(self, choices):
"""
Try to match the TextLayer with choice's intents.
"""
tl = self.request.get_layer(l.RawText)
best = 0.0
for slug, params in choices.items():
strings = []
if params['intent']:
intent = getattr(intents, params['intent'])
strings += await intent.strings(self.request)
if params['text']:
strings.append((params['text'],))
matcher = Matcher([tuple(Trigram(y) for y in x) for x in strings])
score = matcher % Trigram(await render(tl.text, self.request))
if score > best:
self.chosen = params
self.slug = slug
best = score
if self.when is None or self.slug == self.when:
return best | [
"async",
"def",
"_rank_text",
"(",
"self",
",",
"choices",
")",
":",
"tl",
"=",
"self",
".",
"request",
".",
"get_layer",
"(",
"l",
".",
"RawText",
")",
"best",
"=",
"0.0",
"for",
"slug",
",",
"params",
"in",
"choices",
".",
"items",
"(",
")",
":",
"strings",
"=",
"[",
"]",
"if",
"params",
"[",
"'intent'",
"]",
":",
"intent",
"=",
"getattr",
"(",
"intents",
",",
"params",
"[",
"'intent'",
"]",
")",
"strings",
"+=",
"await",
"intent",
".",
"strings",
"(",
"self",
".",
"request",
")",
"if",
"params",
"[",
"'text'",
"]",
":",
"strings",
".",
"append",
"(",
"(",
"params",
"[",
"'text'",
"]",
",",
")",
")",
"matcher",
"=",
"Matcher",
"(",
"[",
"tuple",
"(",
"Trigram",
"(",
"y",
")",
"for",
"y",
"in",
"x",
")",
"for",
"x",
"in",
"strings",
"]",
")",
"score",
"=",
"matcher",
"%",
"Trigram",
"(",
"await",
"render",
"(",
"tl",
".",
"text",
",",
"self",
".",
"request",
")",
")",
"if",
"score",
">",
"best",
":",
"self",
".",
"chosen",
"=",
"params",
"self",
".",
"slug",
"=",
"slug",
"best",
"=",
"score",
"if",
"self",
".",
"when",
"is",
"None",
"or",
"self",
".",
"slug",
"==",
"self",
".",
"when",
":",
"return",
"best"
] | Try to match the TextLayer with choice's intents. | [
"Try",
"to",
"match",
"the",
"TextLayer",
"with",
"choice",
"s",
"intents",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/triggers.py#L216-L243 | train |
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_timeseries.py | NCEITimeSeriesOrthogonal2_0.check_recommended_attributes | def check_recommended_attributes(self, dataset):
'''
Feature type specific check of global recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
# Check time_coverage_duration and resolution
for attr in ['time_coverage_duration', 'time_coverage_resolution']:
attr_value = getattr(dataset, attr, '')
try:
parse_duration(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except Exception:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
results.append(recommended_ctx.to_result())
return results | python | def check_recommended_attributes(self, dataset):
'''
Feature type specific check of global recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
# Check time_coverage_duration and resolution
for attr in ['time_coverage_duration', 'time_coverage_resolution']:
attr_value = getattr(dataset, attr, '')
try:
parse_duration(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except Exception:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
results.append(recommended_ctx.to_result())
return results | [
"def",
"check_recommended_attributes",
"(",
"self",
",",
"dataset",
")",
":",
"results",
"=",
"[",
"]",
"recommended_ctx",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"MEDIUM",
",",
"'Recommended global attributes'",
")",
"# Check time_coverage_duration and resolution",
"for",
"attr",
"in",
"[",
"'time_coverage_duration'",
",",
"'time_coverage_resolution'",
"]",
":",
"attr_value",
"=",
"getattr",
"(",
"dataset",
",",
"attr",
",",
"''",
")",
"try",
":",
"parse_duration",
"(",
"attr_value",
")",
"recommended_ctx",
".",
"assert_true",
"(",
"True",
",",
"''",
")",
"# Score it True!",
"except",
"Exception",
":",
"recommended_ctx",
".",
"assert_true",
"(",
"False",
",",
"'{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'",
".",
"format",
"(",
"attr",
",",
"attr_value",
")",
")",
"results",
".",
"append",
"(",
"recommended_ctx",
".",
"to_result",
"(",
")",
")",
"return",
"results"
] | Feature type specific check of global recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset | [
"Feature",
"type",
"specific",
"check",
"of",
"global",
"recommended",
"attributes",
"."
] | 963fefd7fa43afd32657ac4c36aad4ddb4c25acf | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_timeseries.py#L154-L171 | train |
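The check accepts any ISO-8601 duration that `parse_duration` can parse. Assuming the `isodate` package (a common provider of `parse_duration`), valid and invalid values look like this:

```python
from isodate import parse_duration  # assumed source of parse_duration

# Values that would satisfy the check above:
print(parse_duration("PT1M30S"))  # 0:01:30
print(parse_duration("P1DT6H"))   # 1 day, 6:00:00

# A malformed value raises, which the check turns into a failed assert:
try:
    parse_duration("90 seconds")
except Exception as err:
    print("rejected:", err)
```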
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_timeseries.py | NCEITimeSeriesIncompleteBase.check_dimensions | def check_dimensions(self, dataset):
'''
Checks that the feature types of this dataset are consistent with a time series incomplete dataset
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are time-series incomplete feature types')
message = '{} must be a valid timeseries feature type. It must have dimensions of (timeSeries, time).'
message += ' And all coordinates must have dimensions of (timeSeries)'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_multi_timeseries_incomplete(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable)
)
return required_ctx.to_result() | python | def check_dimensions(self, dataset):
'''
Checks that the feature types of this dataset are consistent with a time series incomplete dataset
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are time-series incomplete feature types')
message = '{} must be a valid timeseries feature type. It must have dimensions of (timeSeries, time).'
message += ' And all coordinates must have dimensions of (timeSeries)'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_multi_timeseries_incomplete(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable)
)
return required_ctx.to_result() | [
"def",
"check_dimensions",
"(",
"self",
",",
"dataset",
")",
":",
"required_ctx",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"HIGH",
",",
"'All geophysical variables are time-series incomplete feature types'",
")",
"message",
"=",
"'{} must be a valid timeseries feature type. It must have dimensions of (timeSeries, time).'",
"message",
"+=",
"' And all coordinates must have dimensions of (timeSeries)'",
"for",
"variable",
"in",
"util",
".",
"get_geophysical_variables",
"(",
"dataset",
")",
":",
"is_valid",
"=",
"util",
".",
"is_multi_timeseries_incomplete",
"(",
"dataset",
",",
"variable",
")",
"required_ctx",
".",
"assert_true",
"(",
"is_valid",
",",
"message",
".",
"format",
"(",
"variable",
")",
")",
"return",
"required_ctx",
".",
"to_result",
"(",
")"
] | Checks that the feature types of this dataset are consistent with a time series incomplete dataset
:param netCDF4.Dataset dataset: An open netCDF dataset | [
"Checks",
"that",
"the",
"feature",
"types",
"of",
"this",
"dataset",
"are",
"consitent",
"with",
"a",
"time",
"series",
"incomplete",
"dataset"
] | 963fefd7fa43afd32657ac4c36aad4ddb4c25acf | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_timeseries.py#L181-L196 | train |
garenchan/policy | policy/_cache.py | read_file | def read_file(filename: str, force_reload=False):
"""Read a file if it has been modified.
:param filename: Name of the file to read from.
:param force_reload: Whether to reload the file.
:returns: A tuple with a boolean specifying if the data is fresh or not.
"""
if force_reload:
_delete_cached_file(filename)
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug('Reloading cached file %s', filename)
with open(filename) as fp:
cache_info['data'] = fp.read()
cache_info['mtime'] = mtime
reloaded = True
return reloaded, cache_info['data'] | python | def read_file(filename: str, force_reload=False):
"""Read a file if it has been modified.
:param filename: Name of the file to read from.
:param force_reload: Whether to reload the file.
:returns: A tuple with a boolean specifying if the data is fresh or not.
"""
if force_reload:
_delete_cached_file(filename)
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug('Reloading cached file %s', filename)
with open(filename) as fp:
cache_info['data'] = fp.read()
cache_info['mtime'] = mtime
reloaded = True
return reloaded, cache_info['data'] | [
"def",
"read_file",
"(",
"filename",
":",
"str",
",",
"force_reload",
"=",
"False",
")",
":",
"if",
"force_reload",
":",
"_delete_cached_file",
"(",
"filename",
")",
"reloaded",
"=",
"False",
"mtime",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"filename",
")",
"cache_info",
"=",
"CACHE",
".",
"setdefault",
"(",
"filename",
",",
"{",
"}",
")",
"if",
"not",
"cache_info",
"or",
"mtime",
">",
"cache_info",
".",
"get",
"(",
"'mtime'",
",",
"0",
")",
":",
"LOG",
".",
"debug",
"(",
"'Reloading cached file %s'",
",",
"filename",
")",
"with",
"open",
"(",
"filename",
")",
"as",
"fp",
":",
"cache_info",
"[",
"'data'",
"]",
"=",
"fp",
".",
"read",
"(",
")",
"cache_info",
"[",
"'mtime'",
"]",
"=",
"mtime",
"reloaded",
"=",
"True",
"return",
"reloaded",
",",
"cache_info",
"[",
"'data'",
"]"
] | Read a file if it has been modified.
:param filename: Name of the file to read from.
:param force_reload: Whether to reload the file.
:returns: A tuple with a boolean specifying if the data is fresh or not. | [
"Read",
"a",
"file",
"if",
"it",
"has",
"been",
"modified",
"."
] | 7709ae5f371146f8c90380d0877a5e59d731f644 | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_cache.py#L19-L41 | train |
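A self-contained demonstration of the mtime-based caching scheme (re-implemented here so it runs standalone; `_delete_cached_file` is replaced by a plain `pop`):

```python
import os
import tempfile

CACHE = {}

def read_file(filename, force_reload=False):
    # Same scheme as policy._cache.read_file: re-read only when the file
    # on disk is newer than the cached copy.
    if force_reload:
        CACHE.pop(filename, None)
    reloaded = False
    mtime = os.path.getmtime(filename)
    cache_info = CACHE.setdefault(filename, {})
    if not cache_info or mtime > cache_info.get('mtime', 0):
        with open(filename) as fp:
            cache_info['data'] = fp.read()
        cache_info['mtime'] = mtime
        reloaded = True
    return reloaded, cache_info['data']

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    f.write('{}')

print(read_file(f.name))  # (True, '{}')  -- first read hits the disk
print(read_file(f.name))  # (False, '{}') -- second read is cached
os.unlink(f.name)
```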
COALAIP/pycoalaip | coalaip/model_validators.py | use_model_attr | def use_model_attr(attr):
"""Use the validator set on a separate attribute on the class."""
def use_model_validator(instance, attribute, value):
getattr(instance, attr)(instance, attribute, value)
return use_model_validator | python | def use_model_attr(attr):
"""Use the validator set on a separate attribute on the class."""
def use_model_validator(instance, attribute, value):
getattr(instance, attr)(instance, attribute, value)
return use_model_validator | [
"def",
"use_model_attr",
"(",
"attr",
")",
":",
"def",
"use_model_validator",
"(",
"instance",
",",
"attribute",
",",
"value",
")",
":",
"getattr",
"(",
"instance",
",",
"attr",
")",
"(",
"instance",
",",
"attribute",
",",
"value",
")",
"return",
"use_model_validator"
] | Use the validator set on a separate attribute on the class. | [
"Use",
"the",
"validator",
"set",
"on",
"a",
"separate",
"attribute",
"on",
"the",
"class",
"."
] | cecc8f6ff4733f0525fafcee63647753e832f0be | https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L13-L18 | train |
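`use_model_attr` lets an attrs class defer validation to a callable stored on another attribute. A runnable sketch with an invented `Model` class (attrs runs validators in declaration order, so `validator` is assigned before `amount` is checked):

```python
import attr

def use_model_attr(attr_name):
    # Same indirection as above: look up the real validator on the
    # instance at validation time.
    def use_model_validator(instance, attribute, value):
        getattr(instance, attr_name)(instance, attribute, value)
    return use_model_validator

def must_be_positive(instance, attribute, value):
    if value <= 0:
        raise ValueError('{} must be positive'.format(attribute.name))

@attr.s
class Model:
    validator = attr.ib()  # holds the check applied to `amount`
    amount = attr.ib(validator=use_model_attr('validator'))

Model(validator=must_be_positive, amount=3)  # passes
try:
    Model(validator=must_be_positive, amount=0)
except ValueError as err:
    print(err)  # amount must be positive
```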
COALAIP/pycoalaip | coalaip/model_validators.py | is_creation_model | def is_creation_model(instance, attribute, value):
"""Must include at least a ``name`` key."""
creation_name = value.get('name')
if not isinstance(creation_name, str):
instance_name = instance.__class__.__name__
err_str = ("'name' must be given as a string in the '{attr}' "
"parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=creation_name)
raise ModelDataError(err_str) | python | def is_creation_model(instance, attribute, value):
"""Must include at least a ``name`` key."""
creation_name = value.get('name')
if not isinstance(creation_name, str):
instance_name = instance.__class__.__name__
err_str = ("'name' must be given as a string in the '{attr}' "
"parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=creation_name)
raise ModelDataError(err_str) | [
"def",
"is_creation_model",
"(",
"instance",
",",
"attribute",
",",
"value",
")",
":",
"creation_name",
"=",
"value",
".",
"get",
"(",
"'name'",
")",
"if",
"not",
"isinstance",
"(",
"creation_name",
",",
"str",
")",
":",
"instance_name",
"=",
"instance",
".",
"__class__",
".",
"__name__",
"err_str",
"=",
"(",
"\"'name' must be given as a string in the '{attr}' \"",
"\"parameter of a '{cls}'. Given \"",
"\"'{value}'\"",
")",
".",
"format",
"(",
"attr",
"=",
"attribute",
".",
"name",
",",
"cls",
"=",
"instance_name",
",",
"value",
"=",
"creation_name",
")",
"raise",
"ModelDataError",
"(",
"err_str",
")"
] | Must include at least a ``name`` key. | [
"Must",
"include",
"at",
"least",
"a",
"name",
"key",
"."
] | cecc8f6ff4733f0525fafcee63647753e832f0be | https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L45-L56 | train |
COALAIP/pycoalaip | coalaip/model_validators.py | is_manifestation_model | def is_manifestation_model(instance, attribute, value):
"""Must include a ``manifestationOfWork`` key."""
instance_name = instance.__class__.__name__
is_creation_model(instance, attribute, value)
manifestation_of = value.get('manifestationOfWork')
if not isinstance(manifestation_of, str):
err_str = ("'manifestationOfWork' must be given as a string in the "
"'{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=manifestation_of)
raise ModelDataError(err_str)
"""Must include a ``manifestationOfWork`` key."""
instance_name = instance.__class__.__name__
is_creation_model(instance, attribute, value)
manifestation_of = value.get('manifestationOfWork')
if not isinstance(manifestation_of, str):
err_str = ("'manifestationOfWork' must be given as a string in the "
"'{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=manifestation_of)
raise ModelDataError(err_str)
"def",
"is_manifestation_model",
"(",
"instance",
",",
"attribute",
",",
"value",
")",
":",
"instance_name",
"=",
"instance",
".",
"__class__",
".",
"__name__",
"is_creation_model",
"(",
"instance",
",",
"attribute",
",",
"value",
")",
"manifestation_of",
"=",
"value",
".",
"get",
"(",
"'manifestationOfWork'",
")",
"if",
"not",
"isinstance",
"(",
"manifestation_of",
",",
"str",
")",
":",
"err_str",
"=",
"(",
"\"'manifestationOfWork' must be given as a string in the \"",
"\"'{attr}' parameter of a '{cls}'. Given \"",
"\"'{value}'\"",
")",
".",
"format",
"(",
"attr",
"=",
"attribute",
".",
"name",
",",
"cls",
"=",
"instance_name",
",",
"value",
"=",
"manifestation_of",
")",
"print",
"(",
"err_str",
")"
] | Must include a ``manifestationOfWork`` key. | [
"Must",
"include",
"a",
"manifestationOfWork",
"key",
"."
] | cecc8f6ff4733f0525fafcee63647753e832f0be | https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L68-L81 | train |
klmitch/turnstile | turnstile/tools.py | add_preprocessor | def add_preprocessor(preproc):
"""
Define a preprocessor to run after the arguments are parsed and
before the function is executed, when running in console script
mode.
:param preproc: The callable, which will be passed the Namespace
object generated by argparse.
"""
def decorator(func):
func = ScriptAdaptor._wrap(func)
func._add_preprocessor(preproc)
return func
return decorator | python | def add_preprocessor(preproc):
"""
Define a preprocessor to run after the arguments are parsed and
before the function is executed, when running in console script
mode.
:param preproc: The callable, which will be passed the Namespace
object generated by argparse.
"""
def decorator(func):
func = ScriptAdaptor._wrap(func)
func._add_preprocessor(preproc)
return func
return decorator | [
"def",
"add_preprocessor",
"(",
"preproc",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"func",
"=",
"ScriptAdaptor",
".",
"_wrap",
"(",
"func",
")",
"func",
".",
"_add_preprocessor",
"(",
"preproc",
")",
"return",
"func",
"return",
"decorator"
] | Define a preprocessor to run after the arguments are parsed and
before the function is executed, when running in console script
mode.
:param preproc: The callable, which will be passed the Namespace
object generated by argparse. | [
"Define",
"a",
"preprocessor",
"to",
"run",
"after",
"the",
"arguments",
"are",
"parsed",
"and",
"before",
"the",
"function",
"is",
"executed",
"when",
"running",
"in",
"console",
"script",
"mode",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L255-L269 | train |
klmitch/turnstile | turnstile/tools.py | add_postprocessor | def add_postprocessor(postproc):
"""
Define a postprocessor to run after the function is executed, when
running in console script mode.
:param postproc: The callable, which will be passed the Namespace
object generated by argparse and the return
result of the function. The return result of the
callable will be used as the final return result
(or as the result fed into the next
postprocessor).
"""
def decorator(func):
func = ScriptAdaptor._wrap(func)
func._add_postprocessor(postproc)
return func
return decorator | python | def add_postprocessor(postproc):
"""
Define a postprocessor to run after the function is executed, when
running in console script mode.
:param postproc: The callable, which will be passed the Namespace
object generated by argparse and the return
result of the function. The return result of the
callable will be used as the final return result
(or as the result fed into the next
postprocessor).
"""
def decorator(func):
func = ScriptAdaptor._wrap(func)
func._add_postprocessor(postproc)
return func
return decorator | [
"def",
"add_postprocessor",
"(",
"postproc",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"func",
"=",
"ScriptAdaptor",
".",
"_wrap",
"(",
"func",
")",
"func",
".",
"_add_postprocessor",
"(",
"postproc",
")",
"return",
"func",
"return",
"decorator"
] | Define a postprocessor to run after the function is executed, when
running in console script mode.
:param postproc: The callable, which will be passed the Namespace
object generated by argparse and the return
result of the function. The return result of the
callable will be used as the final return result
(or as the result fed into the next
postprocessor). | [
"Define",
"a",
"postprocessor",
"to",
"run",
"after",
"the",
"function",
"is",
"executed",
"when",
"running",
"in",
"console",
"script",
"mode",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L272-L289 | train |
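
A sketch of how the two decorators above compose, assuming they are importable from turnstile.tools; the wrapped function becomes a ScriptAdaptor (see the records further below), and the function and helper names here are hypothetical:

from turnstile import tools

def check_conf(args):            # preprocessor: runs after arg parsing
    if not args.conf_file.endswith('.conf'):
        raise ValueError('expected a .conf file')

def label_result(args, result):  # postprocessor: rewrites the return value
    return 'result: %r' % (result,)

@tools.add_preprocessor(check_conf)
@tools.add_postprocessor(label_result)
def my_command(conf_file):
    return conf_file

# my_command is now a ScriptAdaptor; _wrap is idempotent, so stacking
# both decorators attaches both hooks to the same adaptor object.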
klmitch/turnstile | turnstile/tools.py | _setup_logging | def _setup_logging(args):
"""
Set up logging for the script, based on the configuration
specified by the 'logging' attribute of the command line
arguments.
:param args: A Namespace object containing a 'logging' attribute
specifying the name of a logging configuration file
to use. If not present or not given, a basic logging
configuration will be set.
"""
log_conf = getattr(args, 'logging', None)
if log_conf:
logging.config.fileConfig(log_conf)
else:
logging.basicConfig() | python | def _setup_logging(args):
"""
Set up logging for the script, based on the configuration
specified by the 'logging' attribute of the command line
arguments.
:param args: A Namespace object containing a 'logging' attribute
specifying the name of a logging configuration file
to use. If not present or not given, a basic logging
configuration will be set.
"""
log_conf = getattr(args, 'logging', None)
if log_conf:
logging.config.fileConfig(log_conf)
else:
logging.basicConfig() | [
"def",
"_setup_logging",
"(",
"args",
")",
":",
"log_conf",
"=",
"getattr",
"(",
"args",
",",
"'logging'",
",",
"None",
")",
"if",
"log_conf",
":",
"logging",
".",
"config",
".",
"fileConfig",
"(",
"log_conf",
")",
"else",
":",
"logging",
".",
"basicConfig",
"(",
")"
] | Set up logging for the script, based on the configuration
specified by the 'logging' attribute of the command line
arguments.
:param args: A Namespace object containing a 'logging' attribute
specifying the name of a logging configuration file
to use. If not present or not given, a basic logging
configuration will be set. | [
"Set",
"up",
"logging",
"for",
"the",
"script",
"based",
"on",
"the",
"configuration",
"specified",
"by",
"the",
"logging",
"attribute",
"of",
"the",
"command",
"line",
"arguments",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L292-L308 | train |
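
Because _setup_logging only reads the optional 'logging' attribute, a bare argparse Namespace is enough to exercise it; a small sketch (the config file name is hypothetical and would need to exist):

import argparse
from turnstile import tools

tools._setup_logging(argparse.Namespace())              # basicConfig()
tools._setup_logging(argparse.Namespace(logging=None))  # basicConfig()
tools._setup_logging(argparse.Namespace(logging='log.ini'))  # fileConfig('log.ini')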
klmitch/turnstile | turnstile/tools.py | setup_limits | def setup_limits(conf_file, limits_file, do_reload=True,
dry_run=False, debug=False):
"""
Set up or update limits in the Redis database.
:param conf_file: Name of the configuration file, for connecting
to the Redis database.
:param limits_file: Name of the XML file describing the limits to
configure.
:param do_reload: Controls reloading behavior. If True (the
default), a reload command is issued. If False,
no reload command is issued. String values
result in a reload command of the given load
type, and integer or float values result in a
reload command of type 'spread' with the given
spread interval.
:param dry_run: If True, no changes are made to the database.
Implies debug=True.
:param debug: If True, debugging messages are emitted while
loading the limits and updating the database.
"""
# If dry_run is set, default debug to True
if dry_run:
debug = True
# Connect to the database...
conf = config.Config(conf_file=conf_file)
db = conf.get_database()
limits_key = conf['control'].get('limits_key', 'limits')
control_channel = conf['control'].get('channel', 'control')
# Parse the limits file
limits_tree = etree.parse(limits_file)
# Now, we parse the limits XML file
lims = []
for idx, lim in enumerate(limits_tree.getroot()):
# Skip tags we don't recognize
if lim.tag != 'limit':
warnings.warn("Unrecognized tag %r in limits file at index %d" %
(lim.tag, idx))
continue
# Construct the limit and add it to the list of limits
try:
lims.append(parse_limit_node(db, idx, lim))
except Exception as exc:
warnings.warn("Couldn't understand limit at index %d: %s" %
(idx, exc))
continue
# Now that we have the limits, let's install them
if debug:
print >>sys.stderr, "Installing the following limits:"
for lim in lims:
print >>sys.stderr, " %r" % lim
if not dry_run:
database.limit_update(db, limits_key, lims)
# Were we requested to reload the limits?
if do_reload is False:
return
# OK, figure out what kind of reload to do
params = []
if do_reload is True:
# Nothing to do; use default semantics
pass
elif (isinstance(do_reload, (int, long, float)) or
(isinstance(do_reload, basestring) and do_reload.isdigit())):
params = ['spread', do_reload]
else:
params = [str(do_reload)]
# Issue the reload command
if debug:
cmd = ['reload']
cmd.extend(params)
print >>sys.stderr, ("Issuing command: %s" %
' '.join(str(c) for c in cmd))
if not dry_run:
database.command(db, control_channel, 'reload', *params) | python | def setup_limits(conf_file, limits_file, do_reload=True,
dry_run=False, debug=False):
"""
Set up or update limits in the Redis database.
:param conf_file: Name of the configuration file, for connecting
to the Redis database.
:param limits_file: Name of the XML file describing the limits to
configure.
:param do_reload: Controls reloading behavior. If True (the
default), a reload command is issued. If False,
no reload command is issued. String values
result in a reload command of the given load
type, and integer or float values result in a
reload command of type 'spread' with the given
spread interval.
:param dry_run: If True, no changes are made to the database.
Implies debug=True.
:param debug: If True, debugging messages are emitted while
loading the limits and updating the database.
"""
# If dry_run is set, default debug to True
if dry_run:
debug = True
# Connect to the database...
conf = config.Config(conf_file=conf_file)
db = conf.get_database()
limits_key = conf['control'].get('limits_key', 'limits')
control_channel = conf['control'].get('channel', 'control')
# Parse the limits file
limits_tree = etree.parse(limits_file)
# Now, we parse the limits XML file
lims = []
for idx, lim in enumerate(limits_tree.getroot()):
# Skip tags we don't recognize
if lim.tag != 'limit':
warnings.warn("Unrecognized tag %r in limits file at index %d" %
(lim.tag, idx))
continue
# Construct the limit and add it to the list of limits
try:
lims.append(parse_limit_node(db, idx, lim))
except Exception as exc:
warnings.warn("Couldn't understand limit at index %d: %s" %
(idx, exc))
continue
# Now that we have the limits, let's install them
if debug:
print >>sys.stderr, "Installing the following limits:"
for lim in lims:
print >>sys.stderr, " %r" % lim
if not dry_run:
database.limit_update(db, limits_key, lims)
# Were we requested to reload the limits?
if do_reload is False:
return
# OK, figure out what kind of reload to do
params = []
if do_reload is True:
# Nothing to do; use default semantics
pass
elif (isinstance(do_reload, (int, long, float)) or
(isinstance(do_reload, basestring) and do_reload.isdigit())):
params = ['spread', do_reload]
else:
params = [str(do_reload)]
# Issue the reload command
if debug:
cmd = ['reload']
cmd.extend(params)
print >>sys.stderr, ("Issuing command: %s" %
' '.join(str(c) for c in cmd))
if not dry_run:
database.command(db, control_channel, 'reload', *params) | [
"def",
"setup_limits",
"(",
"conf_file",
",",
"limits_file",
",",
"do_reload",
"=",
"True",
",",
"dry_run",
"=",
"False",
",",
"debug",
"=",
"False",
")",
":",
"# If dry_run is set, default debug to True",
"if",
"dry_run",
":",
"debug",
"=",
"True",
"# Connect to the database...",
"conf",
"=",
"config",
".",
"Config",
"(",
"conf_file",
"=",
"conf_file",
")",
"db",
"=",
"conf",
".",
"get_database",
"(",
")",
"limits_key",
"=",
"conf",
"[",
"'control'",
"]",
".",
"get",
"(",
"'limits_key'",
",",
"'limits'",
")",
"control_channel",
"=",
"conf",
"[",
"'control'",
"]",
".",
"get",
"(",
"'channel'",
",",
"'control'",
")",
"# Parse the limits file",
"limits_tree",
"=",
"etree",
".",
"parse",
"(",
"limits_file",
")",
"# Now, we parse the limits XML file",
"lims",
"=",
"[",
"]",
"for",
"idx",
",",
"lim",
"in",
"enumerate",
"(",
"limits_tree",
".",
"getroot",
"(",
")",
")",
":",
"# Skip tags we don't recognize",
"if",
"lim",
".",
"tag",
"!=",
"'limit'",
":",
"warnings",
".",
"warn",
"(",
"\"Unrecognized tag %r in limits file at index %d\"",
"%",
"(",
"lim",
".",
"tag",
",",
"idx",
")",
")",
"continue",
"# Construct the limit and add it to the list of limits",
"try",
":",
"lims",
".",
"append",
"(",
"parse_limit_node",
"(",
"db",
",",
"idx",
",",
"lim",
")",
")",
"except",
"Exception",
"as",
"exc",
":",
"warnings",
".",
"warn",
"(",
"\"Couldn't understand limit at index %d: %s\"",
"%",
"(",
"idx",
",",
"exc",
")",
")",
"continue",
"# Now that we have the limits, let's install them",
"if",
"debug",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"Installing the following limits:\"",
"for",
"lim",
"in",
"lims",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"\" %r\"",
"%",
"lim",
"if",
"not",
"dry_run",
":",
"database",
".",
"limit_update",
"(",
"db",
",",
"limits_key",
",",
"lims",
")",
"# Were we requested to reload the limits?",
"if",
"do_reload",
"is",
"False",
":",
"return",
"# OK, figure out what kind of reload to do",
"params",
"=",
"[",
"]",
"if",
"do_reload",
"is",
"True",
":",
"# Nothing to do; use default semantics",
"pass",
"elif",
"(",
"isinstance",
"(",
"do_reload",
",",
"(",
"int",
",",
"long",
",",
"float",
")",
")",
"or",
"(",
"isinstance",
"(",
"do_reload",
",",
"basestring",
")",
"and",
"do_reload",
".",
"isdigit",
"(",
")",
")",
")",
":",
"params",
"=",
"[",
"'spread'",
",",
"do_reload",
"]",
"else",
":",
"params",
"=",
"[",
"str",
"(",
"do_reload",
")",
"]",
"# Issue the reload command",
"if",
"debug",
":",
"cmd",
"=",
"[",
"'reload'",
"]",
"cmd",
".",
"extend",
"(",
"params",
")",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"\"Issuing command: %s\"",
"%",
"' '",
".",
"join",
"(",
"str",
"(",
"c",
")",
"for",
"c",
"in",
"cmd",
")",
")",
"if",
"not",
"dry_run",
":",
"database",
".",
"command",
"(",
"db",
",",
"control_channel",
",",
"'reload'",
",",
"*",
"params",
")"
] | Set up or update limits in the Redis database.
:param conf_file: Name of the configuration file, for connecting
to the Redis database.
:param limits_file: Name of the XML file describing the limits to
configure.
:param do_reload: Controls reloading behavior. If True (the
default), a reload command is issued. If False,
no reload command is issued. String values
result in a reload command of the given load
type, and integer or float values result in a
reload command of type 'spread' with the given
spread interval.
:param dry_run: If True, no changes are made to the database.
Implies debug=True.
:param debug: If True, debugging messages are emitted while
loading the limits and updating the database. | [
"Set",
"up",
"or",
"update",
"limits",
"in",
"the",
"Redis",
"database",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L473-L555 | train |
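
Hypothetical invocations of setup_limits; 'turnstile.conf' is assumed to point at a reachable Redis and 'limits.xml' to contain <limit> nodes:

from turnstile import tools

# Dry run: parse limits.xml and report what would be installed,
# without writing to Redis or issuing a reload (implies debug=True).
tools.setup_limits('turnstile.conf', 'limits.xml', dry_run=True)

# Install and spread the reload over 30 seconds; an int/float (or a
# numeric string) for do_reload becomes the command "reload spread 30".
tools.setup_limits('turnstile.conf', 'limits.xml', do_reload=30)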
klmitch/turnstile | turnstile/tools.py | make_limit_node | def make_limit_node(root, limit):
"""
Given a Limit object, generate an XML node.
:param root: The root node of the XML tree being built.
:param limit: The Limit object to serialize to XML.
"""
# Build the base limit node
limit_node = etree.SubElement(root, 'limit',
{'class': limit._limit_full_name})
# Walk through all the recognized attributes
for attr in sorted(limit.attrs):
desc = limit.attrs[attr]
attr_type = desc.get('type', str)
value = getattr(limit, attr)
# Determine the default value, if we have one...
if 'default' in desc:
default = (desc['default']() if callable(desc['default']) else
desc['default'])
# Skip attributes that have their default settings
if value == default:
continue
# Set up the attr node
attr_node = etree.SubElement(limit_node, 'attr', name=attr)
# Treat lists and dicts specially
if attr_type == list:
for val in value:
val_node = etree.SubElement(attr_node, 'value')
val_node.text = str(val)
elif attr_type == dict:
for key, val in sorted(value.items(), key=lambda x: x[0]):
val_node = etree.SubElement(attr_node, 'value', key=key)
val_node.text = str(val)
else:
attr_node.text = str(value) | python | def make_limit_node(root, limit):
"""
Given a Limit object, generate an XML node.
:param root: The root node of the XML tree being built.
:param limit: The Limit object to serialize to XML.
"""
# Build the base limit node
limit_node = etree.SubElement(root, 'limit',
{'class': limit._limit_full_name})
# Walk through all the recognized attributes
for attr in sorted(limit.attrs):
desc = limit.attrs[attr]
attr_type = desc.get('type', str)
value = getattr(limit, attr)
# Determine the default value, if we have one...
if 'default' in desc:
default = (desc['default']() if callable(desc['default']) else
desc['default'])
# Skip attributes that have their default settings
if value == default:
continue
# Set up the attr node
attr_node = etree.SubElement(limit_node, 'attr', name=attr)
# Treat lists and dicts specially
if attr_type == list:
for val in value:
val_node = etree.SubElement(attr_node, 'value')
val_node.text = str(val)
elif attr_type == dict:
for key, val in sorted(value.items(), key=lambda x: x[0]):
val_node = etree.SubElement(attr_node, 'value', key=key)
val_node.text = str(val)
else:
attr_node.text = str(value) | [
"def",
"make_limit_node",
"(",
"root",
",",
"limit",
")",
":",
"# Build the base limit node",
"limit_node",
"=",
"etree",
".",
"SubElement",
"(",
"root",
",",
"'limit'",
",",
"{",
"'class'",
":",
"limit",
".",
"_limit_full_name",
"}",
")",
"# Walk through all the recognized attributes",
"for",
"attr",
"in",
"sorted",
"(",
"limit",
".",
"attrs",
")",
":",
"desc",
"=",
"limit",
".",
"attrs",
"[",
"attr",
"]",
"attr_type",
"=",
"desc",
".",
"get",
"(",
"'type'",
",",
"str",
")",
"value",
"=",
"getattr",
"(",
"limit",
",",
"attr",
")",
"# Determine the default value, if we have one...",
"if",
"'default'",
"in",
"desc",
":",
"default",
"=",
"(",
"desc",
"[",
"'default'",
"]",
"(",
")",
"if",
"callable",
"(",
"desc",
"[",
"'default'",
"]",
")",
"else",
"desc",
"[",
"'default'",
"]",
")",
"# Skip attributes that have their default settings",
"if",
"value",
"==",
"default",
":",
"continue",
"# Set up the attr node",
"attr_node",
"=",
"etree",
".",
"SubElement",
"(",
"limit_node",
",",
"'attr'",
",",
"name",
"=",
"attr",
")",
"# Treat lists and dicts specially",
"if",
"attr_type",
"==",
"list",
":",
"for",
"val",
"in",
"value",
":",
"val_node",
"=",
"etree",
".",
"SubElement",
"(",
"attr_node",
",",
"'value'",
")",
"val_node",
".",
"text",
"=",
"str",
"(",
"val",
")",
"elif",
"attr_type",
"==",
"dict",
":",
"for",
"key",
",",
"val",
"in",
"sorted",
"(",
"value",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
":",
"val_node",
"=",
"etree",
".",
"SubElement",
"(",
"attr_node",
",",
"'value'",
",",
"key",
"=",
"key",
")",
"val_node",
".",
"text",
"=",
"str",
"(",
"val",
")",
"else",
":",
"attr_node",
".",
"text",
"=",
"str",
"(",
"value",
")"
] | Given a Limit object, generate an XML node.
:param root: The root node of the XML tree being built.
:param limit: The Limit object to serialize to XML. | [
"Given",
"a",
"Limit",
"object",
"generate",
"an",
"XML",
"node",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L562-L602 | train |
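
A sketch of the XML make_limit_node emits for a stub limit object; the Limit class itself is not shown in these records, so the attrs layout and class path below are invented for illustration:

from lxml import etree
from turnstile import tools

class StubLimit(object):
    _limit_full_name = 'turnstile.limits:Limit'
    attrs = {'unit': {'type': str, 'default': 'second'},
             'queries': {'type': list}}
    unit = 'second'          # equals its default, so it is skipped
    queries = ['GET', 'PUT']

root = etree.Element('limits')
tools.make_limit_node(root, StubLimit())
print(etree.tostring(root))
# <limits><limit class="turnstile.limits:Limit">
#   <attr name="queries"><value>GET</value><value>PUT</value></attr>
# </limit></limits>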
klmitch/turnstile | turnstile/tools.py | dump_limits | def dump_limits(conf_file, limits_file, debug=False):
"""
Dump the current limits from the Redis database.
:param conf_file: Name of the configuration file, for connecting
to the Redis database.
:param limits_file: Name of the XML file that the limits will be
dumped to. Use '-' to dump to stdout.
:param debug: If True, debugging messages are emitted while
dumping the limits.
"""
# Connect to the database...
conf = config.Config(conf_file=conf_file)
db = conf.get_database()
limits_key = conf['control'].get('limits_key', 'limits')
# Now, grab all the limits
lims = [limits.Limit.hydrate(db, msgpack.loads(lim))
for lim in db.zrange(limits_key, 0, -1)]
# Build up the limits tree
root = etree.Element('limits')
limit_tree = etree.ElementTree(root)
for idx, lim in enumerate(lims):
if debug:
print >>sys.stderr, "Dumping limit index %d: %r" % (idx, lim)
make_limit_node(root, lim)
# Write out the limits file
if limits_file == '-':
limits_file = sys.stdout
if debug:
print >>sys.stderr, "Dumping limits to file %r" % limits_file
limit_tree.write(limits_file, xml_declaration=True, encoding='UTF-8',
pretty_print=True) | python | def dump_limits(conf_file, limits_file, debug=False):
"""
Dump the current limits from the Redis database.
:param conf_file: Name of the configuration file, for connecting
to the Redis database.
:param limits_file: Name of the XML file that the limits will be
dumped to. Use '-' to dump to stdout.
:param debug: If True, debugging messages are emitted while
dumping the limits.
"""
# Connect to the database...
conf = config.Config(conf_file=conf_file)
db = conf.get_database()
limits_key = conf['control'].get('limits_key', 'limits')
# Now, grab all the limits
lims = [limits.Limit.hydrate(db, msgpack.loads(lim))
for lim in db.zrange(limits_key, 0, -1)]
# Build up the limits tree
root = etree.Element('limits')
limit_tree = etree.ElementTree(root)
for idx, lim in enumerate(lims):
if debug:
print >>sys.stderr, "Dumping limit index %d: %r" % (idx, lim)
make_limit_node(root, lim)
# Write out the limits file
if limits_file == '-':
limits_file = sys.stdout
if debug:
print >>sys.stderr, "Dumping limits to file %r" % limits_file
limit_tree.write(limits_file, xml_declaration=True, encoding='UTF-8',
pretty_print=True) | [
"def",
"dump_limits",
"(",
"conf_file",
",",
"limits_file",
",",
"debug",
"=",
"False",
")",
":",
"# Connect to the database...",
"conf",
"=",
"config",
".",
"Config",
"(",
"conf_file",
"=",
"conf_file",
")",
"db",
"=",
"conf",
".",
"get_database",
"(",
")",
"limits_key",
"=",
"conf",
"[",
"'control'",
"]",
".",
"get",
"(",
"'limits_key'",
",",
"'limits'",
")",
"# Now, grab all the limits",
"lims",
"=",
"[",
"limits",
".",
"Limit",
".",
"hydrate",
"(",
"db",
",",
"msgpack",
".",
"loads",
"(",
"lim",
")",
")",
"for",
"lim",
"in",
"db",
".",
"zrange",
"(",
"limits_key",
",",
"0",
",",
"-",
"1",
")",
"]",
"# Build up the limits tree",
"root",
"=",
"etree",
".",
"Element",
"(",
"'limits'",
")",
"limit_tree",
"=",
"etree",
".",
"ElementTree",
"(",
"root",
")",
"for",
"idx",
",",
"lim",
"in",
"enumerate",
"(",
"lims",
")",
":",
"if",
"debug",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"Dumping limit index %d: %r\"",
"%",
"(",
"idx",
",",
"lim",
")",
"make_limit_node",
"(",
"root",
",",
"lim",
")",
"# Write out the limits file",
"if",
"limits_file",
"==",
"'-'",
":",
"limits_file",
"=",
"sys",
".",
"stdout",
"if",
"debug",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"Dumping limits to file %r\"",
"%",
"limits_file",
"limit_tree",
".",
"write",
"(",
"limits_file",
",",
"xml_declaration",
"=",
"True",
",",
"encoding",
"=",
"'UTF-8'",
",",
"pretty_print",
"=",
"True",
")"
] | Dump the current limits from the Redis database.
:param conf_file: Name of the configuration file, for connecting
to the Redis database.
:param limits_file: Name of the XML file that the limits will be
dumped to. Use '-' to dump to stdout.
:param debug: If True, debugging messages are emitted while
dumping the limits. | [
"Dump",
"the",
"current",
"limits",
"from",
"the",
"Redis",
"database",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L617-L652 | train |
klmitch/turnstile | turnstile/tools.py | remote_daemon | def remote_daemon(conf_file):
"""
Run the external control daemon.
:param conf_file: Name of the configuration file.
"""
eventlet.monkey_patch()
conf = config.Config(conf_file=conf_file)
daemon = remote.RemoteControlDaemon(None, conf)
daemon.serve() | python | def remote_daemon(conf_file):
"""
Run the external control daemon.
:param conf_file: Name of the configuration file.
"""
eventlet.monkey_patch()
conf = config.Config(conf_file=conf_file)
daemon = remote.RemoteControlDaemon(None, conf)
daemon.serve() | [
"def",
"remote_daemon",
"(",
"conf_file",
")",
":",
"eventlet",
".",
"monkey_patch",
"(",
")",
"conf",
"=",
"config",
".",
"Config",
"(",
"conf_file",
"=",
"conf_file",
")",
"daemon",
"=",
"remote",
".",
"RemoteControlDaemon",
"(",
"None",
",",
"conf",
")",
"daemon",
".",
"serve",
"(",
")"
] | Run the external control daemon.
:param conf_file: Name of the configuration file. | [
"Run",
"the",
"external",
"control",
"daemon",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L673-L683 | train |
klmitch/turnstile | turnstile/tools.py | turnstile_command | def turnstile_command(conf_file, command, arguments=[], channel=None,
debug=False):
"""
Issue a command to all running control daemons.
:param conf_file: Name of the configuration file.
:param command: The command to execute. Note that 'ping' is
handled specially; in particular, the "channel"
parameter is implied. (A random value will be
used for the channel to listen on.)
:param arguments: A list of arguments for the command. Note that
the colon character (':') cannot be used.
:param channel: If not None, specifies the name of a message
channel to listen for responses on. Will wait
indefinitely; to terminate the listening loop, use
the keyboard interrupt sequence.
:param debug: If True, debugging messages are emitted while
sending the command.
"""
# Connect to the database...
conf = config.Config(conf_file=conf_file)
db = conf.get_database()
control_channel = conf['control'].get('channel', 'control')
# Now, set up the command
command = command.lower()
ts_conv = False
if command == 'ping':
# We handle 'ping' specially; first, figure out the channel
if arguments:
channel = arguments[0]
else:
channel = str(uuid.uuid4())
arguments = [channel]
# Next, add on a timestamp
if len(arguments) < 2:
arguments.append(time.time())
ts_conv = True
# Limit the argument list length
arguments = arguments[:2]
# OK, the command is all set up. Let us now send the command...
if debug:
cmd = [command] + arguments
print >>sys.stderr, ("Issuing command: %s" %
' '.join(cmd))
database.command(db, control_channel, command, *arguments)
# Were we asked to listen on a channel?
if not channel:
return
# OK, let's subscribe to the channel...
pubsub = db.pubsub()
pubsub.subscribe(channel)
# Now we listen...
try:
count = 0
for msg in pubsub.listen():
# Make sure the message is one we're interested in
if debug:
formatted = pprint.pformat(msg)
print >>sys.stderr, "Received message: %s" % formatted
if (msg['type'] not in ('pmessage', 'message') or
msg['channel'] != channel):
continue
count += 1
# Figure out the response
response = msg['data'].split(':')
# If this is a 'pong' and ts_conv is true, add an RTT to
# the response
if ts_conv and response[0] == 'pong':
try:
rtt = (time.time() - float(response[2])) * 1000
response.append('(RTT %.2fms)' % rtt)
except Exception:
# IndexError or ValueError, probably; ignore it
pass
# Print out the response
print "Response % 5d: %s" % (count, ' '.join(response))
except KeyboardInterrupt:
# We want to break out of the loop, but not return any error
# to the caller...
pass | python | def turnstile_command(conf_file, command, arguments=[], channel=None,
debug=False):
"""
Issue a command to all running control daemons.
:param conf_file: Name of the configuration file.
:param command: The command to execute. Note that 'ping' is
handled specially; in particular, the "channel"
parameter is implied. (A random value will be
used for the channel to listen on.)
:param arguments: A list of arguments for the command. Note that
the colon character (':') cannot be used.
:param channel: If not None, specifies the name of a message
channel to listen for responses on. Will wait
indefinitely; to terminate the listening loop, use
the keyboard interrupt sequence.
:param debug: If True, debugging messages are emitted while
sending the command.
"""
# Connect to the database...
conf = config.Config(conf_file=conf_file)
db = conf.get_database()
control_channel = conf['control'].get('channel', 'control')
# Now, set up the command
command = command.lower()
ts_conv = False
if command == 'ping':
# We handle 'ping' specially; first, figure out the channel
if arguments:
channel = arguments[0]
else:
channel = str(uuid.uuid4())
arguments = [channel]
# Next, add on a timestamp
if len(arguments) < 2:
arguments.append(time.time())
ts_conv = True
# Limit the argument list length
arguments = arguments[:2]
# OK, the command is all set up. Let us now send the command...
if debug:
cmd = [command] + arguments
print >>sys.stderr, ("Issuing command: %s" %
' '.join(cmd))
database.command(db, control_channel, command, *arguments)
# Were we asked to listen on a channel?
if not channel:
return
# OK, let's subscribe to the channel...
pubsub = db.pubsub()
pubsub.subscribe(channel)
# Now we listen...
try:
count = 0
for msg in pubsub.listen():
# Make sure the message is one we're interested in
if debug:
formatted = pprint.pformat(msg)
print >>sys.stderr, "Received message: %s" % formatted
if (msg['type'] not in ('pmessage', 'message') or
msg['channel'] != channel):
continue
count += 1
# Figure out the response
response = msg['data'].split(':')
# If this is a 'pong' and ts_conv is true, add an RTT to
# the response
if ts_conv and response[0] == 'pong':
try:
rtt = (time.time() - float(response[2])) * 1000
response.append('(RTT %.2fms)' % rtt)
except Exception:
# IndexError or ValueError, probably; ignore it
pass
# Print out the response
print "Response % 5d: %s" % (count, ' '.join(response))
except KeyboardInterrupt:
# We want to break out of the loop, but not return any error
# to the caller...
pass | [
"def",
"turnstile_command",
"(",
"conf_file",
",",
"command",
",",
"arguments",
"=",
"[",
"]",
",",
"channel",
"=",
"None",
",",
"debug",
"=",
"False",
")",
":",
"# Connect to the database...",
"conf",
"=",
"config",
".",
"Config",
"(",
"conf_file",
"=",
"conf_file",
")",
"db",
"=",
"conf",
".",
"get_database",
"(",
")",
"control_channel",
"=",
"conf",
"[",
"'control'",
"]",
".",
"get",
"(",
"'channel'",
",",
"'control'",
")",
"# Now, set up the command",
"command",
"=",
"command",
".",
"lower",
"(",
")",
"ts_conv",
"=",
"False",
"if",
"command",
"==",
"'ping'",
":",
"# We handle 'ping' specially; first, figure out the channel",
"if",
"arguments",
":",
"channel",
"=",
"arguments",
"[",
"0",
"]",
"else",
":",
"channel",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"arguments",
"=",
"[",
"channel",
"]",
"# Next, add on a timestamp",
"if",
"len",
"(",
"arguments",
")",
"<",
"2",
":",
"arguments",
".",
"append",
"(",
"time",
".",
"time",
"(",
")",
")",
"ts_conv",
"=",
"True",
"# Limit the argument list length",
"arguments",
"=",
"arguments",
"[",
":",
"2",
"]",
"# OK, the command is all set up. Let us now send the command...",
"if",
"debug",
":",
"cmd",
"=",
"[",
"command",
"]",
"+",
"arguments",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"\"Issuing command: %s\"",
"%",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
"database",
".",
"command",
"(",
"db",
",",
"control_channel",
",",
"command",
",",
"*",
"arguments",
")",
"# Were we asked to listen on a channel?",
"if",
"not",
"channel",
":",
"return",
"# OK, let's subscribe to the channel...",
"pubsub",
"=",
"db",
".",
"pubsub",
"(",
")",
"pubsub",
".",
"subscribe",
"(",
"channel",
")",
"# Now we listen...",
"try",
":",
"count",
"=",
"0",
"for",
"msg",
"in",
"pubsub",
".",
"listen",
"(",
")",
":",
"# Make sure the message is one we're interested in",
"if",
"debug",
":",
"formatted",
"=",
"pprint",
".",
"pformat",
"(",
"msg",
")",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"Received message: %s\"",
"%",
"formatted",
"if",
"(",
"msg",
"[",
"'type'",
"]",
"not",
"in",
"(",
"'pmessage'",
",",
"'message'",
")",
"or",
"msg",
"[",
"'channel'",
"]",
"!=",
"channel",
")",
":",
"continue",
"count",
"+=",
"1",
"# Figure out the response",
"response",
"=",
"msg",
"[",
"'data'",
"]",
".",
"split",
"(",
"':'",
")",
"# If this is a 'pong' and ts_conv is true, add an RTT to",
"# the response",
"if",
"ts_conv",
"and",
"response",
"[",
"0",
"]",
"==",
"'pong'",
":",
"try",
":",
"rtt",
"=",
"(",
"time",
".",
"time",
"(",
")",
"-",
"float",
"(",
"response",
"[",
"2",
"]",
")",
")",
"*",
"100",
"response",
".",
"append",
"(",
"'(RTT %.2fms)'",
"%",
"rtt",
")",
"except",
"Exception",
":",
"# IndexError or ValueError, probably; ignore it",
"pass",
"# Print out the response",
"print",
"\"Response % 5d: %s\"",
"%",
"(",
"count",
",",
"' '",
".",
"join",
"(",
"response",
")",
")",
"except",
"KeyboardInterrupt",
":",
"# We want to break out of the loop, but not return any error",
"# to the caller...",
"pass"
] | Issue a command to all running control daemons.
:param conf_file: Name of the configuration file.
:param command: The command to execute. Note that 'ping' is
handled specially; in particular, the "channel"
parameter is implied. (A random value will be
used for the channel to listen on.)
:param arguments: A list of arguments for the command. Note that
the colon character (':') cannot be used.
:param channel: If not None, specifies the name of a message
channel to listen for responses on. Will wait
indefinitely; to terminate the listening loop, use
the keyboard interrupt sequence.
:param debug: If True, debugging messages are emitted while
sending the command. | [
"Issue",
"a",
"command",
"to",
"all",
"running",
"control",
"daemons",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L712-L803 | train |
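
A hypothetical ping, assuming control daemons are running against the Redis named in 'turnstile.conf'. 'ping' picks a random reply channel and appends a timestamp, so each reply arrives as pong:<channel>:<timestamp> and is printed with an RTT annotation; Ctrl-C ends the listening loop (the sample output below is illustrative only):

from turnstile import tools

tools.turnstile_command('turnstile.conf', 'ping', debug=True)
# Response     1: pong 0f7e42... 1370000000.0 (RTT 1.52ms)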
klmitch/turnstile | turnstile/tools.py | compactor_daemon | def compactor_daemon(conf_file):
"""
Run the compactor daemon.
:param conf_file: Name of the configuration file.
"""
eventlet.monkey_patch()
conf = config.Config(conf_file=conf_file)
compactor.compactor(conf) | python | def compactor_daemon(conf_file):
"""
Run the compactor daemon.
:param conf_file: Name of the configuration file.
"""
eventlet.monkey_patch()
conf = config.Config(conf_file=conf_file)
compactor.compactor(conf) | [
"def",
"compactor_daemon",
"(",
"conf_file",
")",
":",
"eventlet",
".",
"monkey_patch",
"(",
")",
"conf",
"=",
"config",
".",
"Config",
"(",
"conf_file",
"=",
"conf_file",
")",
"compactor",
".",
"compactor",
"(",
"conf",
")"
] | Run the compactor daemon.
:param conf_file: Name of the configuration file. | [
"Run",
"the",
"compactor",
"daemon",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L820-L829 | train |
klmitch/turnstile | turnstile/tools.py | ScriptAdaptor._wrap | def _wrap(cls, func):
"""
Ensures that the function is wrapped in a ScriptAdaptor
object. If it is not, a new ScriptAdaptor will be returned.
If it is, the ScriptAdaptor is returned.
:param func: The function to be wrapped.
"""
if isinstance(func, cls):
return func
return functools.update_wrapper(cls(func), func) | python | def _wrap(cls, func):
"""
Ensures that the function is wrapped in a ScriptAdaptor
object. If it is not, a new ScriptAdaptor will be returned.
If it is, the ScriptAdaptor is returned.
:param func: The function to be wrapped.
"""
if isinstance(func, cls):
return func
return functools.update_wrapper(cls(func), func) | [
"def",
"_wrap",
"(",
"cls",
",",
"func",
")",
":",
"if",
"isinstance",
"(",
"func",
",",
"cls",
")",
":",
"return",
"func",
"return",
"functools",
".",
"update_wrapper",
"(",
"cls",
"(",
"func",
")",
",",
"func",
")"
] | Ensures that the function is wrapped in a ScriptAdaptor
object. If it is not, a new ScriptAdaptor will be returned.
If it is, the ScriptAdaptor is returned.
:param func: The function to be wrapped. | [
"Ensures",
"that",
"the",
"function",
"is",
"wrapped",
"in",
"a",
"ScriptAdaptor",
"object",
".",
"If",
"it",
"is",
"not",
"a",
"new",
"ScriptAdaptor",
"will",
"be",
"returned",
".",
"If",
"it",
"is",
"the",
"ScriptAdaptor",
"is",
"returned",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L50-L61 | train |
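
A small sketch of _wrap's idempotence, assuming ScriptAdaptor's constructor takes just the function, as cls(func) above implies:

from turnstile.tools import ScriptAdaptor

def noop(conf_file):
    return conf_file

adaptor = ScriptAdaptor._wrap(noop)
assert ScriptAdaptor._wrap(adaptor) is adaptor   # already wrapped
assert adaptor.__name__ == 'noop'                # update_wrapper copied it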
klmitch/turnstile | turnstile/tools.py | ScriptAdaptor.setup_args | def setup_args(self, parser):
"""
Set up an argparse.ArgumentParser object by adding all the
arguments taken by the function.
"""
# Add all the arguments to the argument parser
for args, kwargs in self._arguments:
parser.add_argument(*args, **kwargs) | python | def setup_args(self, parser):
"""
Set up an argparse.ArgumentParser object by adding all the
arguments taken by the function.
"""
# Add all the arguments to the argument parser
for args, kwargs in self._arguments:
parser.add_argument(*args, **kwargs) | [
"def",
"setup_args",
"(",
"self",
",",
"parser",
")",
":",
"# Add all the arguments to the argument parser",
"for",
"args",
",",
"kwargs",
"in",
"self",
".",
"_arguments",
":",
"parser",
".",
"add_argument",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Set up an argparse.ArgumentParser object by adding all the
arguments taken by the function. | [
"Set",
"up",
"an",
"argparse",
".",
"ArgumentParser",
"object",
"by",
"adding",
"all",
"the",
"arguments",
"taken",
"by",
"the",
"function",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L131-L139 | train |
klmitch/turnstile | turnstile/tools.py | ScriptAdaptor.get_kwargs | def get_kwargs(self, args):
"""
Given a Namespace object drawn from argparse, determines the
keyword arguments to pass to the underlying function. Note
that, if the underlying function accepts all keyword
arguments, the dictionary returned will contain the entire
contents of the Namespace object. Also note that an
AttributeError will be raised if any argument required by the
function is not set in the Namespace object.
:param args: A Namespace object from argparse.
"""
# Now we need to figure out which arguments the final function
# actually needs
kwargs = {}
argspec = inspect.getargspec(self._func)
required = set(argspec.args[:-len(argspec.defaults)]
if argspec.defaults else argspec.args)
for arg_name in argspec.args:
try:
kwargs[arg_name] = getattr(args, arg_name)
except AttributeError:
if arg_name in required:
# If this happens, that's a programming failure
raise
# If the function accepts any keyword argument, add whatever
# remains
if argspec.keywords:
for key, value in args.__dict__.items():
if key in kwargs:
# Already handled
continue
kwargs[key] = value
return kwargs | python | def get_kwargs(self, args):
"""
Given a Namespace object drawn from argparse, determines the
keyword arguments to pass to the underlying function. Note
that, if the underlying function accepts all keyword
arguments, the dictionary returned will contain the entire
contents of the Namespace object. Also note that an
AttributeError will be raised if any argument required by the
function is not set in the Namespace object.
:param args: A Namespace object from argparse.
"""
# Now we need to figure out which arguments the final function
# actually needs
kwargs = {}
argspec = inspect.getargspec(self._func)
required = set(argspec.args[:-len(argspec.defaults)]
if argspec.defaults else argspec.args)
for arg_name in argspec.args:
try:
kwargs[arg_name] = getattr(args, arg_name)
except AttributeError:
if arg_name in required:
# If this happens, that's a programming failure
raise
# If the function accepts any keyword argument, add whatever
# remains
if argspec.keywords:
for key, value in args.__dict__.items():
if key in kwargs:
# Already handled
continue
kwargs[key] = value
return kwargs | [
"def",
"get_kwargs",
"(",
"self",
",",
"args",
")",
":",
"# Now we need to figure out which arguments the final function",
"# actually needs",
"kwargs",
"=",
"{",
"}",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"self",
".",
"_func",
")",
"required",
"=",
"set",
"(",
"argspec",
".",
"args",
"[",
":",
"-",
"len",
"(",
"argspec",
".",
"defaults",
")",
"]",
"if",
"argspec",
".",
"defaults",
"else",
"argspec",
".",
"args",
")",
"for",
"arg_name",
"in",
"argspec",
".",
"args",
":",
"try",
":",
"kwargs",
"[",
"arg_name",
"]",
"=",
"getattr",
"(",
"args",
",",
"arg_name",
")",
"except",
"AttributeError",
":",
"if",
"arg_name",
"in",
"required",
":",
"# If this happens, that's a programming failure",
"raise",
"# If the function accepts any keyword argument, add whatever",
"# remains",
"if",
"argspec",
".",
"keywords",
":",
"for",
"key",
",",
"value",
"in",
"args",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"kwargs",
":",
"# Already handled",
"continue",
"kwargs",
"[",
"key",
"]",
"=",
"value",
"return",
"kwargs"
] | Given a Namespace object drawn from argparse, determines the
keyword arguments to pass to the underlying function. Note
that, if the underlying function accepts all keyword
arguments, the dictionary returned will contain the entire
contents of the Namespace object. Also note that an
AttributeError will be raised if any argument required by the
function is not set in the Namespace object.
:param args: A Namespace object from argparse. | [
"Given",
"a",
"Namespace",
"object",
"drawn",
"from",
"argparse",
"determines",
"the",
"keyword",
"arguments",
"to",
"pass",
"to",
"the",
"underlying",
"function",
".",
"Note",
"that",
"if",
"the",
"underlying",
"function",
"accepts",
"all",
"keyword",
"arguments",
"the",
"dictionary",
"returned",
"will",
"contain",
"the",
"entire",
"contents",
"of",
"the",
"Namespace",
"object",
".",
"Also",
"note",
"that",
"an",
"AttributeError",
"will",
"be",
"raised",
"if",
"any",
"argument",
"required",
"by",
"the",
"function",
"is",
"not",
"set",
"in",
"the",
"Namespace",
"object",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L141-L177 | train |
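
A sketch of get_kwargs against a hand-built Namespace; the function here is hypothetical, the adaptor is assumed to store the function as self._func, and note the record's code relies on the Python 2-era inspect.getargspec:

import argparse
from turnstile.tools import ScriptAdaptor

def deploy(conf_file, dry_run=False):
    return conf_file, dry_run

adaptor = ScriptAdaptor._wrap(deploy)
ns = argparse.Namespace(conf_file='turnstile.conf', extra='ignored')
print(adaptor.get_kwargs(ns))
# {'conf_file': 'turnstile.conf'} -- 'dry_run' is optional and unset, so
# it is omitted; 'extra' is dropped because deploy takes no **kwargs.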
klmitch/turnstile | turnstile/tools.py | ScriptAdaptor.console | def console(self):
"""
Call the function as a console script. Command line arguments
are parsed, preprocessors are called, then the function is
called. If a 'debug' attribute is set by the command line
arguments, and it is True, any exception raised by the
underlying function will be reraised; otherwise, the return
value will be either the return value of the function or the
text contents of the exception.
"""
# First, let's parse the arguments
parser = argparse.ArgumentParser(description=self.description)
self.setup_args(parser)
args = parser.parse_args()
# Next, let's run the preprocessors in order
for proc in self._preprocess:
try:
proc(args)
except Exception as exc:
if getattr(args, 'debug', False):
raise
return str(exc)
# Finally, safely call the underlying function
result = self.safe_call(self.get_kwargs(args), args)
# Now, run the postprocessors in order
for proc in self._postprocess:
result = proc(args, result)
return result | python | def console(self):
"""
Call the function as a console script. Command line arguments
are parsed, preprocessors are called, then the function is
called. If a 'debug' attribute is set by the command line
arguments, and it is True, any exception raised by the
underlying function will be reraised; otherwise, the return
value will be either the return value of the function or the
text contents of the exception.
"""
# First, let's parse the arguments
parser = argparse.ArgumentParser(description=self.description)
self.setup_args(parser)
args = parser.parse_args()
# Next, let's run the preprocessors in order
for proc in self._preprocess:
try:
proc(args)
except Exception as exc:
if getattr(args, 'debug', False):
raise
return str(exc)
# Finally, safely call the underlying function
result = self.safe_call(self.get_kwargs(args), args)
# Now, run the postprocessors in order
for proc in self._postprocess:
result = proc(args, result)
return result | [
"def",
"console",
"(",
"self",
")",
":",
"# First, let's parse the arguments",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"self",
".",
"description",
")",
"self",
".",
"setup_args",
"(",
"parser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# Next, let's run the preprocessors in order",
"for",
"proc",
"in",
"self",
".",
"_preprocess",
":",
"try",
":",
"proc",
"(",
"args",
")",
"except",
"Exception",
"as",
"exc",
":",
"if",
"getattr",
"(",
"args",
",",
"'debug'",
",",
"False",
")",
":",
"raise",
"return",
"str",
"(",
"exc",
")",
"# Finally, safely call the underlying function",
"result",
"=",
"self",
".",
"safe_call",
"(",
"self",
".",
"get_kwargs",
"(",
"args",
")",
",",
"args",
")",
"# Now, run the postprocessors in order",
"for",
"proc",
"in",
"self",
".",
"_postprocess",
":",
"result",
"=",
"proc",
"(",
"args",
",",
"result",
")",
"return",
"result"
] | Call the function as a console script. Command line arguments
are parsed, preprocessors are called, then the function is
called. If a 'debug' attribute is set by the command line
arguments, and it is True, any exception raised by the
underlying function will be reraised; otherwise, the return
value will be either the return value of the function or the
text contents of the exception. | [
"Call",
"the",
"function",
"as",
"a",
"console",
"script",
".",
"Command",
"line",
"arguments",
"are",
"parsed",
"preprocessors",
"are",
"called",
"then",
"the",
"function",
"is",
"called",
".",
"If",
"a",
"debug",
"attribute",
"is",
"set",
"by",
"the",
"command",
"line",
"arguments",
"and",
"it",
"is",
"True",
"any",
"exception",
"raised",
"by",
"the",
"underlying",
"function",
"will",
"be",
"reraised",
";",
"otherwise",
"the",
"return",
"value",
"will",
"be",
"either",
"the",
"return",
"value",
"of",
"the",
"function",
"or",
"the",
"text",
"contents",
"of",
"the",
"exception",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L206-L238 | train |
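
A minimal end-to-end sketch of console(); ScriptAdaptor's constructor and safe_call are not shown in these records, so their behavior (a description attribute, calling the wrapped function with the computed kwargs) is assumed:

import sys
from turnstile.tools import ScriptAdaptor

def hello():
    """Say hello."""
    return 'hello'

adaptor = ScriptAdaptor._wrap(hello)
sys.argv = ['hello']          # no declared arguments to parse
print(adaptor.console())      # parses argv, runs hello -> 'hello'
# As a setuptools entry point this would be exposed as, e.g.:
#   console_scripts = ['hello = mypkg.cmds:hello.console']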
BernardFW/bernard | src/bernard/utils.py | import_class | def import_class(name: Text) -> Type:
"""
Import a class based on its full name.
:param name: name of the class
"""
parts = name.split('.')
module_name = parts[:-1]
class_name = parts[-1]
module_ = importlib.import_module('.'.join(module_name))
return getattr(module_, class_name) | python | def import_class(name: Text) -> Type:
"""
Import a class based on its full name.
:param name: name of the class
"""
parts = name.split('.')
module_name = parts[:-1]
class_name = parts[-1]
module_ = importlib.import_module('.'.join(module_name))
return getattr(module_, class_name) | [
"def",
"import_class",
"(",
"name",
":",
"Text",
")",
"->",
"Type",
":",
"parts",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"module_name",
"=",
"parts",
"[",
":",
"-",
"1",
"]",
"class_name",
"=",
"parts",
"[",
"-",
"1",
"]",
"module_",
"=",
"importlib",
".",
"import_module",
"(",
"'.'",
".",
"join",
"(",
"module_name",
")",
")",
"return",
"getattr",
"(",
"module_",
",",
"class_name",
")"
] | Import a class based on its full name.
:param name: name of the class | [
"Import",
"a",
"class",
"based",
"on",
"its",
"full",
"name",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/utils.py#L34-L45 | train |
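
Usage sketch; despite the name, import_class returns whatever attribute the dotted path names:

from bernard.utils import import_class

OrderedDict = import_class('collections.OrderedDict')
print(OrderedDict(a=1))   # OrderedDict([('a', 1)])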
BernardFW/bernard | src/bernard/utils.py | make_ro | def make_ro(obj: Any, forgive_type=False):
"""
Make a json-serializable type recursively read-only
:param obj: Any json-serializable type
:param forgive_type: If you can forgive a type to be unknown (instead of
raising an exception)
"""
if isinstance(obj, (str, bytes, int, float, bool, RoDict, RoList)) \
or obj is None:
return obj
elif isinstance(obj, Mapping):
return RoDict(obj, forgive_type)
elif isinstance(obj, Sequence):
return RoList(obj, forgive_type)
elif forgive_type:
return obj
else:
raise ValueError('Trying to make read-only an object of type "{}"'
.format(obj.__class__.__name__)) | python | def make_ro(obj: Any, forgive_type=False):
"""
Make a json-serializable type recursively read-only
:param obj: Any json-serializable type
:param forgive_type: If you can forgive a type to be unknown (instead of
raising an exception)
"""
if isinstance(obj, (str, bytes, int, float, bool, RoDict, RoList)) \
or obj is None:
return obj
elif isinstance(obj, Mapping):
return RoDict(obj, forgive_type)
elif isinstance(obj, Sequence):
return RoList(obj, forgive_type)
elif forgive_type:
return obj
else:
raise ValueError('Trying to make read-only an object of type "{}"'
.format(obj.__class__.__name__)) | [
"def",
"make_ro",
"(",
"obj",
":",
"Any",
",",
"forgive_type",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"str",
",",
"bytes",
",",
"int",
",",
"float",
",",
"bool",
",",
"RoDict",
",",
"RoList",
")",
")",
"or",
"obj",
"is",
"None",
":",
"return",
"obj",
"elif",
"isinstance",
"(",
"obj",
",",
"Mapping",
")",
":",
"return",
"RoDict",
"(",
"obj",
",",
"forgive_type",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"Sequence",
")",
":",
"return",
"RoList",
"(",
"obj",
",",
"forgive_type",
")",
"elif",
"forgive_type",
":",
"return",
"obj",
"else",
":",
"raise",
"ValueError",
"(",
"'Trying to make read-only an object of type \"{}\"'",
".",
"format",
"(",
"obj",
".",
"__class__",
".",
"__name__",
")",
")"
] | Make a json-serializable type recursively read-only
:param obj: Any json-serializable type
:param forgive_type: If you can forgive a type to be unknown (instead of
raising an exception) | [
"Make",
"a",
"json",
"-",
"serializable",
"type",
"recursively",
"read",
"-",
"only"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/utils.py#L127-L147 | train |
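
Usage sketch; the RoDict/RoList classes are defined elsewhere in the module, so their read-only enforcement and Mapping-style access are assumed here:

from bernard.utils import make_ro

ro = make_ro({'user': {'id': 1, 'tags': ['a', 'b']}})
print(ro['user']['tags'][0])          # 'a' -- nested reads work as usual

make_ro(object(), forgive_type=True)  # unknown type returned unchanged
make_ro(object())                     # raises ValueError: unknown type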
BernardFW/bernard | src/bernard/utils.py | make_rw | def make_rw(obj: Any):
"""
Copy a RO object into a RW structure made with standard Python classes.
WARNING there is no protection against recursion.
"""
if isinstance(obj, RoDict):
return {k: make_rw(v) for k, v in obj.items()}
elif isinstance(obj, RoList):
return [make_rw(x) for x in obj]
else:
return obj | python | def make_rw(obj: Any):
"""
Copy a RO object into a RW structure made with standard Python classes.
WARNING there is no protection against recursion.
"""
if isinstance(obj, RoDict):
return {k: make_rw(v) for k, v in obj.items()}
elif isinstance(obj, RoList):
return [make_rw(x) for x in obj]
else:
return obj | [
"def",
"make_rw",
"(",
"obj",
":",
"Any",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"RoDict",
")",
":",
"return",
"{",
"k",
":",
"make_rw",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"obj",
".",
"items",
"(",
")",
"}",
"elif",
"isinstance",
"(",
"obj",
",",
"RoList",
")",
":",
"return",
"[",
"make_rw",
"(",
"x",
")",
"for",
"x",
"in",
"obj",
"]",
"else",
":",
"return",
"obj"
] | Copy a RO object into a RW structure made with standard Python classes.
WARNING there is no protection against recursion. | [
"Copy",
"a",
"RO",
"object",
"into",
"a",
"RW",
"structure",
"made",
"with",
"standard",
"Python",
"classes",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/utils.py#L150-L162 | train |
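
A round-trip sketch: make_rw undoes make_ro, producing plain dicts and lists again:

from bernard.utils import make_ro, make_rw

data = {'a': [1, 2], 'b': {'c': 3}}
assert make_rw(make_ro(data)) == data   # deep copy, standard types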
BernardFW/bernard | src/bernard/utils.py | patch_qs | def patch_qs(url: Text, data: Dict[Text, Text]) -> Text:
"""
Given an URL, change the query string to include the values specified in
the dictionary.
If the keys of the dictionary can be found in the query string of the URL,
then they will be removed.
It is guaranteed that all other values of the query string will keep their
order.
"""
qs_id = 4
p = list(urlparse(url))
qs = parse_qsl(p[qs_id]) # type: List[Tuple[Text, Text]]
patched_qs = list(chain(
filter(lambda x: x[0] not in data, qs),
data.items(),
))
p[qs_id] = urlencode(patched_qs)
return urlunparse(p) | python | def patch_qs(url: Text, data: Dict[Text, Text]) -> Text:
"""
Given an URL, change the query string to include the values specified in
the dictionary.
If the keys of the dictionary can be found in the query string of the URL,
then they will be removed.
It is guaranteed that all other values of the query string will keep their
order.
"""
qs_id = 4
p = list(urlparse(url))
qs = parse_qsl(p[qs_id]) # type: List[Tuple[Text, Text]]
patched_qs = list(chain(
filter(lambda x: x[0] not in data, qs),
data.items(),
))
p[qs_id] = urlencode(patched_qs)
return urlunparse(p) | [
"def",
"patch_qs",
"(",
"url",
":",
"Text",
",",
"data",
":",
"Dict",
"[",
"Text",
",",
"Text",
"]",
")",
"->",
"Text",
":",
"qs_id",
"=",
"4",
"p",
"=",
"list",
"(",
"urlparse",
"(",
"url",
")",
")",
"qs",
"=",
"parse_qsl",
"(",
"p",
"[",
"qs_id",
"]",
")",
"# type: List[Tuple[Text, Text]]",
"patched_qs",
"=",
"list",
"(",
"chain",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
"not",
"in",
"data",
",",
"qs",
")",
",",
"data",
".",
"items",
"(",
")",
",",
")",
")",
"p",
"[",
"qs_id",
"]",
"=",
"urlencode",
"(",
"patched_qs",
")",
"return",
"urlunparse",
"(",
"p",
")"
] | Given an URL, change the query string to include the values specified in
the dictionary.
If the keys of the dictionary can be found in the query string of the URL,
then they will be removed.
It is guaranteed that all other values of the query string will keep their
order. | [
"Given",
"an",
"URL",
"change",
"the",
"query",
"string",
"to",
"include",
"the",
"values",
"specified",
"in",
"the",
"dictionary",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/utils.py#L203-L225 | train |
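
Usage sketch; keys in data replace every same-named key in the query string, and the untouched parameters keep their relative order:

from bernard.utils import patch_qs

url = 'https://example.com/page?a=1&b=2&a=3'
print(patch_qs(url, {'a': '9'}))
# https://example.com/page?b=2&a=9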
BernardFW/bernard | src/bernard/utils.py | dict_is_subset | def dict_is_subset(subset: Any, full_set: Any) -> bool:
"""
Checks that all keys present in `subset` are present and have the same
value in `full_set`. If a key is in `full_set` but not in `subset` then
True will be returned anyways.
"""
if not isinstance(subset, full_set.__class__):
return False
elif isinstance(subset, dict):
for k, v in subset.items():
if k not in full_set or not dict_is_subset(v, full_set[k]):
return False
return True
elif isinstance(subset, list):
if len(subset) != len(full_set):
return False
for a, b in zip(subset, full_set):
if not dict_is_subset(a, b):
return False
return True
else:
return subset == full_set | python | def dict_is_subset(subset: Any, full_set: Any) -> bool:
"""
Checks that all keys present in `subset` are present and have the same
value in `full_set`. If a key is in `full_set` but not in `subset` then
True will be returned anyways.
"""
if not isinstance(subset, full_set.__class__):
return False
elif isinstance(subset, dict):
for k, v in subset.items():
if k not in full_set or not dict_is_subset(v, full_set[k]):
return False
return True
elif isinstance(subset, list):
if len(subset) != len(full_set):
return False
for a, b in zip(subset, full_set):
if not dict_is_subset(a, b):
return False
return True
else:
return subset == full_set | [
"def",
"dict_is_subset",
"(",
"subset",
":",
"Any",
",",
"full_set",
":",
"Any",
")",
"->",
"bool",
":",
"if",
"not",
"isinstance",
"(",
"subset",
",",
"full_set",
".",
"__class__",
")",
":",
"return",
"False",
"elif",
"isinstance",
"(",
"subset",
",",
"dict",
")",
":",
"for",
"k",
",",
"v",
"in",
"subset",
".",
"items",
"(",
")",
":",
"if",
"k",
"not",
"in",
"full_set",
"or",
"not",
"dict_is_subset",
"(",
"v",
",",
"full_set",
"[",
"k",
"]",
")",
":",
"return",
"False",
"return",
"True",
"elif",
"isinstance",
"(",
"subset",
",",
"list",
")",
":",
"if",
"len",
"(",
"subset",
")",
"!=",
"len",
"(",
"full_set",
")",
":",
"return",
"False",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"subset",
",",
"full_set",
")",
":",
"if",
"not",
"dict_is_subset",
"(",
"a",
",",
"b",
")",
":",
"return",
"False",
"return",
"True",
"else",
":",
"return",
"subset",
"==",
"full_set"
] | Checks that all keys present in `subset` are present and have the same
value in `full_set`. If a key is in `full_set` but not in `subset` then
True will be returned anyways. | [
"Checks",
"that",
"all",
"keys",
"present",
"in",
"subset",
"are",
"present",
"and",
"have",
"the",
"same",
"value",
"in",
"full_set",
".",
"If",
"a",
"key",
"is",
"in",
"full_set",
"but",
"not",
"in",
"subset",
"then",
"True",
"will",
"be",
"returned",
"anyways",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/utils.py#L234-L259 | train |
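
Usage sketch; note that types must match exactly and lists are compared element-wise, including length:

from bernard.utils import dict_is_subset

full = {'a': 1, 'b': {'c': [1, 2]}}
print(dict_is_subset({'b': {'c': [1, 2]}}, full))  # True
print(dict_is_subset({'b': {'c': [1]}}, full))     # False: lengths differ
print(dict_is_subset({'a': '1'}, full))            # False: str vs int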
BernardFW/bernard | src/bernard/utils.py | ClassExp._compile | def _compile(self, expression):
"""
Transform a class exp into an actual regex
"""
x = self.RE_PYTHON_VAR.sub('(?:\\1,)', expression)
x = self.RE_SPACES.sub('', x)
return re.compile(x) | python | def _compile(self, expression):
"""
Transform a class exp into an actual regex
"""
x = self.RE_PYTHON_VAR.sub('(?:\\1,)', expression)
x = self.RE_SPACES.sub('', x)
return re.compile(x) | [
"def",
"_compile",
"(",
"self",
",",
"expression",
")",
":",
"x",
"=",
"self",
".",
"RE_PYTHON_VAR",
".",
"sub",
"(",
"'(?:\\\\1,)'",
",",
"expression",
")",
"x",
"=",
"self",
".",
"RE_SPACES",
".",
"sub",
"(",
"''",
",",
"x",
")",
"return",
"re",
".",
"compile",
"(",
"x",
")"
] | Transform a class exp into an actual regex | [
"Transform",
"a",
"class",
"exp",
"into",
"an",
"actual",
"regex"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/utils.py#L177-L184 | train |
BernardFW/bernard | src/bernard/utils.py | ClassExp._make_string | def _make_string(self, objects: List[Any]) -> Text:
"""
Transforms a list of objects into a matchable string
"""
return ''.join(x.__class__.__name__ + ',' for x in objects) | python | def _make_string(self, objects: List[Any]) -> Text:
"""
Transforms a list of objects into a matchable string
"""
return ''.join(x.__class__.__name__ + ',' for x in objects) | [
"def",
"_make_string",
"(",
"self",
",",
"objects",
":",
"List",
"[",
"Any",
"]",
")",
"->",
"Text",
":",
"return",
"''",
".",
"join",
"(",
"x",
".",
"__class__",
".",
"__name__",
"+",
"','",
"for",
"x",
"in",
"objects",
")"
] | Transforms a list of objects into a matchable string | [
"Transforms",
"a",
"list",
"of",
"objects",
"into",
"a",
"matchable",
"string"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/utils.py#L186-L191 | train |
BernardFW/bernard | src/bernard/utils.py | ClassExp.match | def match(self, objects: List[Any]) -> bool:
"""
Return True if the list of objects matches the expression.
"""
s = self._make_string(objects)
m = self._compiled_expression.match(s)
return m is not None | python | def match(self, objects: List[Any]) -> bool:
"""
Return True if the list of objects matches the expression.
"""
s = self._make_string(objects)
m = self._compiled_expression.match(s)
return m is not None | [
"def",
"match",
"(",
"self",
",",
"objects",
":",
"List",
"[",
"Any",
"]",
")",
"->",
"bool",
":",
"s",
"=",
"self",
".",
"_make_string",
"(",
"objects",
")",
"m",
"=",
"self",
".",
"_compiled_expression",
".",
"match",
"(",
"s",
")",
"return",
"m",
"is",
"not",
"None"
] | Return True if the list of objects matches the expression. | [
"Return",
"True",
"if",
"the",
"list",
"of",
"objects",
"matches",
"the",
"expression",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/utils.py#L193-L200 | train |
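A usage sketch for ClassExp built from the three methods above; the constructor is not shown in these entries, so it is assumed here to store self._compiled_expression = self._compile(expression):

    class Foo: pass
    class Bar: pass

    exp = ClassExp('Foo+ Bar')        # assumption about __init__, see note above
    exp.match([Foo(), Foo(), Bar()])  # True: _make_string gives 'Foo,Foo,Bar,' matching '(?:Foo,)+(?:Bar,)'
    exp.match([Bar(), Foo()])         # False: 'Bar,Foo,' does not start with 'Foo,'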
jstitch/MambuPy | MambuPy/mambuconfig.py | get_conf | def get_conf(conf, sect, opt):
""" Gets a config 'opt' from 'conf' file, under section 'sect'.
    If no 'opt' exists under 'sect', it looks for the option in the default_configs
dictionary
    If there exists an environment variable named MAMBUPY_{upper_case_opt},
it overrides whatever the conf files or default_configs dict says.
But if you send a command line argument named mambupy_{lower_case_opt},
it overrides anything else.
Args:
conf (ConfigParser): ConfigParser that reads from certain config file (INI
format)
sect (string): section under the config file
opt (string): option to read
Returns:
string: configuration option. If not found on conf, returns a value from
default_configs dict. If environmental variable exists with name
MAMBUPY_{upper_case_opt} it overrides anything else
"""
argu = getattr(args, "mambupy_"+opt.lower())
if not argu:
envir = os.environ.get("MAMBUPY_"+opt.upper())
if not envir:
try:
return conf.get(sect,opt)
except NoSectionError:
return default_configs[opt]
return envir
return argu | python | def get_conf(conf, sect, opt):
""" Gets a config 'opt' from 'conf' file, under section 'sect'.
    If no 'opt' exists under 'sect', it looks for the option in the default_configs
dictionary
    If there exists an environment variable named MAMBUPY_{upper_case_opt},
it overrides whatever the conf files or default_configs dict says.
But if you send a command line argument named mambupy_{lower_case_opt},
it overrides anything else.
Args:
conf (ConfigParser): ConfigParser that reads from certain config file (INI
format)
sect (string): section under the config file
opt (string): option to read
Returns:
string: configuration option. If not found on conf, returns a value from
    default_configs dict. If an environment variable exists with the name
MAMBUPY_{upper_case_opt} it overrides anything else
"""
argu = getattr(args, "mambupy_"+opt.lower())
if not argu:
envir = os.environ.get("MAMBUPY_"+opt.upper())
if not envir:
try:
return conf.get(sect,opt)
except NoSectionError:
return default_configs[opt]
return envir
return argu | [
"def",
"get_conf",
"(",
"conf",
",",
"sect",
",",
"opt",
")",
":",
"argu",
"=",
"getattr",
"(",
"args",
",",
"\"mambupy_\"",
"+",
"opt",
".",
"lower",
"(",
")",
")",
"if",
"not",
"argu",
":",
"envir",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"MAMBUPY_\"",
"+",
"opt",
".",
"upper",
"(",
")",
")",
"if",
"not",
"envir",
":",
"try",
":",
"return",
"conf",
".",
"get",
"(",
"sect",
",",
"opt",
")",
"except",
"NoSectionError",
":",
"return",
"default_configs",
"[",
"opt",
"]",
"return",
"envir",
"return",
"argu"
] | Gets a config 'opt' from 'conf' file, under section 'sect'.
If no 'opt' exists under 'sect', it looks for the option in the default_configs
dictionary
If there exists an environment variable named MAMBUPY_{upper_case_opt},
it overrides whatever the conf files or default_configs dict says.
But if you send a command line argument named mambupy_{lower_case_opt},
it overrides anything else.
Args:
conf (ConfigParser): ConfigParser that reads from certain config file (INI
format)
sect (string): section under the config file
opt (string): option to read
Returns:
string: configuration option. If not found on conf, returns a value from
default_configs dict. If an environment variable exists with the name
MAMBUPY_{upper_case_opt} it overrides anything else | [
"Gets",
"a",
"config",
"opt",
"from",
"conf",
"file",
"under",
"section",
"sect",
"."
] | 2af98cc12e7ed5ec183b3e97644e880e70b79ee8 | https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/mambuconfig.py#L119-L152 | train |
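The resulting precedence, highest first, is: command-line argument, then MAMBUPY_* environment variable, then the config file, then default_configs. A hedged sketch (apiconf as the module-level ConfigParser and the 'API'/'apiurl' names are assumptions about mambuconfig's usual layout):

    import os

    os.environ['MAMBUPY_APIURL'] = 'https://example.mambu.com'  # hypothetical value
    # with no --mambupy_apiurl command-line argument, the environment variable wins:
    url = get_conf(apiconf, 'API', 'apiurl')                    # -> 'https://example.mambu.com'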
obilaniu/Nauka | src/nauka/fhs.py | iso8601timestamp | def iso8601timestamp(T=None, nanos=True, utc=False):
"""Get ISO8601-formatted timestamp string."""
T = time.time() if T is None else T
Ti = math.floor(T)
Tn = round((T-Ti)*1e9)
if Tn >= 1e9:
Ti += 1
Tn = 0
s = time.gmtime(Ti) if utc else time.localtime(Ti)
f = time.strftime("%Y%m%dT%H%M%S", s)
n = ".{:09d}".format(Tn) if nanos else ""
tz = "Z" if utc else time.strftime("%z", s)
return f+n+tz | python | def iso8601timestamp(T=None, nanos=True, utc=False):
"""Get ISO8601-formatted timestamp string."""
T = time.time() if T is None else T
Ti = math.floor(T)
Tn = round((T-Ti)*1e9)
if Tn >= 1e9:
Ti += 1
Tn = 0
s = time.gmtime(Ti) if utc else time.localtime(Ti)
f = time.strftime("%Y%m%dT%H%M%S", s)
n = ".{:09d}".format(Tn) if nanos else ""
tz = "Z" if utc else time.strftime("%z", s)
return f+n+tz | [
"def",
"iso8601timestamp",
"(",
"T",
"=",
"None",
",",
"nanos",
"=",
"True",
",",
"utc",
"=",
"False",
")",
":",
"T",
"=",
"time",
".",
"time",
"(",
")",
"if",
"T",
"is",
"None",
"else",
"T",
"Ti",
"=",
"math",
".",
"floor",
"(",
"T",
")",
"Tn",
"=",
"round",
"(",
"(",
"T",
"-",
"Ti",
")",
"*",
"1e9",
")",
"if",
"Tn",
">=",
"1e9",
":",
"Ti",
"+=",
"1",
"Tn",
"=",
"0",
"s",
"=",
"time",
".",
"gmtime",
"(",
"Ti",
")",
"if",
"utc",
"else",
"time",
".",
"localtime",
"(",
"Ti",
")",
"f",
"=",
"time",
".",
"strftime",
"(",
"\"%Y%m%dT%H%M%S\"",
",",
"s",
")",
"n",
"=",
"\".{:09d}\"",
".",
"format",
"(",
"Tn",
")",
"if",
"nanos",
"else",
"\"\"",
"tz",
"=",
"\"Z\"",
"if",
"utc",
"else",
"time",
".",
"strftime",
"(",
"\"%z\"",
",",
"s",
")",
"return",
"f",
"+",
"n",
"+",
"tz"
] | Get ISO8601-formatted timestamp string. | [
"Get",
"ISO8601",
"-",
"formatted",
"timestamp",
"string",
"."
] | 1492a4f9d204a868c1a8a1d327bd108490b856b4 | https://github.com/obilaniu/Nauka/blob/1492a4f9d204a868c1a8a1d327bd108490b856b4/src/nauka/fhs.py#L5-L18 | train |
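Worked examples of the formatting above (UTC so the output is deterministic):

    iso8601timestamp(0, nanos=False, utc=True)  # -> '19700101T000000Z'
    iso8601timestamp(0.5, utc=True)             # -> '19700101T000000.500000000Z'
    iso8601timestamp(0.9999999996, utc=True)    # nanosecond round-up carries into the seconds:
                                                # -> '19700101T000001.000000000Z'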
obilaniu/Nauka | src/nauka/fhs.py | createWorkDir | def createWorkDir(baseDir,
projName,
expUUID,
expNames = [],
nanos = True,
utc = False):
"""Create working directory for experiment if not existing already."""
#
# First, ensure the project's top-level hierarchy, especially by-uuid/,
# exists, so that the only possible failure is due to the creation of
# one additional directory.
#
projDir = os.path.join(baseDir, projName)
byuuidDir = os.path.join(projDir, "by-uuid")
bytimeDir = os.path.join(projDir, "by-time")
bynameDir = os.path.join(projDir, "by-name", *expNames)
byuuidPath = os.path.join(byuuidDir, expUUID)
os.makedirs(byuuidDir, mode=0o755, exist_ok=True)
os.makedirs(bytimeDir, mode=0o755, exist_ok=True)
os.makedirs(bynameDir, mode=0o755, exist_ok=True)
#
# Attempt the creation of the experiment workDir by its UUID. Record
# whether we were the original creators.
#
try:
preexisting = False
os.makedirs(byuuidPath,
mode = 0o755,
exist_ok = False)
except FileExistsError:
preexisting = True
#
# If we were the first to create this working directory, additionally
# make symlinks pointing to it from the auxiliary directories.
#
if not preexisting:
expTime = iso8601timestamp(nanos=nanos, utc=utc)
expTimeUUID = expTime+"-"+expUUID
bytimePath = os.path.join(bytimeDir, expTimeUUID)
bynamePath = os.path.join(bynameDir, expUUID)
os.symlink(os.path.relpath(byuuidPath, bytimeDir), bytimePath, True)
os.symlink(os.path.relpath(byuuidPath, bynameDir), bynamePath, True)
#
# Create handy .rsync-filter files.
#
with contextlib.suppress(OSError):
with open(os.path.join(baseDir, ".rsync-filter"), "x") as f:
f.write("#\n"
"# rsync filter rules.\n"
"#\n"
"# When the argument -F is given to rsync, the rules within will be obeyed.\n"
"#\n")
with contextlib.suppress(OSError):
with open(os.path.join(projDir, ".rsync-filter"), "x") as f:
f.write("#\n"
"# rsync filter rules.\n"
"#\n"
"# When the argument -F is given to rsync, the rules within will be obeyed.\n"
"#\n")
#
# Return the constructed workDir.
#
return byuuidPath | python | def createWorkDir(baseDir,
projName,
expUUID,
expNames = [],
nanos = True,
utc = False):
"""Create working directory for experiment if not existing already."""
#
# First, ensure the project's top-level hierarchy, especially by-uuid/,
# exists, so that the only possible failure is due to the creation of
# one additional directory.
#
projDir = os.path.join(baseDir, projName)
byuuidDir = os.path.join(projDir, "by-uuid")
bytimeDir = os.path.join(projDir, "by-time")
bynameDir = os.path.join(projDir, "by-name", *expNames)
byuuidPath = os.path.join(byuuidDir, expUUID)
os.makedirs(byuuidDir, mode=0o755, exist_ok=True)
os.makedirs(bytimeDir, mode=0o755, exist_ok=True)
os.makedirs(bynameDir, mode=0o755, exist_ok=True)
#
# Attempt the creation of the experiment workDir by its UUID. Record
# whether we were the original creators.
#
try:
preexisting = False
os.makedirs(byuuidPath,
mode = 0o755,
exist_ok = False)
except FileExistsError:
preexisting = True
#
# If we were the first to create this working directory, additionally
# make symlinks pointing to it from the auxiliary directories.
#
if not preexisting:
expTime = iso8601timestamp(nanos=nanos, utc=utc)
expTimeUUID = expTime+"-"+expUUID
bytimePath = os.path.join(bytimeDir, expTimeUUID)
bynamePath = os.path.join(bynameDir, expUUID)
os.symlink(os.path.relpath(byuuidPath, bytimeDir), bytimePath, True)
os.symlink(os.path.relpath(byuuidPath, bynameDir), bynamePath, True)
#
# Create handy .rsync-filter files.
#
with contextlib.suppress(OSError):
with open(os.path.join(baseDir, ".rsync-filter"), "x") as f:
f.write("#\n"
"# rsync filter rules.\n"
"#\n"
"# When the argument -F is given to rsync, the rules within will be obeyed.\n"
"#\n")
with contextlib.suppress(OSError):
with open(os.path.join(projDir, ".rsync-filter"), "x") as f:
f.write("#\n"
"# rsync filter rules.\n"
"#\n"
"# When the argument -F is given to rsync, the rules within will be obeyed.\n"
"#\n")
#
# Return the constructed workDir.
#
return byuuidPath | [
"def",
"createWorkDir",
"(",
"baseDir",
",",
"projName",
",",
"expUUID",
",",
"expNames",
"=",
"[",
"]",
",",
"nanos",
"=",
"True",
",",
"utc",
"=",
"False",
")",
":",
"#",
"# First, ensure the project's top-level hierarchy, especially by-uuid/,",
"# exists, so that the only possible failure is due to the creation of",
"# one additional directory.",
"#",
"projDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"baseDir",
",",
"projName",
")",
"byuuidDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"projDir",
",",
"\"by-uuid\"",
")",
"bytimeDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"projDir",
",",
"\"by-time\"",
")",
"bynameDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"projDir",
",",
"\"by-name\"",
",",
"*",
"expNames",
")",
"byuuidPath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"byuuidDir",
",",
"expUUID",
")",
"os",
".",
"makedirs",
"(",
"byuuidDir",
",",
"mode",
"=",
"0o755",
",",
"exist_ok",
"=",
"True",
")",
"os",
".",
"makedirs",
"(",
"bytimeDir",
",",
"mode",
"=",
"0o755",
",",
"exist_ok",
"=",
"True",
")",
"os",
".",
"makedirs",
"(",
"bynameDir",
",",
"mode",
"=",
"0o755",
",",
"exist_ok",
"=",
"True",
")",
"#",
"# Attempt the creation of the experiment workDir by its UUID. Record",
"# whether we were the original creators.",
"#",
"try",
":",
"preexisting",
"=",
"False",
"os",
".",
"makedirs",
"(",
"byuuidPath",
",",
"mode",
"=",
"0o755",
",",
"exist_ok",
"=",
"False",
")",
"except",
"FileExistsError",
":",
"preexisting",
"=",
"True",
"#",
"# If we were the first to create this working directory, additionally",
"# make symlinks pointing to it from the auxiliary directories.",
"#",
"if",
"not",
"preexisting",
":",
"expTime",
"=",
"iso8601timestamp",
"(",
"nanos",
"=",
"nanos",
",",
"utc",
"=",
"utc",
")",
"expTimeUUID",
"=",
"expTime",
"+",
"\"-\"",
"+",
"expUUID",
"bytimePath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"bytimeDir",
",",
"expTimeUUID",
")",
"bynamePath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"bynameDir",
",",
"expUUID",
")",
"os",
".",
"symlink",
"(",
"os",
".",
"path",
".",
"relpath",
"(",
"byuuidPath",
",",
"bytimeDir",
")",
",",
"bytimePath",
",",
"True",
")",
"os",
".",
"symlink",
"(",
"os",
".",
"path",
".",
"relpath",
"(",
"byuuidPath",
",",
"bynameDir",
")",
",",
"bynamePath",
",",
"True",
")",
"#",
"# Create handy .rsync-filter files.",
"#",
"with",
"contextlib",
".",
"suppress",
"(",
"OSError",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"baseDir",
",",
"\".rsync-filter\"",
")",
",",
"\"x\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"#\\n\"",
"\"# rsync filter rules.\\n\"",
"\"#\\n\"",
"\"# When the argument -F is given to rsync, the rules within will be obeyed.\\n\"",
"\"#\\n\"",
")",
"with",
"contextlib",
".",
"suppress",
"(",
"OSError",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"projDir",
",",
"\".rsync-filter\"",
")",
",",
"\"x\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"#\\n\"",
"\"# rsync filter rules.\\n\"",
"\"#\\n\"",
"\"# When the argument -F is given to rsync, the rules within will be obeyed.\\n\"",
"\"#\\n\"",
")",
"#",
"# Return the constructed workDir.",
"#",
"return",
"byuuidPath"
] | Create working directory for experiment if it does not already exist. | [
"Create",
"working",
"directory",
"for",
"experiment",
"if",
"not",
"existing",
"already",
"."
] | 1492a4f9d204a868c1a8a1d327bd108490b856b4 | https://github.com/obilaniu/Nauka/blob/1492a4f9d204a868c1a8a1d327bd108490b856b4/src/nauka/fhs.py#L21-L89 | train |
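A hedged usage sketch; the paths and experiment names are illustrative:

    import uuid

    work_dir = createWorkDir(baseDir='/tmp/experiments',
                             projName='mnist',
                             expUUID=str(uuid.uuid4()),
                             expNames=['baseline', 'lr0.01'])
    # work_dir is /tmp/experiments/mnist/by-uuid/<uuid>; the same directory is also
    # reachable via the by-time/<timestamp>-<uuid> and by-name/baseline/lr0.01/<uuid> symlinks.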
polyaxon/hestia | hestia/humanize.py | humanize_timesince | def humanize_timesince(start_time): # pylint:disable=too-many-return-statements
"""Creates a string representation of time since the given `start_time`."""
if not start_time:
return start_time
delta = local_now() - start_time
# assumption: negative delta values originate from clock
# differences on different app server machines
if delta.total_seconds() < 0:
return 'a few seconds ago'
num_years = delta.days // 365
if num_years > 0:
return '{} year{} ago'.format(
*((num_years, 's') if num_years > 1 else (num_years, '')))
num_weeks = delta.days // 7
if num_weeks > 0:
return '{} week{} ago'.format(
*((num_weeks, 's') if num_weeks > 1 else (num_weeks, '')))
num_days = delta.days
if num_days > 0:
return '{} day{} ago'.format(
*((num_days, 's') if num_days > 1 else (num_days, '')))
num_hours = delta.seconds // 3600
if num_hours > 0:
return '{} hour{} ago'.format(*((num_hours, 's') if num_hours > 1 else (num_hours, '')))
num_minutes = delta.seconds // 60
if num_minutes > 0:
return '{} minute{} ago'.format(
*((num_minutes, 's') if num_minutes > 1 else (num_minutes, '')))
return 'a few seconds ago' | python | def humanize_timesince(start_time): # pylint:disable=too-many-return-statements
"""Creates a string representation of time since the given `start_time`."""
if not start_time:
return start_time
delta = local_now() - start_time
# assumption: negative delta values originate from clock
# differences on different app server machines
if delta.total_seconds() < 0:
return 'a few seconds ago'
num_years = delta.days // 365
if num_years > 0:
return '{} year{} ago'.format(
*((num_years, 's') if num_years > 1 else (num_years, '')))
num_weeks = delta.days // 7
if num_weeks > 0:
return '{} week{} ago'.format(
*((num_weeks, 's') if num_weeks > 1 else (num_weeks, '')))
num_days = delta.days
if num_days > 0:
return '{} day{} ago'.format(
*((num_days, 's') if num_days > 1 else (num_days, '')))
num_hours = delta.seconds // 3600
if num_hours > 0:
return '{} hour{} ago'.format(*((num_hours, 's') if num_hours > 1 else (num_hours, '')))
num_minutes = delta.seconds // 60
if num_minutes > 0:
return '{} minute{} ago'.format(
*((num_minutes, 's') if num_minutes > 1 else (num_minutes, '')))
return 'a few seconds ago' | [
"def",
"humanize_timesince",
"(",
"start_time",
")",
":",
"# pylint:disable=too-many-return-statements",
"if",
"not",
"start_time",
":",
"return",
"start_time",
"delta",
"=",
"local_now",
"(",
")",
"-",
"start_time",
"# assumption: negative delta values originate from clock",
"# differences on different app server machines",
"if",
"delta",
".",
"total_seconds",
"(",
")",
"<",
"0",
":",
"return",
"'a few seconds ago'",
"num_years",
"=",
"delta",
".",
"days",
"//",
"365",
"if",
"num_years",
">",
"0",
":",
"return",
"'{} year{} ago'",
".",
"format",
"(",
"*",
"(",
"(",
"num_years",
",",
"'s'",
")",
"if",
"num_years",
">",
"1",
"else",
"(",
"num_years",
",",
"''",
")",
")",
")",
"num_weeks",
"=",
"delta",
".",
"days",
"//",
"7",
"if",
"num_weeks",
">",
"0",
":",
"return",
"'{} week{} ago'",
".",
"format",
"(",
"*",
"(",
"(",
"num_weeks",
",",
"'s'",
")",
"if",
"num_weeks",
">",
"1",
"else",
"(",
"num_weeks",
",",
"''",
")",
")",
")",
"num_days",
"=",
"delta",
".",
"days",
"if",
"num_days",
">",
"0",
":",
"return",
"'{} day{} ago'",
".",
"format",
"(",
"*",
"(",
"(",
"num_days",
",",
"'s'",
")",
"if",
"num_days",
">",
"1",
"else",
"(",
"num_days",
",",
"''",
")",
")",
")",
"num_hours",
"=",
"delta",
".",
"seconds",
"//",
"3600",
"if",
"num_hours",
">",
"0",
":",
"return",
"'{} hour{} ago'",
".",
"format",
"(",
"*",
"(",
"(",
"num_hours",
",",
"'s'",
")",
"if",
"num_hours",
">",
"1",
"else",
"(",
"num_hours",
",",
"''",
")",
")",
")",
"num_minutes",
"=",
"delta",
".",
"seconds",
"//",
"60",
"if",
"num_minutes",
">",
"0",
":",
"return",
"'{} minute{} ago'",
".",
"format",
"(",
"*",
"(",
"(",
"num_minutes",
",",
"'s'",
")",
"if",
"num_minutes",
">",
"1",
"else",
"(",
"num_minutes",
",",
"''",
")",
")",
")",
"return",
"'a few seconds ago'"
] | Creates a string representation of time since the given `start_time`. | [
"Creates",
"a",
"string",
"representation",
"of",
"time",
"since",
"the",
"given",
"start_time",
"."
] | 382ed139cff8bf35c987cfc30a31b72c0d6b808e | https://github.com/polyaxon/hestia/blob/382ed139cff8bf35c987cfc30a31b72c0d6b808e/hestia/humanize.py#L7-L43 | train |
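Examples of the output; local_now() is the same helper the function calls internally (its import is not shown in this entry):

    from datetime import timedelta

    humanize_timesince(local_now() - timedelta(days=3))      # -> '3 days ago'
    humanize_timesince(local_now() - timedelta(weeks=2))     # -> '2 weeks ago'
    humanize_timesince(local_now() - timedelta(seconds=30))  # -> 'a few seconds ago'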
polyaxon/hestia | hestia/humanize.py | humanize_timedelta | def humanize_timedelta(seconds):
"""Creates a string representation of timedelta."""
hours, remainder = divmod(seconds, 3600)
days, hours = divmod(hours, 24)
minutes, seconds = divmod(remainder, 60)
if days:
result = '{}d'.format(days)
if hours:
result += ' {}h'.format(hours)
if minutes:
result += ' {}m'.format(minutes)
return result
if hours:
result = '{}h'.format(hours)
if minutes:
result += ' {}m'.format(minutes)
return result
if minutes:
result = '{}m'.format(minutes)
if seconds:
result += ' {}s'.format(seconds)
return result
return '{}s'.format(seconds) | python | def humanize_timedelta(seconds):
"""Creates a string representation of timedelta."""
hours, remainder = divmod(seconds, 3600)
days, hours = divmod(hours, 24)
minutes, seconds = divmod(remainder, 60)
if days:
result = '{}d'.format(days)
if hours:
result += ' {}h'.format(hours)
if minutes:
result += ' {}m'.format(minutes)
return result
if hours:
result = '{}h'.format(hours)
if minutes:
result += ' {}m'.format(minutes)
return result
if minutes:
result = '{}m'.format(minutes)
if seconds:
result += ' {}s'.format(seconds)
return result
return '{}s'.format(seconds) | [
"def",
"humanize_timedelta",
"(",
"seconds",
")",
":",
"hours",
",",
"remainder",
"=",
"divmod",
"(",
"seconds",
",",
"3600",
")",
"days",
",",
"hours",
"=",
"divmod",
"(",
"hours",
",",
"24",
")",
"minutes",
",",
"seconds",
"=",
"divmod",
"(",
"remainder",
",",
"60",
")",
"if",
"days",
":",
"result",
"=",
"'{}d'",
".",
"format",
"(",
"days",
")",
"if",
"hours",
":",
"result",
"+=",
"' {}h'",
".",
"format",
"(",
"hours",
")",
"if",
"minutes",
":",
"result",
"+=",
"' {}m'",
".",
"format",
"(",
"minutes",
")",
"return",
"result",
"if",
"hours",
":",
"result",
"=",
"'{}h'",
".",
"format",
"(",
"hours",
")",
"if",
"minutes",
":",
"result",
"+=",
"' {}m'",
".",
"format",
"(",
"minutes",
")",
"return",
"result",
"if",
"minutes",
":",
"result",
"=",
"'{}m'",
".",
"format",
"(",
"minutes",
")",
"if",
"seconds",
":",
"result",
"+=",
"' {}s'",
".",
"format",
"(",
"seconds",
")",
"return",
"result",
"return",
"'{}s'",
".",
"format",
"(",
"seconds",
")"
] | Creates a string representation of timedelta. | [
"Creates",
"a",
"string",
"representation",
"of",
"timedelta",
"."
] | 382ed139cff8bf35c987cfc30a31b72c0d6b808e | https://github.com/polyaxon/hestia/blob/382ed139cff8bf35c987cfc30a31b72c0d6b808e/hestia/humanize.py#L46-L72 | train |
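Worked examples; once a larger unit is present, the smallest units are dropped:

    humanize_timedelta(93784)  # 1 day + 2 h + 3 min + 4 s -> '1d 2h 3m' (seconds omitted)
    humanize_timedelta(7384)   # -> '2h 3m'
    humanize_timedelta(184)    # -> '3m 4s'
    humanize_timedelta(0)      # -> '0s'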
peopledoc/mock-services | mock_services/http_mock.py | HttpMock.start | def start(self):
"""Overrides default start behaviour by raising ConnectionError instead
of custom requests_mock.exceptions.NoMockAddress.
"""
if self._http_last_send is not None:
raise RuntimeError('HttpMock has already been started')
# 1) save request.Session.send in self._last_send
# 2) replace request.Session.send with MockerCore send function
super(HttpMock, self).start()
# 3) save MockerCore send function in self._http_last_send
# 4) replace request.Session.send with HttpMock send function
self._patch_last_send() | python | def start(self):
"""Overrides default start behaviour by raising ConnectionError instead
of custom requests_mock.exceptions.NoMockAddress.
"""
if self._http_last_send is not None:
raise RuntimeError('HttpMock has already been started')
# 1) save request.Session.send in self._last_send
# 2) replace request.Session.send with MockerCore send function
super(HttpMock, self).start()
# 3) save MockerCore send function in self._http_last_send
# 4) replace request.Session.send with HttpMock send function
self._patch_last_send() | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"_http_last_send",
"is",
"not",
"None",
":",
"raise",
"RuntimeError",
"(",
"'HttpMock has already been started'",
")",
"# 1) save request.Session.send in self._last_send",
"# 2) replace request.Session.send with MockerCore send function",
"super",
"(",
"HttpMock",
",",
"self",
")",
".",
"start",
"(",
")",
"# 3) save MockerCore send function in self._http_last_send",
"# 4) replace request.Session.send with HttpMock send function",
"self",
".",
"_patch_last_send",
"(",
")"
] | Overrides default start behaviour by raising ConnectionError instead
of custom requests_mock.exceptions.NoMockAddress. | [
"Overrides",
"default",
"start",
"behaviour",
"by",
"raising",
"ConnectionError",
"instead",
"of",
"custom",
"requests_mock",
".",
"exceptions",
".",
"NoMockAddress",
"."
] | fd3838280df8869725b538768357435eedf299c1 | https://github.com/peopledoc/mock-services/blob/fd3838280df8869725b538768357435eedf299c1/mock_services/http_mock.py#L63-L76 | train |
axel-events/axel | axel/axel.py | Event.unhandle | def unhandle(self, handler):
""" Unregisters a handler """
h, _, _ = self._extract(handler)
key = hash(h)
with self._hlock:
if key not in self.handlers:
raise ValueError('Handler "%s" was not found' % str(h))
handlers = self.handlers.copy()
del handlers[key]
self.handlers = handlers
return self | python | def unhandle(self, handler):
""" Unregisters a handler """
h, _, _ = self._extract(handler)
key = hash(h)
with self._hlock:
if key not in self.handlers:
raise ValueError('Handler "%s" was not found' % str(h))
handlers = self.handlers.copy()
del handlers[key]
self.handlers = handlers
return self | [
"def",
"unhandle",
"(",
"self",
",",
"handler",
")",
":",
"h",
",",
"_",
",",
"_",
"=",
"self",
".",
"_extract",
"(",
"handler",
")",
"key",
"=",
"hash",
"(",
"h",
")",
"with",
"self",
".",
"_hlock",
":",
"if",
"key",
"not",
"in",
"self",
".",
"handlers",
":",
"raise",
"ValueError",
"(",
"'Handler \"%s\" was not found'",
"%",
"str",
"(",
"h",
")",
")",
"handlers",
"=",
"self",
".",
"handlers",
".",
"copy",
"(",
")",
"del",
"handlers",
"[",
"key",
"]",
"self",
".",
"handlers",
"=",
"handlers",
"return",
"self"
] | Unregisters a handler | [
"Unregisters",
"a",
"handler"
] | 08a663347ef21614b96f92f60f4de57a502db73c | https://github.com/axel-events/axel/blob/08a663347ef21614b96f92f60f4de57a502db73c/axel/axel.py#L163-L173 | train |
axel-events/axel | axel/axel.py | Event.fire | def fire(self, *args, **kw):
""" Stores all registered handlers in a queue for processing """
result = []
with self._hlock:
handlers = self.handlers
if self.threads == 0: # same-thread execution - synchronized
for k in handlers:
# handler, memoize, timeout
h, m, t = handlers[k]
try:
r = self._memoize(h, m, t, *args, **kw)
result.append(tuple(r))
except:
result.append((False, self._error(sys.exc_info()), h))
elif self.threads > 0: # multi-thread execution - desynchronized if self.threads > 1
queue = Queue()
# result lock just in case [].append() is not
# thread-safe in other Python implementations
rlock = RLock()
def _execute(*args, **kw):
""" Executes all handlers stored in the queue """
while True:
try:
item = queue.get()
if item is None:
queue.task_done()
break
# handler, memoize, timeout
h, m, t = handlers[item] # call under active lock
try:
r = self._memoize(h, m, t, *args, **kw)
if not self.asynch:
with rlock:
result.append(tuple(r))
except:
if not self.asynch:
with rlock:
result.append((False, self._error(sys.exc_info()), h))
queue.task_done()
except Empty: # never triggered, just to be safe
break
if handlers:
threads = self._threads(handlers=handlers)
for _ in range(threads):
t = Thread(target=_execute, args=args, kwargs=kw)
t.daemon = True
t.start()
for k in handlers:
queue.put(k)
if self.asynch: # main thread, no locking required
h, _, _ = handlers[k]
result.append((None, None, h))
for _ in range(threads):
queue.put(None) # stop each worker
if not self.asynch:
queue.join()
return tuple(result) or None | python | def fire(self, *args, **kw):
""" Stores all registered handlers in a queue for processing """
result = []
with self._hlock:
handlers = self.handlers
if self.threads == 0: # same-thread execution - synchronized
for k in handlers:
# handler, memoize, timeout
h, m, t = handlers[k]
try:
r = self._memoize(h, m, t, *args, **kw)
result.append(tuple(r))
except:
result.append((False, self._error(sys.exc_info()), h))
elif self.threads > 0: # multi-thread execution - desynchronized if self.threads > 1
queue = Queue()
# result lock just in case [].append() is not
# thread-safe in other Python implementations
rlock = RLock()
def _execute(*args, **kw):
""" Executes all handlers stored in the queue """
while True:
try:
item = queue.get()
if item is None:
queue.task_done()
break
# handler, memoize, timeout
h, m, t = handlers[item] # call under active lock
try:
r = self._memoize(h, m, t, *args, **kw)
if not self.asynch:
with rlock:
result.append(tuple(r))
except:
if not self.asynch:
with rlock:
result.append((False, self._error(sys.exc_info()), h))
queue.task_done()
except Empty: # never triggered, just to be safe
break
if handlers:
threads = self._threads(handlers=handlers)
for _ in range(threads):
t = Thread(target=_execute, args=args, kwargs=kw)
t.daemon = True
t.start()
for k in handlers:
queue.put(k)
if self.asynch: # main thread, no locking required
h, _, _ = handlers[k]
result.append((None, None, h))
for _ in range(threads):
queue.put(None) # stop each worker
if not self.asynch:
queue.join()
return tuple(result) or None | [
"def",
"fire",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"result",
"=",
"[",
"]",
"with",
"self",
".",
"_hlock",
":",
"handlers",
"=",
"self",
".",
"handlers",
"if",
"self",
".",
"threads",
"==",
"0",
":",
"# same-thread execution - synchronized\r",
"for",
"k",
"in",
"handlers",
":",
"# handler, memoize, timeout\r",
"h",
",",
"m",
",",
"t",
"=",
"handlers",
"[",
"k",
"]",
"try",
":",
"r",
"=",
"self",
".",
"_memoize",
"(",
"h",
",",
"m",
",",
"t",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"result",
".",
"append",
"(",
"tuple",
"(",
"r",
")",
")",
"except",
":",
"result",
".",
"append",
"(",
"(",
"False",
",",
"self",
".",
"_error",
"(",
"sys",
".",
"exc_info",
"(",
")",
")",
",",
"h",
")",
")",
"elif",
"self",
".",
"threads",
">",
"0",
":",
"# multi-thread execution - desynchronized if self.threads > 1\r",
"queue",
"=",
"Queue",
"(",
")",
"# result lock just in case [].append() is not \r",
"# thread-safe in other Python implementations\r",
"rlock",
"=",
"RLock",
"(",
")",
"def",
"_execute",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"\"\"\" Executes all handlers stored in the queue \"\"\"",
"while",
"True",
":",
"try",
":",
"item",
"=",
"queue",
".",
"get",
"(",
")",
"if",
"item",
"is",
"None",
":",
"queue",
".",
"task_done",
"(",
")",
"break",
"# handler, memoize, timeout\r",
"h",
",",
"m",
",",
"t",
"=",
"handlers",
"[",
"item",
"]",
"# call under active lock\r",
"try",
":",
"r",
"=",
"self",
".",
"_memoize",
"(",
"h",
",",
"m",
",",
"t",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"if",
"not",
"self",
".",
"asynch",
":",
"with",
"rlock",
":",
"result",
".",
"append",
"(",
"tuple",
"(",
"r",
")",
")",
"except",
":",
"if",
"not",
"self",
".",
"asynch",
":",
"with",
"rlock",
":",
"result",
".",
"append",
"(",
"(",
"False",
",",
"self",
".",
"_error",
"(",
"sys",
".",
"exc_info",
"(",
")",
")",
",",
"h",
")",
")",
"queue",
".",
"task_done",
"(",
")",
"except",
"Empty",
":",
"# never triggered, just to be safe\r",
"break",
"if",
"handlers",
":",
"threads",
"=",
"self",
".",
"_threads",
"(",
"handlers",
"=",
"handlers",
")",
"for",
"_",
"in",
"range",
"(",
"threads",
")",
":",
"t",
"=",
"Thread",
"(",
"target",
"=",
"_execute",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kw",
")",
"t",
".",
"daemon",
"=",
"True",
"t",
".",
"start",
"(",
")",
"for",
"k",
"in",
"handlers",
":",
"queue",
".",
"put",
"(",
"k",
")",
"if",
"self",
".",
"asynch",
":",
"# main thread, no locking required\r",
"h",
",",
"_",
",",
"_",
"=",
"handlers",
"[",
"k",
"]",
"result",
".",
"append",
"(",
"(",
"None",
",",
"None",
",",
"h",
")",
")",
"for",
"_",
"in",
"range",
"(",
"threads",
")",
":",
"queue",
".",
"put",
"(",
"None",
")",
"# stop each worker\r",
"if",
"not",
"self",
".",
"asynch",
":",
"queue",
".",
"join",
"(",
")",
"return",
"tuple",
"(",
"result",
")",
"or",
"None"
] | Stores all registered handlers in a queue for processing | [
"Stores",
"all",
"registered",
"handlers",
"in",
"a",
"queue",
"for",
"processing"
] | 08a663347ef21614b96f92f60f4de57a502db73c | https://github.com/axel-events/axel/blob/08a663347ef21614b96f92f60f4de57a502db73c/axel/axel.py#L175-L247 | train |
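A hedged usage sketch of the axel Event above; the '+='/'-=' shorthands are assumed to be the documented aliases for handle()/unhandle():

    def greet(name):
        return 'hello ' + name

    event = Event()             # default: synchronous multi-threaded execution
    event += greet              # register a handler
    results = event.fire('bob')
    # results is a tuple of (flag, result, handler) triples, e.g.
    # ((True, 'hello bob', <function greet ...>),); flag is False if the handler raised.
    event -= greet              # unregister, same as event.unhandle(greet)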
axel-events/axel | axel/axel.py | Event.clear | def clear(self):
""" Discards all registered handlers and cached results """
with self._hlock:
self.handlers.clear()
with self._mlock:
self.memoize.clear() | python | def clear(self):
""" Discards all registered handlers and cached results """
with self._hlock:
self.handlers.clear()
with self._mlock:
self.memoize.clear() | [
"def",
"clear",
"(",
"self",
")",
":",
"with",
"self",
".",
"_hlock",
":",
"self",
".",
"handlers",
".",
"clear",
"(",
")",
"with",
"self",
".",
"_mlock",
":",
"self",
".",
"memoize",
".",
"clear",
"(",
")"
] | Discards all registered handlers and cached results | [
"Discards",
"all",
"registered",
"handlers",
"and",
"cached",
"results"
] | 08a663347ef21614b96f92f60f4de57a502db73c | https://github.com/axel-events/axel/blob/08a663347ef21614b96f92f60f4de57a502db73c/axel/axel.py#L254-L259 | train |
axel-events/axel | axel/axel.py | Event._timeout | def _timeout(self, timeout, handler, *args, **kw):
""" Controls the time allocated for the execution of a method """
t = spawn_thread(target=handler, args=args, kw=kw)
t.daemon = True
t.start()
t.join(timeout)
if not t.is_alive():
if t.exc_info:
return t.exc_info
return t.result
else:
try:
msg = '[%s] Execution was forcefully terminated'
raise RuntimeError(msg % t.name)
except:
return sys.exc_info() | python | def _timeout(self, timeout, handler, *args, **kw):
""" Controls the time allocated for the execution of a method """
t = spawn_thread(target=handler, args=args, kw=kw)
t.daemon = True
t.start()
t.join(timeout)
if not t.is_alive():
if t.exc_info:
return t.exc_info
return t.result
else:
try:
msg = '[%s] Execution was forcefully terminated'
raise RuntimeError(msg % t.name)
except:
return sys.exc_info() | [
"def",
"_timeout",
"(",
"self",
",",
"timeout",
",",
"handler",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"t",
"=",
"spawn_thread",
"(",
"target",
"=",
"handler",
",",
"args",
"=",
"args",
",",
"kw",
"=",
"kw",
")",
"t",
".",
"daemon",
"=",
"True",
"t",
".",
"start",
"(",
")",
"t",
".",
"join",
"(",
"timeout",
")",
"if",
"not",
"t",
".",
"is_alive",
"(",
")",
":",
"if",
"t",
".",
"exc_info",
":",
"return",
"t",
".",
"exc_info",
"return",
"t",
".",
"result",
"else",
":",
"try",
":",
"msg",
"=",
"'[%s] Execution was forcefully terminated'",
"raise",
"RuntimeError",
"(",
"msg",
"%",
"t",
".",
"name",
")",
"except",
":",
"return",
"sys",
".",
"exc_info",
"(",
")"
] | Controls the time allocated for the execution of a method | [
"Controls",
"the",
"time",
"allocated",
"for",
"the",
"execution",
"of",
"a",
"method"
] | 08a663347ef21614b96f92f60f4de57a502db73c | https://github.com/axel-events/axel/blob/08a663347ef21614b96f92f60f4de57a502db73c/axel/axel.py#L336-L352 | train |
axel-events/axel | axel/axel.py | Event._threads | def _threads(self, handlers):
""" Calculates maximum number of threads that will be started """
if self.threads < len(handlers):
return self.threads
return len(handlers) | python | def _threads(self, handlers):
""" Calculates maximum number of threads that will be started """
if self.threads < len(handlers):
return self.threads
return len(handlers) | [
"def",
"_threads",
"(",
"self",
",",
"handlers",
")",
":",
"if",
"self",
".",
"threads",
"<",
"len",
"(",
"handlers",
")",
":",
"return",
"self",
".",
"threads",
"return",
"len",
"(",
"handlers",
")"
] | Calculates maximum number of threads that will be started | [
"Calculates",
"maximum",
"number",
"of",
"threads",
"that",
"will",
"be",
"started"
] | 08a663347ef21614b96f92f60f4de57a502db73c | https://github.com/axel-events/axel/blob/08a663347ef21614b96f92f60f4de57a502db73c/axel/axel.py#L354-L358 | train |
azogue/i2csense | i2csense/htu21d.py | HTU21D.update | def update(self):
"""Read raw data and calculate temperature and humidity."""
if not self._ok:
self.log_error("Trying to restore OK mode w/ soft reset")
self._ok = self._soft_reset()
try:
self._bus.write_byte(self._i2c_add, CMD_READ_TEMP_NOHOLD)
sleep(MEASUREMENT_WAIT_TIME)
buf_t = self._bus.read_i2c_block_data(
self._i2c_add, CMD_READ_TEMP_HOLD, 3)
self._bus.write_byte(self._i2c_add, CMD_READ_HUM_NOHOLD)
sleep(MEASUREMENT_WAIT_TIME)
buf_h = self._bus.read_i2c_block_data(
self._i2c_add, CMD_READ_HUM_HOLD, 3)
except OSError as exc:
self._ok = False
self.log_error("Bad reading: %s", exc)
return
if self._crc8check(buf_t):
temp = (buf_t[0] << 8 | buf_t[1]) & 0xFFFC
self._temperature = self._calc_temp(temp)
if self._crc8check(buf_h):
humid = (buf_h[0] << 8 | buf_h[1]) & 0xFFFC
rh_actual = self._calc_humid(humid)
# For temperature coefficient compensation
rh_final = self._temp_coefficient(rh_actual, self._temperature)
rh_final = 100.0 if rh_final > 100 else rh_final # Clamp > 100
rh_final = 0.0 if rh_final < 0 else rh_final # Clamp < 0
self._humidity = rh_final
else:
self._humidity = -255
self._ok = False
self.log_error("Bad CRC error with humidity")
else:
self._temperature = -255
self._ok = False
self.log_error("Bad CRC error with temperature") | python | def update(self):
"""Read raw data and calculate temperature and humidity."""
if not self._ok:
self.log_error("Trying to restore OK mode w/ soft reset")
self._ok = self._soft_reset()
try:
self._bus.write_byte(self._i2c_add, CMD_READ_TEMP_NOHOLD)
sleep(MEASUREMENT_WAIT_TIME)
buf_t = self._bus.read_i2c_block_data(
self._i2c_add, CMD_READ_TEMP_HOLD, 3)
self._bus.write_byte(self._i2c_add, CMD_READ_HUM_NOHOLD)
sleep(MEASUREMENT_WAIT_TIME)
buf_h = self._bus.read_i2c_block_data(
self._i2c_add, CMD_READ_HUM_HOLD, 3)
except OSError as exc:
self._ok = False
self.log_error("Bad reading: %s", exc)
return
if self._crc8check(buf_t):
temp = (buf_t[0] << 8 | buf_t[1]) & 0xFFFC
self._temperature = self._calc_temp(temp)
if self._crc8check(buf_h):
humid = (buf_h[0] << 8 | buf_h[1]) & 0xFFFC
rh_actual = self._calc_humid(humid)
# For temperature coefficient compensation
rh_final = self._temp_coefficient(rh_actual, self._temperature)
rh_final = 100.0 if rh_final > 100 else rh_final # Clamp > 100
rh_final = 0.0 if rh_final < 0 else rh_final # Clamp < 0
self._humidity = rh_final
else:
self._humidity = -255
self._ok = False
self.log_error("Bad CRC error with humidity")
else:
self._temperature = -255
self._ok = False
self.log_error("Bad CRC error with temperature") | [
"def",
"update",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ok",
":",
"self",
".",
"log_error",
"(",
"\"Trying to restore OK mode w/ soft reset\"",
")",
"self",
".",
"_ok",
"=",
"self",
".",
"_soft_reset",
"(",
")",
"try",
":",
"self",
".",
"_bus",
".",
"write_byte",
"(",
"self",
".",
"_i2c_add",
",",
"CMD_READ_TEMP_NOHOLD",
")",
"sleep",
"(",
"MEASUREMENT_WAIT_TIME",
")",
"buf_t",
"=",
"self",
".",
"_bus",
".",
"read_i2c_block_data",
"(",
"self",
".",
"_i2c_add",
",",
"CMD_READ_TEMP_HOLD",
",",
"3",
")",
"self",
".",
"_bus",
".",
"write_byte",
"(",
"self",
".",
"_i2c_add",
",",
"CMD_READ_HUM_NOHOLD",
")",
"sleep",
"(",
"MEASUREMENT_WAIT_TIME",
")",
"buf_h",
"=",
"self",
".",
"_bus",
".",
"read_i2c_block_data",
"(",
"self",
".",
"_i2c_add",
",",
"CMD_READ_HUM_HOLD",
",",
"3",
")",
"except",
"OSError",
"as",
"exc",
":",
"self",
".",
"_ok",
"=",
"False",
"self",
".",
"log_error",
"(",
"\"Bad reading: %s\"",
",",
"exc",
")",
"return",
"if",
"self",
".",
"_crc8check",
"(",
"buf_t",
")",
":",
"temp",
"=",
"(",
"buf_t",
"[",
"0",
"]",
"<<",
"8",
"|",
"buf_t",
"[",
"1",
"]",
")",
"&",
"0xFFFC",
"self",
".",
"_temperature",
"=",
"self",
".",
"_calc_temp",
"(",
"temp",
")",
"if",
"self",
".",
"_crc8check",
"(",
"buf_h",
")",
":",
"humid",
"=",
"(",
"buf_h",
"[",
"0",
"]",
"<<",
"8",
"|",
"buf_h",
"[",
"1",
"]",
")",
"&",
"0xFFFC",
"rh_actual",
"=",
"self",
".",
"_calc_humid",
"(",
"humid",
")",
"# For temperature coefficient compensation",
"rh_final",
"=",
"self",
".",
"_temp_coefficient",
"(",
"rh_actual",
",",
"self",
".",
"_temperature",
")",
"rh_final",
"=",
"100.0",
"if",
"rh_final",
">",
"100",
"else",
"rh_final",
"# Clamp > 100",
"rh_final",
"=",
"0.0",
"if",
"rh_final",
"<",
"0",
"else",
"rh_final",
"# Clamp < 0",
"self",
".",
"_humidity",
"=",
"rh_final",
"else",
":",
"self",
".",
"_humidity",
"=",
"-",
"255",
"self",
".",
"_ok",
"=",
"False",
"self",
".",
"log_error",
"(",
"\"Bad CRC error with humidity\"",
")",
"else",
":",
"self",
".",
"_temperature",
"=",
"-",
"255",
"self",
".",
"_ok",
"=",
"False",
"self",
".",
"log_error",
"(",
"\"Bad CRC error with temperature\"",
")"
] | Read raw data and calculate temperature and humidity. | [
"Read",
"raw",
"data",
"and",
"calculate",
"temperature",
"and",
"humidity",
"."
] | ecc6806dcee9de827a5414a9e836d271fedca9b9 | https://github.com/azogue/i2csense/blob/ecc6806dcee9de827a5414a9e836d271fedca9b9/i2csense/htu21d.py#L87-L126 | train |
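A hedged read loop; the constructor signature and the smbus bus number are assumptions (bus 1 is typical on a Raspberry Pi), and the attributes read here are the private ones set by update() above:

    import smbus

    bus = smbus.SMBus(1)   # assumption: I2C bus 1
    sensor = HTU21D(bus)   # assumption: the constructor takes the bus object
    sensor.update()
    print(sensor._temperature, sensor._humidity)  # -255 marks a failed/CRC-invalid reading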
reanahub/reana-db | reana_db/models.py | Workflow.get_owner_access_token | def get_owner_access_token(self):
"""Return workflow owner access token."""
from .database import Session
db_session = Session.object_session(self)
owner = db_session.query(User).filter_by(
id_=self.owner_id).first()
return owner.access_token | python | def get_owner_access_token(self):
"""Return workflow owner access token."""
from .database import Session
db_session = Session.object_session(self)
owner = db_session.query(User).filter_by(
id_=self.owner_id).first()
return owner.access_token | [
"def",
"get_owner_access_token",
"(",
"self",
")",
":",
"from",
".",
"database",
"import",
"Session",
"db_session",
"=",
"Session",
".",
"object_session",
"(",
"self",
")",
"owner",
"=",
"db_session",
".",
"query",
"(",
"User",
")",
".",
"filter_by",
"(",
"id_",
"=",
"self",
".",
"owner_id",
")",
".",
"first",
"(",
")",
"return",
"owner",
".",
"access_token"
] | Return workflow owner access token. | [
"Return",
"workflow",
"owner",
"access",
"token",
"."
] | 4efcb46d23af035689964d8c25a804c5a8f1dfc3 | https://github.com/reanahub/reana-db/blob/4efcb46d23af035689964d8c25a804c5a8f1dfc3/reana_db/models.py#L167-L173 | train |
reanahub/reana-db | reana_db/models.py | Workflow.update_workflow_status | def update_workflow_status(db_session, workflow_uuid, status,
new_logs='', message=None):
"""Update database workflow status.
:param workflow_uuid: UUID which represents the workflow.
:param status: String that represents the workflow status.
:param new_logs: New logs from workflow execution.
:param message: Unused.
"""
try:
workflow = \
db_session.query(Workflow).filter_by(id_=workflow_uuid).first()
if not workflow:
raise Exception('Workflow {0} doesn\'t exist in database.'.
format(workflow_uuid))
if status:
workflow.status = status
if new_logs:
workflow.logs = (workflow.logs or '') + new_logs + '\n'
db_session.commit()
except Exception as e:
raise e | python | def update_workflow_status(db_session, workflow_uuid, status,
new_logs='', message=None):
"""Update database workflow status.
:param workflow_uuid: UUID which represents the workflow.
:param status: String that represents the workflow status.
:param new_logs: New logs from workflow execution.
:param message: Unused.
"""
try:
workflow = \
db_session.query(Workflow).filter_by(id_=workflow_uuid).first()
if not workflow:
raise Exception('Workflow {0} doesn\'t exist in database.'.
format(workflow_uuid))
if status:
workflow.status = status
if new_logs:
workflow.logs = (workflow.logs or '') + new_logs + '\n'
db_session.commit()
except Exception as e:
raise e | [
"def",
"update_workflow_status",
"(",
"db_session",
",",
"workflow_uuid",
",",
"status",
",",
"new_logs",
"=",
"''",
",",
"message",
"=",
"None",
")",
":",
"try",
":",
"workflow",
"=",
"db_session",
".",
"query",
"(",
"Workflow",
")",
".",
"filter_by",
"(",
"id_",
"=",
"workflow_uuid",
")",
".",
"first",
"(",
")",
"if",
"not",
"workflow",
":",
"raise",
"Exception",
"(",
"'Workflow {0} doesn\\'t exist in database.'",
".",
"format",
"(",
"workflow_uuid",
")",
")",
"if",
"status",
":",
"workflow",
".",
"status",
"=",
"status",
"if",
"new_logs",
":",
"workflow",
".",
"logs",
"=",
"(",
"workflow",
".",
"logs",
"or",
"''",
")",
"+",
"new_logs",
"+",
"'\\n'",
"db_session",
".",
"commit",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Update database workflow status.
:param workflow_uuid: UUID which represents the workflow.
:param status: String that represents the workflow status.
:param new_logs: New logs from workflow execution.
:param message: Unused. | [
"Update",
"database",
"workflow",
"status",
"."
] | 4efcb46d23af035689964d8c25a804c5a8f1dfc3 | https://github.com/reanahub/reana-db/blob/4efcb46d23af035689964d8c25a804c5a8f1dfc3/reana_db/models.py#L176-L198 | train |
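A hedged call sketch; the session import mirrors the one inside get_owner_access_token above, the method is assumed to be a staticmethod, and the UUID/status values are illustrative:

    from reana_db.database import Session

    Workflow.update_workflow_status(
        Session,
        workflow_uuid='bc8db0e5-0000-0000-0000-000000000000',  # hypothetical UUID
        status='finished',                                     # documented above as a string
        new_logs='all steps completed')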
bacher09/xrcon | xrcon/utils.py | parse_server_addr | def parse_server_addr(str_addr, default_port=26000):
"""Parse address and returns host and port
Args:
str_addr --- string that contains server ip or hostname and optionaly
port
Returns: tuple (host, port)
Examples:
>>> parse_server_addr('127.0.0.1:26006')
('127.0.0.1', 26006)
>>> parse_server_addr('[2001:db8:85a3:8d3:1319:8a2e:370:7348]:26006')
('2001:db8:85a3:8d3:1319:8a2e:370:7348', 26006)
>>> parse_server_addr('[2001:db8:85a3:8d3:1319:8a2e:370:7348]')
('2001:db8:85a3:8d3:1319:8a2e:370:7348', 26000)
>>> parse_server_addr('localhost:123')
('localhost', 123)
>>> parse_server_addr('localhost:1d23')
Traceback (most recent call last):
...
ValueError: Bad address string "localhost:1d23"
"""
m = ADDR_STR_RE.match(str_addr)
if m is None:
raise ValueError('Bad address string "{0}"'.format(str_addr))
dct = m.groupdict()
port = dct.get('port')
if port is None:
port = default_port
else:
        port = int(port)  # Caution: could raise ValueError or TypeError
if port == 0:
raise ValueError("Port can't be zero")
host = dct['host'] if dct['host'] else dct['host6']
return host, port | python | def parse_server_addr(str_addr, default_port=26000):
"""Parse address and returns host and port
Args:
        str_addr --- string that contains server ip or hostname and optionally
port
Returns: tuple (host, port)
Examples:
>>> parse_server_addr('127.0.0.1:26006')
('127.0.0.1', 26006)
>>> parse_server_addr('[2001:db8:85a3:8d3:1319:8a2e:370:7348]:26006')
('2001:db8:85a3:8d3:1319:8a2e:370:7348', 26006)
>>> parse_server_addr('[2001:db8:85a3:8d3:1319:8a2e:370:7348]')
('2001:db8:85a3:8d3:1319:8a2e:370:7348', 26000)
>>> parse_server_addr('localhost:123')
('localhost', 123)
>>> parse_server_addr('localhost:1d23')
Traceback (most recent call last):
...
ValueError: Bad address string "localhost:1d23"
"""
m = ADDR_STR_RE.match(str_addr)
if m is None:
raise ValueError('Bad address string "{0}"'.format(str_addr))
dct = m.groupdict()
port = dct.get('port')
if port is None:
port = default_port
else:
        port = int(port)  # Caution: could raise ValueError or TypeError
if port == 0:
raise ValueError("Port can't be zero")
host = dct['host'] if dct['host'] else dct['host6']
return host, port | [
"def",
"parse_server_addr",
"(",
"str_addr",
",",
"default_port",
"=",
"26000",
")",
":",
"m",
"=",
"ADDR_STR_RE",
".",
"match",
"(",
"str_addr",
")",
"if",
"m",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Bad address string \"{0}\"'",
".",
"format",
"(",
"str_addr",
")",
")",
"dct",
"=",
"m",
".",
"groupdict",
"(",
")",
"port",
"=",
"dct",
".",
"get",
"(",
"'port'",
")",
"if",
"port",
"is",
"None",
":",
"port",
"=",
"default_port",
"else",
":",
"port",
"=",
"int",
"(",
"port",
")",
"# Caution: could raise ValueEror or TypeError",
"if",
"port",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Port can't be zero\"",
")",
"host",
"=",
"dct",
"[",
"'host'",
"]",
"if",
"dct",
"[",
"'host'",
"]",
"else",
"dct",
"[",
"'host6'",
"]",
"return",
"host",
",",
"port"
] | Parse address and return host and port
Args:
str_addr --- string that contains server ip or hostname and optionally
port
Returns: tuple (host, port)
Examples:
>>> parse_server_addr('127.0.0.1:26006')
('127.0.0.1', 26006)
>>> parse_server_addr('[2001:db8:85a3:8d3:1319:8a2e:370:7348]:26006')
('2001:db8:85a3:8d3:1319:8a2e:370:7348', 26006)
>>> parse_server_addr('[2001:db8:85a3:8d3:1319:8a2e:370:7348]')
('2001:db8:85a3:8d3:1319:8a2e:370:7348', 26000)
>>> parse_server_addr('localhost:123')
('localhost', 123)
>>> parse_server_addr('localhost:1d23')
Traceback (most recent call last):
...
ValueError: Bad address string "localhost:1d23" | [
"Parse",
"address",
"and",
"returns",
"host",
"and",
"port"
] | 6a883f780265cbca31af7a379dc7cb28fdd8b73f | https://github.com/bacher09/xrcon/blob/6a883f780265cbca31af7a379dc7cb28fdd8b73f/xrcon/utils.py#L103-L142 | train |
pyQode/pyqode.cobol | pyqode/cobol/modes/goto.py | GoToDefinitionMode.request_goto | def request_goto(self, tc=None):
"""
Request a go to assignment.
:param tc: Text cursor which contains the text that we must look for
its assignment. Can be None to go to the text that is under
the text cursor.
:type tc: QtGui.QTextCursor
"""
if not tc:
tc = TextHelper(self.editor).word_under_cursor(
select_whole_word=True)
if not self._definition or isinstance(self.sender(), QAction):
self.select_word(tc)
if self._definition is not None:
QTimer.singleShot(100, self._goto_def) | python | def request_goto(self, tc=None):
"""
Request a go to assignment.
:param tc: Text cursor which contains the text that we must look for
its assignment. Can be None to go to the text that is under
the text cursor.
:type tc: QtGui.QTextCursor
"""
if not tc:
tc = TextHelper(self.editor).word_under_cursor(
select_whole_word=True)
if not self._definition or isinstance(self.sender(), QAction):
self.select_word(tc)
if self._definition is not None:
QTimer.singleShot(100, self._goto_def) | [
"def",
"request_goto",
"(",
"self",
",",
"tc",
"=",
"None",
")",
":",
"if",
"not",
"tc",
":",
"tc",
"=",
"TextHelper",
"(",
"self",
".",
"editor",
")",
".",
"word_under_cursor",
"(",
"select_whole_word",
"=",
"True",
")",
"if",
"not",
"self",
".",
"_definition",
"or",
"isinstance",
"(",
"self",
".",
"sender",
"(",
")",
",",
"QAction",
")",
":",
"self",
".",
"select_word",
"(",
"tc",
")",
"if",
"self",
".",
"_definition",
"is",
"not",
"None",
":",
"QTimer",
".",
"singleShot",
"(",
"100",
",",
"self",
".",
"_goto_def",
")"
] | Request a go to assignment.
:param tc: Text cursor which contains the text that we must look for
its assignment. Can be None to go to the text that is under
the text cursor.
:type tc: QtGui.QTextCursor | [
"Request",
"a",
"go",
"to",
"assignment",
"."
] | eedae4e320a4b2d0c44abb2c3061091321648fb7 | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/goto.py#L122-L137 | train |
jasonrbriggs/proton | python/proton/template.py | get_template | def get_template(name):
"""
Return a copy of the template with the specified name. If not found, or an error occurs
during the load, return None.
"""
path = os.path.join(base_dir, name)
if path not in templates:
try:
templates[path] = Template(path)
except IOError:
return None
return copy.deepcopy(templates[path]) | python | def get_template(name):
"""
Return a copy of the template with the specified name. If not found, or an error occurs
during the load, return None.
"""
path = os.path.join(base_dir, name)
if path not in templates:
try:
templates[path] = Template(path)
except IOError:
return None
return copy.deepcopy(templates[path]) | [
"def",
"get_template",
"(",
"name",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"name",
")",
"if",
"path",
"not",
"in",
"templates",
":",
"try",
":",
"templates",
"[",
"path",
"]",
"=",
"Template",
"(",
"path",
")",
"except",
"IOError",
":",
"return",
"None",
"return",
"copy",
".",
"deepcopy",
"(",
"templates",
"[",
"path",
"]",
")"
] | Return a copy of the template with the specified name. If not found, or an error occurs
during the load, return None. | [
"Return",
"a",
"copy",
"of",
"the",
"template",
"with",
"the",
"specified",
"name",
".",
"If",
"not",
"found",
"or",
"an",
"error",
"occurs",
"during",
"the",
"load",
"return",
"None",
"."
] | e734734750797ef0caaa1680379e07b86d7a53e3 | https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/template.py#L366-L379 | train |
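A usage sketch; base_dir is the module-level template root used above, and the file name is hypothetical:

    from proton import template

    template.base_dir = 'templates'
    page = template.get_template('page.xhtml')  # a deep copy, or None if the file is missing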
jasonrbriggs/proton | python/proton/template.py | Template.set_value | def set_value(self, eid, val, idx='*'):
"""
Set the content of an xml element marked with the matching eid attribute.
"""
if eid in self.__element_ids:
elems = self.__element_ids[eid]
if type(val) in SEQ_TYPES:
idx = 0
if idx == '*':
for elem in elems:
self.__set_value(eid, elem, val, idx)
elif idx < len(elems):
self.__set_value(eid, elems[idx], val, idx) | python | def set_value(self, eid, val, idx='*'):
"""
Set the content of an xml element marked with the matching eid attribute.
"""
if eid in self.__element_ids:
elems = self.__element_ids[eid]
if type(val) in SEQ_TYPES:
idx = 0
if idx == '*':
for elem in elems:
self.__set_value(eid, elem, val, idx)
elif idx < len(elems):
self.__set_value(eid, elems[idx], val, idx) | [
"def",
"set_value",
"(",
"self",
",",
"eid",
",",
"val",
",",
"idx",
"=",
"'*'",
")",
":",
"if",
"eid",
"in",
"self",
".",
"__element_ids",
":",
"elems",
"=",
"self",
".",
"__element_ids",
"[",
"eid",
"]",
"if",
"type",
"(",
"val",
")",
"in",
"SEQ_TYPES",
":",
"idx",
"=",
"0",
"if",
"idx",
"==",
"'*'",
":",
"for",
"elem",
"in",
"elems",
":",
"self",
".",
"__set_value",
"(",
"eid",
",",
"elem",
",",
"val",
",",
"idx",
")",
"elif",
"idx",
"<",
"len",
"(",
"elems",
")",
":",
"self",
".",
"__set_value",
"(",
"eid",
",",
"elems",
"[",
"idx",
"]",
",",
"val",
",",
"idx",
")"
] | Set the content of an xml element marked with the matching eid attribute. | [
"Set",
"the",
"content",
"of",
"an",
"xml",
"element",
"marked",
"with",
"the",
"matching",
"eid",
"attribute",
"."
] | e734734750797ef0caaa1680379e07b86d7a53e3 | https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/template.py#L220-L232 | train |
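Continuing the sketch above, and assuming the template marks an element with an eid attribute (e.g. <title eid="title">...</title>):

    page.set_value('title', 'Home')         # idx defaults to '*': fill every element tagged eid="title"
    page.set_value('subtitle', 'News', 0)   # fill only the first matching element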
jasonrbriggs/proton | python/proton/template.py | Template.set_attribute | def set_attribute(self, aid, attrib, val, idx='*'):
"""
Set the value of an xml attribute marked with the matching aid attribute.
"""
if aid in self.__attrib_ids:
elems = self.__attrib_ids[aid]
if idx == '*':
for elem in elems:
self.__set_attribute(elem, attrib, val)
elif idx < len(elems):
elem = elems[idx]
self.__set_attribute(elem, attrib, val) | python | def set_attribute(self, aid, attrib, val, idx='*'):
"""
Set the value of an xml attribute marked with the matching aid attribute.
"""
if aid in self.__attrib_ids:
elems = self.__attrib_ids[aid]
if idx == '*':
for elem in elems:
self.__set_attribute(elem, attrib, val)
elif idx < len(elems):
elem = elems[idx]
self.__set_attribute(elem, attrib, val) | [
"def",
"set_attribute",
"(",
"self",
",",
"aid",
",",
"attrib",
",",
"val",
",",
"idx",
"=",
"'*'",
")",
":",
"if",
"aid",
"in",
"self",
".",
"__attrib_ids",
":",
"elems",
"=",
"self",
".",
"__attrib_ids",
"[",
"aid",
"]",
"if",
"idx",
"==",
"'*'",
":",
"for",
"elem",
"in",
"elems",
":",
"self",
".",
"__set_attribute",
"(",
"elem",
",",
"attrib",
",",
"val",
")",
"elif",
"idx",
"<",
"len",
"(",
"elems",
")",
":",
"elem",
"=",
"elems",
"[",
"idx",
"]",
"self",
".",
"__set_attribute",
"(",
"elem",
",",
"attrib",
",",
"val",
")"
] | Set the value of an xml attribute marked with the matching aid attribute. | [
"Set",
"the",
"value",
"of",
"an",
"xml",
"attribute",
"marked",
"with",
"the",
"matching",
"aid",
"attribute",
"."
] | e734734750797ef0caaa1680379e07b86d7a53e3 | https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/template.py#L244-L255 | train |
jasonrbriggs/proton | python/proton/template.py | Template.hide | def hide(self, eid, index=0):
"""
Hide the element with the matching eid. If no match, look for an element with a matching rid.
"""
elems = None
if eid in self.__element_ids:
elems = self.__element_ids[eid]
elif eid in self.__repeat_ids:
elems = self.__repeat_ids[eid]
if elems and index < len(elems):
elem = elems[index]
elem.parent.children.remove(elem) | python | def hide(self, eid, index=0):
"""
Hide the element with the matching eid. If no match, look for an element with a matching rid.
"""
elems = None
if eid in self.__element_ids:
elems = self.__element_ids[eid]
elif eid in self.__repeat_ids:
elems = self.__repeat_ids[eid]
if elems and index < len(elems):
elem = elems[index]
elem.parent.children.remove(elem) | [
"def",
"hide",
"(",
"self",
",",
"eid",
",",
"index",
"=",
"0",
")",
":",
"elems",
"=",
"None",
"if",
"eid",
"in",
"self",
".",
"__element_ids",
":",
"elems",
"=",
"self",
".",
"__element_ids",
"[",
"eid",
"]",
"elif",
"eid",
"in",
"self",
".",
"__repeat_ids",
":",
"elems",
"=",
"self",
".",
"__repeat_ids",
"[",
"eid",
"]",
"if",
"elems",
"and",
"index",
"<",
"len",
"(",
"elems",
")",
":",
"elem",
"=",
"elems",
"[",
"index",
"]",
"elem",
".",
"parent",
".",
"children",
".",
"remove",
"(",
"elem",
")"
] | Hide the element with the matching eid. If no match, look for an element with a matching rid. | [
"Hide",
"the",
"element",
"with",
"the",
"matching",
"eid",
".",
"If",
"no",
"match",
"look",
"for",
"an",
"element",
"with",
"a",
"matching",
"rid",
"."
] | e734734750797ef0caaa1680379e07b86d7a53e3 | https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/template.py#L279-L291 | train |
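A short hedged example of `hide` (assumes an already-parsed Template instance `tmp`; the ids are made up):

# Hypothetical: tmp is a parsed Template.
tmp.hide('sidebar')       # removes the first element tagged eid="sidebar"
tmp.hide('row', index=1)  # the eid lookup falls back to rid; removes the second match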
jasonrbriggs/proton | python/proton/template.py | Template.repeat | def repeat(self, rid, count, index=0):
"""
Repeat an xml element marked with the matching rid.
"""
elems = None
if rid in self.__repeat_ids:
elems = self.__repeat_ids[rid]
elif rid in self.__element_ids:
elems = self.__element_ids[rid]
if elems and index < len(elems):
elem = elems[index]
self.__repeat(elem, count) | python | def repeat(self, rid, count, index=0):
"""
Repeat an xml element marked with the matching rid.
"""
elems = None
if rid in self.__repeat_ids:
elems = self.__repeat_ids[rid]
elif rid in self.__element_ids:
elems = self.__element_ids[rid]
if elems and index < len(elems):
elem = elems[index]
self.__repeat(elem, count) | [
"def",
"repeat",
"(",
"self",
",",
"rid",
",",
"count",
",",
"index",
"=",
"0",
")",
":",
"elems",
"=",
"None",
"if",
"rid",
"in",
"self",
".",
"__repeat_ids",
":",
"elems",
"=",
"self",
".",
"__repeat_ids",
"[",
"rid",
"]",
"elif",
"rid",
"in",
"self",
".",
"__element_ids",
":",
"elems",
"=",
"self",
".",
"__element_ids",
"if",
"elems",
"and",
"index",
"<",
"len",
"(",
"elems",
")",
":",
"elem",
"=",
"elems",
"[",
"index",
"]",
"self",
".",
"__repeat",
"(",
"elem",
",",
"count",
")"
] | Repeat an xml element marked with the matching rid. | [
"Repeat",
"an",
"xml",
"element",
"marked",
"with",
"the",
"matching",
"rid",
"."
] | e734734750797ef0caaa1680379e07b86d7a53e3 | https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/template.py#L293-L305 | train |
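A hedged sketch of `repeat` (assumes an already-parsed Template `tmp` with an element tagged rid="item"):

# Hypothetical: clone the rid="item" element so that three copies render,
# each of which can then be filled in by index.
tmp.repeat('item', 3)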
jasonrbriggs/proton | python/proton/template.py | Template.replace | def replace(self, eid, replacement, index=0):
"""
Replace an xml element marked with the matching eid. If the replacement value is an Element or TextElement,
it's swapped in untouched. If it's a Template, the children of the root element in the template are used.
Otherwise the replacement value is wrapped with a TextElement.
"""
if eid in self.__element_ids:
elems = self.__element_ids[eid]
elif eid in self.__repeat_ids:
elems = self.__repeat_ids[eid]
else:
return
if index < len(elems):
elem = elems[index]
current_pos = elem.parent.children.index(elem)
elem.parent.children.remove(elem)
replacement_type = type(replacement)
if replacement_type in (Element, TextElement):
self.check_element(replacement, True)
elem.parent.children.insert(current_pos, replacement)
replacement.parent = elem.parent
elif replacement_type == Template:
for child in replacement.root.children:
elem.parent.children.insert(current_pos, child)
child.parent = elem.parent
current_pos += 1
self.__merge_ids(self.__element_ids, replacement.__element_ids)
self.__merge_ids(self.__attrib_ids, replacement.__attrib_ids)
self.__merge_ids(self.__repeat_ids, replacement.__repeat_ids)
else:
elem.parent.children.insert(current_pos, TextElement(replacement)) | python | def replace(self, eid, replacement, index=0):
"""
Replace an xml element marked with the matching eid. If the replacement value is an Element or TextElement,
it's swapped in untouched. If it's a Template, the children of the root element in the template are used.
Otherwise the replacement value is wrapped with a TextElement.
"""
if eid in self.__element_ids:
elems = self.__element_ids[eid]
elif eid in self.__repeat_ids:
elems = self.__repeat_ids[eid]
else:
return
if index < len(elems):
elem = elems[index]
current_pos = elem.parent.children.index(elem)
elem.parent.children.remove(elem)
replacement_type = type(replacement)
if replacement_type in (Element, TextElement):
self.check_element(replacement, True)
elem.parent.children.insert(current_pos, replacement)
replacement.parent = elem.parent
elif replacement_type == Template:
for child in replacement.root.children:
elem.parent.children.insert(current_pos, child)
child.parent = elem.parent
current_pos += 1
self.__merge_ids(self.__element_ids, replacement.__element_ids)
self.__merge_ids(self.__attrib_ids, replacement.__attrib_ids)
self.__merge_ids(self.__repeat_ids, replacement.__repeat_ids)
else:
elem.parent.children.insert(current_pos, TextElement(replacement)) | [
"def",
"replace",
"(",
"self",
",",
"eid",
",",
"replacement",
",",
"index",
"=",
"0",
")",
":",
"if",
"eid",
"in",
"self",
".",
"__element_ids",
":",
"elems",
"=",
"self",
".",
"__element_ids",
"[",
"eid",
"]",
"elif",
"eid",
"in",
"self",
".",
"__repeat_ids",
":",
"elems",
"=",
"self",
".",
"__repeat_ids",
"[",
"eid",
"]",
"else",
":",
"return",
"if",
"index",
"<",
"len",
"(",
"elems",
")",
":",
"elem",
"=",
"elems",
"[",
"index",
"]",
"current_pos",
"=",
"elem",
".",
"parent",
".",
"children",
".",
"index",
"(",
"elem",
")",
"elem",
".",
"parent",
".",
"children",
".",
"remove",
"(",
"elem",
")",
"replacement_type",
"=",
"type",
"(",
"replacement",
")",
"if",
"replacement_type",
"in",
"(",
"Element",
",",
"TextElement",
")",
":",
"self",
".",
"check_element",
"(",
"replacement",
",",
"True",
")",
"elem",
".",
"parent",
".",
"children",
".",
"insert",
"(",
"current_pos",
",",
"replacement",
")",
"replacement",
".",
"parent",
"=",
"elem",
".",
"parent",
"elif",
"replacement_type",
"==",
"Template",
":",
"for",
"child",
"in",
"replacement",
".",
"root",
".",
"children",
":",
"elem",
".",
"parent",
".",
"children",
".",
"insert",
"(",
"current_pos",
",",
"child",
")",
"child",
".",
"parent",
"=",
"elem",
".",
"parent",
"current_pos",
"+=",
"1",
"self",
".",
"__merge_ids",
"(",
"self",
".",
"__element_ids",
",",
"replacement",
".",
"__element_ids",
")",
"self",
".",
"__merge_ids",
"(",
"self",
".",
"__attrib_ids",
",",
"replacement",
".",
"__attrib_ids",
")",
"self",
".",
"__merge_ids",
"(",
"self",
".",
"__repeat_ids",
",",
"replacement",
".",
"__repeat_ids",
")",
"else",
":",
"elem",
".",
"parent",
".",
"children",
".",
"insert",
"(",
"current_pos",
",",
"TextElement",
"(",
"replacement",
")",
")"
] | Replace an xml element marked with the matching eid. If the replacement value is an Element or TextElement,
it's swapped in untouched. If it's a Template, the children of the root element in the template are used.
Otherwise the replacement value is wrapped with a TextElement. | [
"Replace",
"an",
"xml",
"element",
"marked",
"with",
"the",
"matching",
"eid",
".",
"If",
"the",
"replacement",
"value",
"is",
"an",
"Element",
"or",
"TextElement",
"it",
"s",
"swapped",
"in",
"untouched",
".",
"If",
"it",
"s",
"a",
"Template",
"the",
"children",
"of",
"the",
"root",
"element",
"in",
"the",
"template",
"are",
"used",
".",
"Otherwise",
"the",
"replacement",
"value",
"is",
"wrapped",
"with",
"a",
"TextElement",
"."
] | e734734750797ef0caaa1680379e07b86d7a53e3 | https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/template.py#L316-L347 | train |
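A hedged sketch of `replace` covering two of the branches the docstring describes (all names are hypothetical; `tmp` and `other` are assumed to be parsed Templates):

# Plain values are wrapped in a TextElement:
tmp.replace('byline', 'Jane Doe')
# A Template argument splices the children of its root element into place:
tmp.replace('body', other)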
jpscaletti/authcode | authcode/auth.py | Auth.set_hasher | def set_hasher(self, hash, rounds=None):
"""Updates the has algorithm and, optionally, the number of rounds
to use.
Raises:
`~WrongHashAlgorithm` if new algorithm isn't one of the three
recomended options.
"""
hash = hash.replace('-', '_')
if hash not in VALID_HASHERS:
raise WrongHashAlgorithm(WRONG_HASH_MESSAGE)
hasher = getattr(ph, hash)
utils.test_hasher(hasher)
default_rounds = getattr(hasher, 'default_rounds', 1)
min_rounds = getattr(hasher, 'min_rounds', 1)
max_rounds = getattr(hasher, 'max_rounds', float("inf"))
rounds = min(max(rounds or default_rounds, min_rounds), max_rounds)
op = {
'schemes': VALID_HASHERS + DEPRECATED_HASHERS,
'deprecated': DEPRECATED_HASHERS,
'default': hash,
hash + '__default_rounds': rounds
}
self.hasher = CryptContext(**op)
self.hash = hash.replace('_', '-') # For testing
self.rounds = rounds | python | def set_hasher(self, hash, rounds=None):
"""Updates the has algorithm and, optionally, the number of rounds
to use.
Raises:
`~WrongHashAlgorithm` if new algorithm isn't one of the three
recommended options.
"""
hash = hash.replace('-', '_')
if hash not in VALID_HASHERS:
raise WrongHashAlgorithm(WRONG_HASH_MESSAGE)
hasher = getattr(ph, hash)
utils.test_hasher(hasher)
default_rounds = getattr(hasher, 'default_rounds', 1)
min_rounds = getattr(hasher, 'min_rounds', 1)
max_rounds = getattr(hasher, 'max_rounds', float("inf"))
rounds = min(max(rounds or default_rounds, min_rounds), max_rounds)
op = {
'schemes': VALID_HASHERS + DEPRECATED_HASHERS,
'deprecated': DEPRECATED_HASHERS,
'default': hash,
hash + '__default_rounds': rounds
}
self.hasher = CryptContext(**op)
self.hash = hash.replace('_', '-') # For testing
self.rounds = rounds | [
"def",
"set_hasher",
"(",
"self",
",",
"hash",
",",
"rounds",
"=",
"None",
")",
":",
"hash",
"=",
"hash",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"if",
"hash",
"not",
"in",
"VALID_HASHERS",
":",
"raise",
"WrongHashAlgorithm",
"(",
"WRONG_HASH_MESSAGE",
")",
"hasher",
"=",
"getattr",
"(",
"ph",
",",
"hash",
")",
"utils",
".",
"test_hasher",
"(",
"hasher",
")",
"default_rounds",
"=",
"getattr",
"(",
"hasher",
",",
"'default_rounds'",
",",
"1",
")",
"min_rounds",
"=",
"getattr",
"(",
"hasher",
",",
"'min_rounds'",
",",
"1",
")",
"max_rounds",
"=",
"getattr",
"(",
"hasher",
",",
"'max_rounds'",
",",
"float",
"(",
"\"inf\"",
")",
")",
"rounds",
"=",
"min",
"(",
"max",
"(",
"rounds",
"or",
"default_rounds",
",",
"min_rounds",
")",
",",
"max_rounds",
")",
"op",
"=",
"{",
"'schemes'",
":",
"VALID_HASHERS",
"+",
"DEPRECATED_HASHERS",
",",
"'deprecated'",
":",
"DEPRECATED_HASHERS",
",",
"'default'",
":",
"hash",
",",
"hash",
"+",
"'__default_rounds'",
":",
"rounds",
"}",
"self",
".",
"hasher",
"=",
"CryptContext",
"(",
"*",
"*",
"op",
")",
"self",
".",
"hash",
"=",
"hash",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"# For testing",
"self",
".",
"rounds",
"=",
"rounds"
] | Updates the hash algorithm and, optionally, the number of rounds
to use.
Raises:
`~WrongHashAlgorithm` if new algorithm isn't one of the three
recommended options. | [
"Updates",
"the",
"has",
"algorithm",
"and",
"optionally",
"the",
"number",
"of",
"rounds",
"to",
"use",
"."
] | 91529b6d0caec07d1452758d937e1e0745826139 | https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/auth.py#L140-L167 | train |
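A hedged usage sketch (assumptions: `auth` is an authcode Auth instance and 'bcrypt' is among the library's VALID_HASHERS; neither is confirmed by this entry):

# Hypothetical: switch the scheme and pin the work factor.
auth.set_hasher('bcrypt', rounds=12)
print(auth.hash, auth.rounds)  # e.g. 'bcrypt' 12 (rounds are clamped to the scheme's bounds)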
klmitch/turnstile | turnstile/config.py | Config.to_bool | def to_bool(value, do_raise=True):
"""Convert a string to a boolean value.
If the string consists of digits, the integer value of the string
is coerced to a boolean value. Otherwise, any of the strings "t",
"true", "on", "y", and "yes" are considered True and any of the
strings "f", "false", "off", "n", and "no" are considered False.
A ValueError will be raised for any other value.
"""
value = value.lower()
# Try it as an integer
if value.isdigit():
return bool(int(value))
# OK, check it against the true/false values...
if value in _str_true:
return True
elif value in _str_false:
return False
# Not recognized
if do_raise:
raise ValueError("invalid literal for to_bool(): %r" % value)
return False | python | def to_bool(value, do_raise=True):
"""Convert a string to a boolean value.
If the string consists of digits, the integer value of the string
is coerced to a boolean value. Otherwise, any of the strings "t",
"true", "on", "y", and "yes" are considered True and any of the
strings "f", "false", "off", "n", and "no" are considered False.
A ValueError will be raised for any other value.
"""
value = value.lower()
# Try it as an integer
if value.isdigit():
return bool(int(value))
# OK, check it against the true/false values...
if value in _str_true:
return True
elif value in _str_false:
return False
# Not recognized
if do_raise:
raise ValueError("invalid literal for to_bool(): %r" % value)
return False | [
"def",
"to_bool",
"(",
"value",
",",
"do_raise",
"=",
"True",
")",
":",
"value",
"=",
"value",
".",
"lower",
"(",
")",
"# Try it as an integer",
"if",
"value",
".",
"isdigit",
"(",
")",
":",
"return",
"bool",
"(",
"int",
"(",
"value",
")",
")",
"# OK, check it against the true/false values...",
"if",
"value",
"in",
"_str_true",
":",
"return",
"True",
"elif",
"value",
"in",
"_str_false",
":",
"return",
"False",
"# Not recognized",
"if",
"do_raise",
":",
"raise",
"ValueError",
"(",
"\"invalid literal for to_bool(): %r\"",
"%",
"value",
")",
"return",
"False"
] | Convert a string to a boolean value.
If the string consists of digits, the integer value of the string
is coerced to a boolean value. Otherwise, any of the strings "t",
"true", "on", "y", and "yes" are considered True and any of the
strings "f", "false", "off", "n", and "no" are considered False.
A ValueError will be raised for any other value. | [
"Convert",
"a",
"string",
"to",
"a",
"boolean",
"value",
"."
] | 8fe9a359b45e505d3192ab193ecf9be177ab1a17 | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/config.py#L228-L254 | train |
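A quick sketch of the documented behaviour (assuming `to_bool` is exposed as a static method on Config, as its self-less signature suggests; the import path follows this entry's file layout):

from turnstile.config import Config  # path per this entry; treat as an assumption

Config.to_bool('Yes')                    # True  -- matched case-insensitively against _str_true
Config.to_bool('0')                      # False -- digit strings are coerced through int()
Config.to_bool('maybe', do_raise=False)  # False instead of raising ValueError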
BernardFW/bernard | src/bernard/layers/definitions.py | BaseLayer.become | async def become(self, layer_type: Type[L], request: 'Request') -> L:
"""
Transform this layer into another layer type
"""
raise ValueError('Cannot become "{}"'.format(layer_type.__name__)) | python | async def become(self, layer_type: Type[L], request: 'Request') -> L:
"""
Transform this layer into another layer type
"""
raise ValueError('Cannot become "{}"'.format(layer_type.__name__)) | [
"async",
"def",
"become",
"(",
"self",
",",
"layer_type",
":",
"Type",
"[",
"L",
"]",
",",
"request",
":",
"'Request'",
")",
"->",
"L",
":",
"raise",
"ValueError",
"(",
"'Cannot become \"{}\"'",
".",
"format",
"(",
"layer_type",
".",
"__name__",
")",
")"
] | Transform this layer into another layer type | [
"Transform",
"this",
"layer",
"into",
"another",
"layer",
"type"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/layers/definitions.py#L75-L80 | train |
BernardFW/bernard | src/bernard/layers/definitions.py | Text.become | async def become(self, layer_type: Type[L], request: 'Request'):
"""
Transforms the translatable string into an actual string and put it
inside a RawText.
"""
if layer_type != RawText:
return await super(Text, self).become(layer_type, request)
return RawText(await render(self.text, request)) | python | async def become(self, layer_type: Type[L], request: 'Request'):
"""
Transforms the translatable string into an actual string and put it
inside a RawText.
"""
if layer_type != RawText:
return await super(Text, self).become(layer_type, request)
return RawText(await render(self.text, request)) | [
"async",
"def",
"become",
"(",
"self",
",",
"layer_type",
":",
"Type",
"[",
"L",
"]",
",",
"request",
":",
"'Request'",
")",
":",
"if",
"layer_type",
"!=",
"RawText",
":",
"super",
"(",
"Text",
",",
"self",
")",
".",
"become",
"(",
"layer_type",
",",
"request",
")",
"return",
"RawText",
"(",
"await",
"render",
"(",
"self",
".",
"text",
",",
"request",
")",
")"
] | Transforms the translatable string into an actual string and put it
inside a RawText. | [
"Transforms",
"the",
"translatable",
"string",
"into",
"an",
"actual",
"string",
"and",
"put",
"it",
"inside",
"a",
"RawText",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/layers/definitions.py#L110-L118 | train |
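A hedged sketch of the conversion inside a state handler (`lyr` is assumed to alias bernard.layers, `request` to be the current Request, and `greeting` a translatable string; the wording setup is omitted):

# Hypothetical: render a translatable Text layer into a concrete RawText.
raw = await lyr.Text(greeting).become(lyr.RawText, request)
# Any other target type falls through to BaseLayer.become() and raises ValueError.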
BernardFW/bernard | src/bernard/engine/fsm.py | FSM._make_register | def _make_register(self) -> BaseRegisterStore:
"""
Make the register storage.
"""
s = settings.REGISTER_STORE
store_class = import_class(s['class'])
return store_class(**s['params']) | python | def _make_register(self) -> BaseRegisterStore:
"""
Make the register storage.
"""
s = settings.REGISTER_STORE
store_class = import_class(s['class'])
return store_class(**s['params']) | [
"def",
"_make_register",
"(",
"self",
")",
"->",
"BaseRegisterStore",
":",
"s",
"=",
"settings",
".",
"REGISTER_STORE",
"store_class",
"=",
"import_class",
"(",
"s",
"[",
"'class'",
"]",
")",
"return",
"store_class",
"(",
"*",
"*",
"s",
"[",
"'params'",
"]",
")"
] | Make the register storage. | [
"Make",
"the",
"register",
"storage",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/fsm.py#L141-L148 | train |
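The code implies a settings shape of a dotted class path plus a params dict; a hypothetical example (the class path below is made up):

REGISTER_STORE = {
    'class': 'bernard.storage.register.RedisRegisterStore',  # hypothetical path
    'params': {'host': 'localhost'},
}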
BernardFW/bernard | src/bernard/engine/fsm.py | FSM._make_transitions | def _make_transitions(self) -> List[Transition]:
"""
Load the transitions file.
"""
module_name = settings.TRANSITIONS_MODULE
module_ = importlib.import_module(module_name)
return module_.transitions | python | def _make_transitions(self) -> List[Transition]:
"""
Load the transitions file.
"""
module_name = settings.TRANSITIONS_MODULE
module_ = importlib.import_module(module_name)
return module_.transitions | [
"def",
"_make_transitions",
"(",
"self",
")",
"->",
"List",
"[",
"Transition",
"]",
":",
"module_name",
"=",
"settings",
".",
"TRANSITIONS_MODULE",
"module_",
"=",
"importlib",
".",
"import_module",
"(",
"module_name",
")",
"return",
"module_",
".",
"transitions"
] | Load the transitions file. | [
"Load",
"the",
"transitions",
"file",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/fsm.py#L150-L157 | train |
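The module named by settings.TRANSITIONS_MODULE only needs to expose a module-level `transitions` list; a hypothetical skeleton:

# myapp/transitions.py (hypothetical module)
transitions = [
    # Transition(dest=SomeState, ...) entries would go here; the Transition
    # constructor is not shown in this entry, so none are spelled out.
]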
BernardFW/bernard | src/bernard/engine/fsm.py | FSM._make_allowed_states | def _make_allowed_states(self) -> Iterator[Text]:
"""
Sometimes we load states from the database. In order to avoid loading
an arbitrary class, we list here the state classes that are allowed.
"""
for trans in self.transitions:
yield trans.dest.name()
if trans.origin:
yield trans.origin.name() | python | def _make_allowed_states(self) -> Iterator[Text]:
"""
Sometimes we load states from the database. In order to avoid loading
an arbitrary class, we list here the state classes that are allowed.
"""
for trans in self.transitions:
yield trans.dest.name()
if trans.origin:
yield trans.origin.name() | [
"def",
"_make_allowed_states",
"(",
"self",
")",
"->",
"Iterator",
"[",
"Text",
"]",
":",
"for",
"trans",
"in",
"self",
".",
"transitions",
":",
"yield",
"trans",
".",
"dest",
".",
"name",
"(",
")",
"if",
"trans",
".",
"origin",
":",
"yield",
"trans",
".",
"origin",
".",
"name",
"(",
")"
] | Sometimes we load states from the database. In order to avoid loading
an arbitrary class, we list here the state classes that are allowed. | [
"Sometimes",
"we",
"load",
"states",
"from",
"the",
"database",
".",
"In",
"order",
"to",
"avoid",
"loading",
"an",
"arbitrary",
"class",
"we",
"list",
"here",
"the",
"state",
"classes",
"that",
"are",
"allowed",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/fsm.py#L159-L169 | train |
BernardFW/bernard | src/bernard/engine/fsm.py | FSM._find_trigger | async def _find_trigger(self,
request: Request,
origin: Optional[Text]=None,
internal: bool=False) \
-> Tuple[
Optional[BaseTrigger],
Optional[Type[BaseState]],
Optional[bool],
]:
"""
Find the best trigger for this request, or go away.
"""
reg = request.register
if not origin:
origin = reg.get(Register.STATE)
logger.debug('From state: %s', origin)
results = await asyncio.gather(*(
x.rank(request, origin)
for x
in self.transitions
if x.internal == internal
))
if len(results):
score, trigger, state, dnr = max(results, key=lambda x: x[0])
if score >= settings.MINIMAL_TRIGGER_SCORE:
return trigger, state, dnr
return None, None, None | python | async def _find_trigger(self,
request: Request,
origin: Optional[Text]=None,
internal: bool=False) \
-> Tuple[
Optional[BaseTrigger],
Optional[Type[BaseState]],
Optional[bool],
]:
"""
Find the best trigger for this request, or go away.
"""
reg = request.register
if not origin:
origin = reg.get(Register.STATE)
logger.debug('From state: %s', origin)
results = await asyncio.gather(*(
x.rank(request, origin)
for x
in self.transitions
if x.internal == internal
))
if len(results):
score, trigger, state, dnr = max(results, key=lambda x: x[0])
if score >= settings.MINIMAL_TRIGGER_SCORE:
return trigger, state, dnr
return None, None, None | [
"async",
"def",
"_find_trigger",
"(",
"self",
",",
"request",
":",
"Request",
",",
"origin",
":",
"Optional",
"[",
"Text",
"]",
"=",
"None",
",",
"internal",
":",
"bool",
"=",
"False",
")",
"->",
"Tuple",
"[",
"Optional",
"[",
"BaseTrigger",
"]",
",",
"Optional",
"[",
"Type",
"[",
"BaseState",
"]",
"]",
",",
"Optional",
"[",
"bool",
"]",
",",
"]",
":",
"reg",
"=",
"request",
".",
"register",
"if",
"not",
"origin",
":",
"origin",
"=",
"reg",
".",
"get",
"(",
"Register",
".",
"STATE",
")",
"logger",
".",
"debug",
"(",
"'From state: %s'",
",",
"origin",
")",
"results",
"=",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"(",
"x",
".",
"rank",
"(",
"request",
",",
"origin",
")",
"for",
"x",
"in",
"self",
".",
"transitions",
"if",
"x",
".",
"internal",
"==",
"internal",
")",
")",
"if",
"len",
"(",
"results",
")",
":",
"score",
",",
"trigger",
",",
"state",
",",
"dnr",
"=",
"max",
"(",
"results",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"if",
"score",
">=",
"settings",
".",
"MINIMAL_TRIGGER_SCORE",
":",
"return",
"trigger",
",",
"state",
",",
"dnr",
"return",
"None",
",",
"None",
",",
"None"
] | Find the best trigger for this request, or go away. | [
"Find",
"the",
"best",
"trigger",
"for",
"this",
"request",
"or",
"go",
"away",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/fsm.py#L171-L203 | train |
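The selection step in miniature (a standalone toy, not the bernard API): rank every candidate, then keep the best one only if it clears the threshold.

MINIMAL_TRIGGER_SCORE = 0.5  # stand-in for settings.MINIMAL_TRIGGER_SCORE
results = [(0.2, 'trig_a'), (0.9, 'trig_b'), (0.4, 'trig_c')]
score, trigger = max(results, key=lambda x: x[0])
if score < MINIMAL_TRIGGER_SCORE:
    trigger = None
print(trigger)  # -> 'trig_b'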
BernardFW/bernard | src/bernard/engine/fsm.py | FSM._confused_state | def _confused_state(self, request: Request) -> Type[BaseState]:
"""
If we're confused, find which state to call.
"""
origin = request.register.get(Register.STATE)
if origin in self._allowed_states:
try:
return import_class(origin)
except (AttributeError, ImportError):
pass
return import_class(settings.DEFAULT_STATE) | python | def _confused_state(self, request: Request) -> Type[BaseState]:
"""
If we're confused, find which state to call.
"""
origin = request.register.get(Register.STATE)
if origin in self._allowed_states:
try:
return import_class(origin)
except (AttributeError, ImportError):
pass
return import_class(settings.DEFAULT_STATE) | [
"def",
"_confused_state",
"(",
"self",
",",
"request",
":",
"Request",
")",
"->",
"Type",
"[",
"BaseState",
"]",
":",
"origin",
"=",
"request",
".",
"register",
".",
"get",
"(",
"Register",
".",
"STATE",
")",
"if",
"origin",
"in",
"self",
".",
"_allowed_states",
":",
"try",
":",
"return",
"import_class",
"(",
"origin",
")",
"except",
"(",
"AttributeError",
",",
"ImportError",
")",
":",
"pass",
"return",
"import_class",
"(",
"settings",
".",
"DEFAULT_STATE",
")"
] | If we're confused, find which state to call. | [
"If",
"we",
"re",
"confused",
"find",
"which",
"state",
"to",
"call",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/fsm.py#L206-L219 | train |
BernardFW/bernard | src/bernard/engine/fsm.py | FSM._build_state | async def _build_state(self,
request: Request,
message: BaseMessage,
responder: Responder) \
-> Tuple[
Optional[BaseState],
Optional[BaseTrigger],
Optional[bool],
]:
"""
Build the state for this request.
"""
trigger, state_class, dnr = await self._find_trigger(request)
if trigger is None:
if not message.should_confuse():
return None, None, None
state_class = self._confused_state(request)
logger.debug('Next state: %s (confused)', state_class.name())
else:
logger.debug('Next state: %s', state_class.name())
state = state_class(request, responder, trigger, trigger)
return state, trigger, dnr | python | async def _build_state(self,
request: Request,
message: BaseMessage,
responder: Responder) \
-> Tuple[
Optional[BaseState],
Optional[BaseTrigger],
Optional[bool],
]:
"""
Build the state for this request.
"""
trigger, state_class, dnr = await self._find_trigger(request)
if trigger is None:
if not message.should_confuse():
return None, None, None
state_class = self._confused_state(request)
logger.debug('Next state: %s (confused)', state_class.name())
else:
logger.debug('Next state: %s', state_class.name())
state = state_class(request, responder, trigger, trigger)
return state, trigger, dnr | [
"async",
"def",
"_build_state",
"(",
"self",
",",
"request",
":",
"Request",
",",
"message",
":",
"BaseMessage",
",",
"responder",
":",
"Responder",
")",
"->",
"Tuple",
"[",
"Optional",
"[",
"BaseState",
"]",
",",
"Optional",
"[",
"BaseTrigger",
"]",
",",
"Optional",
"[",
"bool",
"]",
",",
"]",
":",
"trigger",
",",
"state_class",
",",
"dnr",
"=",
"await",
"self",
".",
"_find_trigger",
"(",
"request",
")",
"if",
"trigger",
"is",
"None",
":",
"if",
"not",
"message",
".",
"should_confuse",
"(",
")",
":",
"return",
"None",
",",
"None",
",",
"None",
"state_class",
"=",
"self",
".",
"_confused_state",
"(",
"request",
")",
"logger",
".",
"debug",
"(",
"'Next state: %s (confused)'",
",",
"state_class",
".",
"name",
"(",
")",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'Next state: %s'",
",",
"state_class",
".",
"name",
"(",
")",
")",
"state",
"=",
"state_class",
"(",
"request",
",",
"responder",
",",
"trigger",
",",
"trigger",
")",
"return",
"state",
",",
"trigger",
",",
"dnr"
] | Build the state for this request. | [
"Build",
"the",
"state",
"for",
"this",
"request",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/fsm.py#L221-L245 | train |
BernardFW/bernard | src/bernard/engine/fsm.py | FSM._run_state | async def _run_state(self, responder, state, trigger, request) \
-> BaseState:
"""
Execute the state or, if execution fails, handle it.
"""
user_trigger = trigger
# noinspection PyBroadException
try:
if trigger:
await state.handle()
else:
await state.confused()
for i in range(0, settings.MAX_INTERNAL_JUMPS + 1):
if i == settings.MAX_INTERNAL_JUMPS:
raise MaxInternalJump()
trigger, state_class, dnr = \
await self._find_trigger(request, state.name(), True)
if not trigger:
break
logger.debug('Jumping to state: %s', state_class.name())
state = state_class(request, responder, trigger, user_trigger)
await state.handle()
except Exception:
logger.exception('Error while handling state "%s"', state.name())
responder.clear()
reporter.report(request, state.name())
await state.error()
return state | python | async def _run_state(self, responder, state, trigger, request) \
-> BaseState:
"""
Execute the state or, if execution fails, handle it.
"""
user_trigger = trigger
# noinspection PyBroadException
try:
if trigger:
await state.handle()
else:
await state.confused()
for i in range(0, settings.MAX_INTERNAL_JUMPS + 1):
if i == settings.MAX_INTERNAL_JUMPS:
raise MaxInternalJump()
trigger, state_class, dnr = \
await self._find_trigger(request, state.name(), True)
if not trigger:
break
logger.debug('Jumping to state: %s', state_class.name())
state = state_class(request, responder, trigger, user_trigger)
await state.handle()
except Exception:
logger.exception('Error while handling state "%s"', state.name())
responder.clear()
reporter.report(request, state.name())
await state.error()
return state | [
"async",
"def",
"_run_state",
"(",
"self",
",",
"responder",
",",
"state",
",",
"trigger",
",",
"request",
")",
"->",
"BaseState",
":",
"user_trigger",
"=",
"trigger",
"# noinspection PyBroadException",
"try",
":",
"if",
"trigger",
":",
"await",
"state",
".",
"handle",
"(",
")",
"else",
":",
"await",
"state",
".",
"confused",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"settings",
".",
"MAX_INTERNAL_JUMPS",
"+",
"1",
")",
":",
"if",
"i",
"==",
"settings",
".",
"MAX_INTERNAL_JUMPS",
":",
"raise",
"MaxInternalJump",
"(",
")",
"trigger",
",",
"state_class",
",",
"dnr",
"=",
"await",
"self",
".",
"_find_trigger",
"(",
"request",
",",
"state",
".",
"name",
"(",
")",
",",
"True",
")",
"if",
"not",
"trigger",
":",
"break",
"logger",
".",
"debug",
"(",
"'Jumping to state: %s'",
",",
"state_class",
".",
"name",
"(",
")",
")",
"state",
"=",
"state_class",
"(",
"request",
",",
"responder",
",",
"trigger",
",",
"user_trigger",
")",
"await",
"state",
".",
"handle",
"(",
")",
"except",
"Exception",
":",
"logger",
".",
"exception",
"(",
"'Error while handling state \"%s\"'",
",",
"state",
".",
"name",
"(",
")",
")",
"responder",
".",
"clear",
"(",
")",
"reporter",
".",
"report",
"(",
"request",
",",
"state",
".",
"name",
"(",
")",
")",
"await",
"state",
".",
"error",
"(",
")",
"return",
"state"
] | Execute the state or, if execution fails, handle it. | [
"Execute",
"the",
"state",
"or",
"if",
"execution",
"fails",
"handle",
"it",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/fsm.py#L247-L281 | train |
BernardFW/bernard | src/bernard/engine/fsm.py | FSM._build_state_register | async def _build_state_register(self,
state: BaseState,
request: Request,
responder: Responder) -> Dict:
"""
Build the next register to store.
- The state is the name of the current state
- The transition is made by all successive layers present in the
response.
"""
return {
Register.STATE: state.name(),
Register.TRANSITION:
await responder.make_transition_register(request),
} | python | async def _build_state_register(self,
state: BaseState,
request: Request,
responder: Responder) -> Dict:
"""
Build the next register to store.
- The state is the name of the current state
- The transition is made by all successive layers present in the
response.
"""
return {
Register.STATE: state.name(),
Register.TRANSITION:
await responder.make_transition_register(request),
} | [
"async",
"def",
"_build_state_register",
"(",
"self",
",",
"state",
":",
"BaseState",
",",
"request",
":",
"Request",
",",
"responder",
":",
"Responder",
")",
"->",
"Dict",
":",
"return",
"{",
"Register",
".",
"STATE",
":",
"state",
".",
"name",
"(",
")",
",",
"Register",
".",
"TRANSITION",
":",
"await",
"responder",
".",
"make_transition_register",
"(",
"request",
")",
",",
"}"
] | Build the next register to store.
- The state is the name of the current state
- The transition is made by all successive layers present in the
response. | [
"Build",
"the",
"next",
"register",
"to",
"store",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/fsm.py#L283-L299 | train |
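A rough sketch of the dict this coroutine returns (key names come from the Register constants; the state value is a hypothetical dotted class name):

# {
#     Register.STATE: 'myapp.states.Hello',
#     Register.TRANSITION: {...},  # built from the responder's layers
# }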
kata198/python-subprocess2 | subprocess2/simple.py | Simple.runGetResults | def runGetResults(cmd, stdout=True, stderr=True, encoding=sys.getdefaultencoding()):
'''
runGetResults - Simple method to run a command and return the results of the execution as a dict.
@param cmd <str/list> - String of command and arguments, or list of command and arguments
If cmd is a string, the command will be executed as if ran exactly as written in a shell. This mode supports shell-isms like '&&' and '|'
If cmd is a list, the first element will be the executable, and further elements are arguments that will be passed to that executable.
@param stdout <True/False> - Default True, Whether to gather and include program's stdout data in results.
If False, that data the program prints to stdout will just be output to the current tty and not recorded.
If True, it will NOT be output to the tty, and will be recorded under the key "stdout" in the return dict.
@param stderr <True/False or "stdout"/subprocess.STDOUT> - Default True, Whether to gather and include program's stderr data in results, or to combine with "stdout" data.
If False, the data the program prints to stderr will just be output to the current tty and not recorded
If True, it will NOT be output to the tty, and will be recorded under the key "stderr" in the return dict.
If "stdout" or subprocess.STDOUT - stderr data will be blended with stdout data. This requires that stdout=True.
@param encoding <None/str> - Default sys.getdefaultencoding(), the program's output will automatically be decoded using the provided codec (e.x. "utf-8" or "ascii").
If None or False-ish, data will not be decoded (i.e. in python3 will be "bytes" type)
If unsure, leave this as its default value, or provide "utf-8"
@return <dict> - Dict of results. Has following keys:
'returnCode' - <int> - Always present, included the integer return-code from the command.
'stdout' <unciode/str/bytes (depending on #encoding)> - Present if stdout=True, contains data output by program to stdout, or stdout+stderr if stderr param is "stdout"/subprocess.STDOUT
'stderr' <unicode/str/bytes (depending on #encoding)> - Present if stderr=True, contains data output by program to stderr.
@raises - SimpleCommandFailure if it cannot launch the given command, for reasons such as: cannot find the executable, or no permission to execute, etc
'''
if stderr in ('stdout', subprocess.STDOUT):
stderr = subprocess.STDOUT
elif stderr == True or stderr == subprocess.PIPE:
stderr = subprocess.PIPE
else:
stderr = None
if stdout == True or stdout == subprocess.PIPE:
stdout = subprocess.PIPE
else:
stdout = None
if stderr == subprocess.STDOUT and stdout != subprocess.PIPE:
raise ValueError('Cannot redirect stderr to stdout if stdout is not captured.')
if issubclass(cmd.__class__, (list, tuple)):
shell = False
else:
shell = True
try:
pipe = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, shell=shell)
except Exception as e:
try:
if shell is True:
cmdStr = ' '.join(cmd)
else:
cmdStr = cmd
except:
cmdStr = repr(cmd)
raise SimpleCommandFailure('Failed to execute "%s": %s' %(cmdStr, str(e)), returnCode=255)
streams = []
fileNoToKey = {}
ret = {}
if stdout == subprocess.PIPE:
streams.append(pipe.stdout)
fileNoToKey[pipe.stdout.fileno()] = 'stdout'
ret['stdout'] = []
if stderr == subprocess.PIPE:
streams.append(pipe.stderr)
fileNoToKey[pipe.stderr.fileno()] = 'stderr'
ret['stderr'] = []
returnCode = None
time.sleep(.02)
while returnCode is None or streams:
returnCode = pipe.poll()
while True:
(readyToRead, junk1, junk2) = select.select(streams, [], [], .005)
if not readyToRead:
# Don't strangle CPU
time.sleep(.01)
break
for readyStream in readyToRead:
retKey = fileNoToKey[readyStream.fileno()]
curRead = readyStream.read()
if curRead in (b'', ''):
streams.remove(readyStream)
continue
ret[retKey].append(curRead)
for key in list(ret.keys()):
ret[key] = b''.join(ret[key])
if encoding:
ret[key] = ret[key].decode(encoding)
ret['returnCode'] = returnCode
return ret | python | def runGetResults(cmd, stdout=True, stderr=True, encoding=sys.getdefaultencoding()):
'''
runGetResults - Simple method to run a command and return the results of the execution as a dict.
@param cmd <str/list> - String of command and arguments, or list of command and arguments
If cmd is a string, the command will be executed as if ran exactly as written in a shell. This mode supports shell-isms like '&&' and '|'
If cmd is a list, the first element will be the executable, and further elements are arguments that will be passed to that executable.
@param stdout <True/False> - Default True, Whether to gather and include program's stdout data in results.
If False, that data the program prints to stdout will just be output to the current tty and not recorded.
If True, it will NOT be output to the tty, and will be recorded under the key "stdout" in the return dict.
@param stderr <True/False or "stdout"/subprocess.STDOUT> - Default True, Whether to gather and include program's stderr data in results, or to combine with "stdout" data.
If False, the data the program prints to stderr will just be output to the current tty and not recorded
If True, it will NOT be output to the tty, and will be recorded under the key "stderr" in the return dict.
If "stdout" or subprocess.STDOUT - stderr data will be blended with stdout data. This requires that stdout=True.
@param encoding <None/str> - Default sys.getdefaultencoding(), the program's output will automatically be decoded using the provided codec (e.x. "utf-8" or "ascii").
If None or False-ish, data will not be decoded (i.e. in python3 will be "bytes" type)
If unsure, leave this as its default value, or provide "utf-8"
@return <dict> - Dict of results. Has following keys:
'returnCode' - <int> - Always present, included the integer return-code from the command.
'stdout' <unciode/str/bytes (depending on #encoding)> - Present if stdout=True, contains data output by program to stdout, or stdout+stderr if stderr param is "stdout"/subprocess.STDOUT
'stderr' <unicode/str/bytes (depending on #encoding)> - Present if stderr=True, contains data output by program to stderr.
@raises - SimpleCommandFailure if it cannot launch the given command, for reasons such as: cannot find the executable, or no permission to execute, etc
'''
if stderr in ('stdout', subprocess.STDOUT):
stderr = subprocess.STDOUT
elif stderr == True or stderr == subprocess.PIPE:
stderr = subprocess.PIPE
else:
stderr = None
if stdout == True or stdout == subprocess.PIPE:
stdout = subprocess.PIPE
else:
stdout = None
if stderr == subprocess.STDOUT and stdout != subprocess.PIPE:
raise ValueError('Cannot redirect stderr to stdout if stdout is not captured.')
if issubclass(cmd.__class__, (list, tuple)):
shell = False
else:
shell = True
try:
pipe = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, shell=shell)
except Exception as e:
try:
if shell is True:
cmdStr = ' '.join(cmd)
else:
cmdStr = cmd
except:
cmdStr = repr(cmd)
raise SimpleCommandFailure('Failed to execute "%s": %s' %(cmdStr, str(e)), returnCode=255)
streams = []
fileNoToKey = {}
ret = {}
if stdout == subprocess.PIPE:
streams.append(pipe.stdout)
fileNoToKey[pipe.stdout.fileno()] = 'stdout'
ret['stdout'] = []
if stderr == subprocess.PIPE:
streams.append(pipe.stderr)
fileNoToKey[pipe.stderr.fileno()] = 'stderr'
ret['stderr'] = []
returnCode = None
time.sleep(.02)
while returnCode is None or streams:
returnCode = pipe.poll()
while True:
(readyToRead, junk1, junk2) = select.select(streams, [], [], .005)
if not readyToRead:
# Don't strangle CPU
time.sleep(.01)
break
for readyStream in readyToRead:
retKey = fileNoToKey[readyStream.fileno()]
curRead = readyStream.read()
if curRead in (b'', ''):
streams.remove(readyStream)
continue
ret[retKey].append(curRead)
for key in list(ret.keys()):
ret[key] = b''.join(ret[key])
if encoding:
ret[key] = ret[key].decode(encoding)
ret['returnCode'] = returnCode
return ret | [
"def",
"runGetResults",
"(",
"cmd",
",",
"stdout",
"=",
"True",
",",
"stderr",
"=",
"True",
",",
"encoding",
"=",
"sys",
".",
"getdefaultencoding",
"(",
")",
")",
":",
"if",
"stderr",
"in",
"(",
"'stdout'",
",",
"subprocess",
".",
"STDOUT",
")",
":",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
"elif",
"stderr",
"==",
"True",
"or",
"stderr",
"==",
"subprocess",
".",
"PIPE",
":",
"stderr",
"=",
"subprocess",
".",
"PIPE",
"else",
":",
"stderr",
"=",
"None",
"if",
"stdout",
"==",
"True",
"or",
"stdout",
"==",
"subprocess",
".",
"STDOUT",
":",
"stdout",
"=",
"subprocess",
".",
"PIPE",
"else",
":",
"stdout",
"=",
"None",
"if",
"stderr",
"==",
"subprocess",
".",
"PIPE",
":",
"raise",
"ValueError",
"(",
"'Cannot redirect stderr to stdout if stdout is not captured.'",
")",
"if",
"issubclass",
"(",
"cmd",
".",
"__class__",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"shell",
"=",
"False",
"else",
":",
"shell",
"=",
"True",
"try",
":",
"pipe",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
",",
"shell",
"=",
"shell",
")",
"except",
"Exception",
"as",
"e",
":",
"try",
":",
"if",
"shell",
"is",
"True",
":",
"cmdStr",
"=",
"' '",
".",
"join",
"(",
"cmd",
")",
"else",
":",
"cmdStr",
"=",
"cmd",
"except",
":",
"cmdStr",
"=",
"repr",
"(",
"cmd",
")",
"raise",
"SimpleCommandFailure",
"(",
"'Failed to execute \"%s\": %s'",
"%",
"(",
"cmdStr",
",",
"str",
"(",
"e",
")",
")",
",",
"returnCode",
"=",
"255",
")",
"streams",
"=",
"[",
"]",
"fileNoToKey",
"=",
"{",
"}",
"ret",
"=",
"{",
"}",
"if",
"stdout",
"==",
"subprocess",
".",
"PIPE",
":",
"streams",
".",
"append",
"(",
"pipe",
".",
"stdout",
")",
"fileNoToKey",
"[",
"pipe",
".",
"stdout",
".",
"fileno",
"(",
")",
"]",
"=",
"'stdout'",
"ret",
"[",
"'stdout'",
"]",
"=",
"[",
"]",
"if",
"stderr",
"==",
"subprocess",
".",
"PIPE",
":",
"streams",
".",
"append",
"(",
"pipe",
".",
"stderr",
")",
"fileNoToKey",
"[",
"pipe",
".",
"stderr",
".",
"fileno",
"(",
")",
"]",
"=",
"'stderr'",
"ret",
"[",
"'stderr'",
"]",
"=",
"[",
"]",
"returnCode",
"=",
"None",
"time",
".",
"sleep",
"(",
".02",
")",
"while",
"returnCode",
"is",
"None",
"or",
"streams",
":",
"returnCode",
"=",
"pipe",
".",
"poll",
"(",
")",
"while",
"True",
":",
"(",
"readyToRead",
",",
"junk1",
",",
"junk2",
")",
"=",
"select",
".",
"select",
"(",
"streams",
",",
"[",
"]",
",",
"[",
"]",
",",
".005",
")",
"if",
"not",
"readyToRead",
":",
"# Don't strangle CPU",
"time",
".",
"sleep",
"(",
".01",
")",
"break",
"for",
"readyStream",
"in",
"readyToRead",
":",
"retKey",
"=",
"fileNoToKey",
"[",
"readyStream",
".",
"fileno",
"(",
")",
"]",
"curRead",
"=",
"readyStream",
".",
"read",
"(",
")",
"if",
"curRead",
"in",
"(",
"b''",
",",
"''",
")",
":",
"streams",
".",
"remove",
"(",
"readyStream",
")",
"continue",
"ret",
"[",
"retKey",
"]",
".",
"append",
"(",
"curRead",
")",
"for",
"key",
"in",
"list",
"(",
"ret",
".",
"keys",
"(",
")",
")",
":",
"ret",
"[",
"key",
"]",
"=",
"b''",
".",
"join",
"(",
"ret",
"[",
"key",
"]",
")",
"if",
"encoding",
":",
"ret",
"[",
"key",
"]",
"=",
"ret",
"[",
"key",
"]",
".",
"decode",
"(",
"encoding",
")",
"ret",
"[",
"'returnCode'",
"]",
"=",
"returnCode",
"return",
"ret"
] | runGetResults - Simple method to run a command and return the results of the execution as a dict.
@param cmd <str/list> - String of command and arguments, or list of command and arguments
If cmd is a string, the command will be executed as if ran exactly as written in a shell. This mode supports shell-isms like '&&' and '|'
If cmd is a list, the first element will be the executable, and further elements are arguments that will be passed to that executable.
@param stdout <True/False> - Default True, Whether to gather and include program's stdout data in results.
If False, the data the program prints to stdout will just be output to the current tty and not recorded.
If True, it will NOT be output to the tty, and will be recorded under the key "stdout" in the return dict.
@param stderr <True/False or "stdout"/subprocess.STDOUT> - Default True, Whether to gather and include program's stderr data in results, or to combine with "stdout" data.
If False, the data the program prints to stderr will just be output to the current tty and not recorded
If True, it will NOT be output to the tty, and will be recorded under the key "stderr" in the return dict.
If "stdout" or subprocess.STDOUT - stderr data will be blended with stdout data. This requires that stdout=True.
@param encoding <None/str> - Default sys.getdefaultencoding(), the program's output will automatically be decoded using the provided codec (e.x. "utf-8" or "ascii").
If None or False-ish, data will not be decoded (i.e. in python3 will be "bytes" type)
If unsure, leave this as its default value, or provide "utf-8"
@return <dict> - Dict of results. Has following keys:
'returnCode' - <int> - Always present, included the integer return-code from the command.
'stdout' <unciode/str/bytes (depending on #encoding)> - Present if stdout=True, contains data output by program to stdout, or stdout+stderr if stderr param is "stdout"/subprocess.STDOUT
'stderr' <unicode/str/bytes (depending on #encoding)> - Present if stderr=True, contains data output by program to stderr.
@raises - SimpleCommandFailure if it cannot launch the given command, for reasons such as: cannot find the executable, or no permission to execute, etc | [
"runGetResults",
"-",
"Simple",
"method",
"to",
"run",
"a",
"command",
"and",
"return",
"the",
"results",
"of",
"the",
"execution",
"as",
"a",
"dict",
"."
] | 8544b0b651d8e14de9fdd597baa704182e248b01 | https://github.com/kata198/python-subprocess2/blob/8544b0b651d8e14de9fdd597baa704182e248b01/subprocess2/simple.py#L31-L144 | train |
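A hedged usage example (assuming runGetResults is reachable as a static method on Simple, as its self-less signature suggests; the import path follows this entry's file layout):

from subprocess2.simple import Simple  # treat the path as an assumption

results = Simple.runGetResults(['echo', 'hello'])
print(results['returnCode'])  # 0
print(results['stdout'])      # 'hello\n'

# Fold stderr into the captured stdout stream:
merged = Simple.runGetResults('ls /no/such/dir; true', stderr='stdout')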
BernardFW/bernard | src/bernard/storage/context/base.py | create_context_store | def create_context_store(name='default',
ttl=settings.CONTEXT_DEFAULT_TTL,
store=settings.CONTEXT_STORE) -> 'BaseContextStore':
"""
Create a context store. By default this uses the configured context
store, but you can use a custom class via the `store`
setting.
The time to live of each store (there is one per conversation) is
defined by the `ttl` value, which is also inferred by default from the
configuration.
You can have several stores existing in parallel. To make the distinction
between them you need to give them different names, using the `name`
parameter.
The usage looks like:
>>> cs = create_context_store()
>>> class Hello(BaseTestState):
>>> @cs.inject(['foo'])
>>> async def handle(self, context):
>>> logger.debug('foo is %s', context['foo'])
>>>
>>> async def missing_context(self):
>>> self.send(lyr.Text('`foo` is not in context'))
This requires that `foo` is present in the context in order to enter the
handler.
See `BaseContextStore.inject()` for more info.
"""
store_class = import_class(store['class'])
return store_class(name=name, ttl=ttl, **store['params']) | python | def create_context_store(name='default',
ttl=settings.CONTEXT_DEFAULT_TTL,
store=settings.CONTEXT_STORE) -> 'BaseContextStore':
"""
Create a context store. By default this uses the configured context
store, but you can use a custom class via the `store`
setting.
The time to live of each store (there is one per conversation) is
defined by the `ttl` value, which is also inferred by default from the
configuration.
You can have several stores existing in parallel. To make the distinction
between them you need to give them different names, using the `name`
parameter.
The usage looks like:
>>> cs = create_context_store()
>>> class Hello(BaseTestState):
>>> @cs.inject(['foo'])
>>> async def handle(self, context):
>>> logger.debug('foo is %s', context['foo'])
>>>
>>> async def missing_context(self):
>>> self.send(lyr.Text('`foo` is not in context'))
This requires that `foo` is present in the context in order to enter the
handler.
See `BaseContextStore.inject()` for more info.
"""
store_class = import_class(store['class'])
return store_class(name=name, ttl=ttl, **store['params']) | [
"def",
"create_context_store",
"(",
"name",
"=",
"'default'",
",",
"ttl",
"=",
"settings",
".",
"CONTEXT_DEFAULT_TTL",
",",
"store",
"=",
"settings",
".",
"CONTEXT_STORE",
")",
"->",
"'BaseContextStore'",
":",
"store_class",
"=",
"import_class",
"(",
"store",
"[",
"'class'",
"]",
")",
"return",
"store_class",
"(",
"name",
"=",
"name",
",",
"ttl",
"=",
"ttl",
",",
"*",
"*",
"store",
"[",
"'params'",
"]",
")"
] | Create a context store. By default this uses the configured context
store, but you can use a custom class via the `store`
setting.
The time to live of each store (there is one per conversation) is
defined by the `ttl` value, which is also inferred by default from the
configuration.
You can have several stores existing in parallel. To make the distinction
between them you need to give them different names, using the `name`
parameter.
The usage looks like:
>>> cs = create_context_store()
>>> class Hello(BaseTestState):
>>> @cs.inject(['foo'])
>>> async def handle(self, context):
>>> logger.debug('foo is %s', context['foo'])
>>>
>>> async def missing_context(self):
>>> self.send(lyr.Text('`foo` is not in context'))
This requires that `foo` is present in the context in order to enter the
handler.
See `BaseContextStore.inject()` for more info. | [
"Create",
"a",
"context",
"store",
".",
"By",
"default",
"using",
"the",
"default",
"configured",
"context",
"store",
"but",
"you",
"can",
"use",
"a",
"custom",
"class",
"if",
"you",
"want",
"to",
"using",
"the",
"store",
"setting",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/storage/context/base.py#L34-L68 | train |
AlejandroFrias/case-conversion | case_conversion/case_conversion.py | camelcase | def camelcase(text, acronyms=None):
"""Return text in camelCase style.
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> camelcase("hello world")
'helloWorld'
>>> camelcase("HELLO_HTML_WORLD", True, ["HTML"])
'helloHTMLWorld'
"""
words, _case, _sep = case_parse.parse_case(text, acronyms)
if words:
words[0] = words[0].lower()
return ''.join(words) | python | def camelcase(text, acronyms=None):
"""Return text in camelCase style.
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> camelcase("hello world")
'helloWorld'
>>> camelcase("HELLO_HTML_WORLD", True, ["HTML"])
'helloHTMLWorld'
"""
words, _case, _sep = case_parse.parse_case(text, acronyms)
if words:
words[0] = words[0].lower()
return ''.join(words) | [
"def",
"camelcase",
"(",
"text",
",",
"acronyms",
"=",
"None",
")",
":",
"words",
",",
"_case",
",",
"_sep",
"=",
"case_parse",
".",
"parse_case",
"(",
"text",
",",
"acronyms",
")",
"if",
"words",
":",
"words",
"[",
"0",
"]",
"=",
"words",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"return",
"''",
".",
"join",
"(",
"words",
")"
] | Return text in camelCase style.
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> camelcase("hello world")
'helloWorld'
>>> camelcase("HELLO_HTML_WORLD", True, ["HTML"])
'helloHTMLWorld' | [
"Return",
"text",
"in",
"camelCase",
"style",
"."
] | 79ebce1403fbdac949b2da21b8f6fbe3234ddb31 | https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_conversion.py#L13-L29 | train |
AlejandroFrias/case-conversion | case_conversion/case_conversion.py | dotcase | def dotcase(text, acronyms=None):
"""Return text in dot.case style.
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> dotcase("hello world")
'hello.world'
>>> dotcase("helloHTMLWorld", True, ["HTML"])
'hello.html.world'
"""
words, _case, _sep = case_parse.parse_case(text, acronyms)
return '.'.join([w.lower() for w in words]) | python | def dotcase(text, acronyms=None):
"""Return text in dot.case style.
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> dotcase("hello world")
'hello.world'
>>> dotcase("helloHTMLWorld", True, ["HTML"])
'hello.html.world'
"""
words, _case, _sep = case_parse.parse_case(text, acronyms)
return '.'.join([w.lower() for w in words]) | [
"def",
"dotcase",
"(",
"text",
",",
"acronyms",
"=",
"None",
")",
":",
"words",
",",
"_case",
",",
"_sep",
"=",
"case_parse",
".",
"parse_case",
"(",
"text",
",",
"acronyms",
")",
"return",
"'.'",
".",
"join",
"(",
"[",
"w",
".",
"lower",
"(",
")",
"for",
"w",
"in",
"words",
"]",
")"
] | Return text in dot.case style.
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> dotcase("hello world")
'hello.world'
>>> dotcase("helloHTMLWorld", True, ["HTML"])
'hello.html.world' | [
"Return",
"text",
"in",
"dot",
".",
"case",
"style",
"."
] | 79ebce1403fbdac949b2da21b8f6fbe3234ddb31 | https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_conversion.py#L148-L162 | train |
AlejandroFrias/case-conversion | case_conversion/case_conversion.py | separate_words | def separate_words(text, acronyms=None):
"""Return text in "seperate words" style.
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> separate_words("HELLO_WORLD")
'HELLO WORLD'
>>> separate_words("helloHTMLWorld", True, ["HTML"])
'hello HTML World'
"""
words, _case, _sep = case_parse.parse_case(text, acronyms, preserve_case=True)
return ' '.join(words) | python | def separate_words(text, acronyms=None):
"""Return text in "seperate words" style.
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> separate_words("HELLO_WORLD")
'HELLO WORLD'
>>> separate_words("helloHTMLWorld", True, ["HTML"])
'hello HTML World'
"""
words, _case, _sep = case_parse.parse_case(text, acronyms, preserve_case=True)
return ' '.join(words) | [
"def",
"separate_words",
"(",
"text",
",",
"acronyms",
"=",
"None",
")",
":",
"words",
",",
"_case",
",",
"_sep",
"=",
"case_parse",
".",
"parse_case",
"(",
"text",
",",
"acronyms",
",",
"preserve_case",
"=",
"True",
")",
"return",
"' '",
".",
"join",
"(",
"words",
")"
] | Return text in "seperate words" style.
Args:
text: input string to convert case
acronyms: a list of acronyms to detect
>>> separate_words("HELLO_WORLD")
'HELLO WORLD'
>>> separate_words("helloHTMLWorld", True, ["HTML"])
'hello HTML World' | [
"Return",
"text",
"in",
"seperate",
"words",
"style",
"."
] | 79ebce1403fbdac949b2da21b8f6fbe3234ddb31 | https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_conversion.py#L165-L179 | train |
reanahub/reana-db | reana_db/database.py | init_db | def init_db():
"""Initialize the DB."""
import reana_db.models
if not database_exists(engine.url):
create_database(engine.url)
Base.metadata.create_all(bind=engine) | python | def init_db():
"""Initialize the DB."""
import reana_db.models
if not database_exists(engine.url):
create_database(engine.url)
Base.metadata.create_all(bind=engine) | [
"def",
"init_db",
"(",
")",
":",
"import",
"reana_db",
".",
"models",
"if",
"not",
"database_exists",
"(",
"engine",
".",
"url",
")",
":",
"create_database",
"(",
"engine",
".",
"url",
")",
"Base",
".",
"metadata",
".",
"create_all",
"(",
"bind",
"=",
"engine",
")"
] | Initialize the DB. | [
"Initialize",
"the",
"DB",
"."
] | 4efcb46d23af035689964d8c25a804c5a8f1dfc3 | https://github.com/reanahub/reana-db/blob/4efcb46d23af035689964d8c25a804c5a8f1dfc3/reana_db/database.py#L28-L33 | train |
greenelab/PathCORE-T | pathcore/network.py | _load_significant_pathways_file | def _load_significant_pathways_file(path_to_file):
"""Read in the significant pathways file as a
pandas.DataFrame.
"""
feature_pathway_df = pd.read_table(
path_to_file, header=0,
usecols=["feature", "side", "pathway"])
feature_pathway_df = feature_pathway_df.sort_values(
by=["feature", "side"])
return feature_pathway_df | python | def _load_significant_pathways_file(path_to_file):
"""Read in the significant pathways file as a
pandas.DataFrame.
"""
feature_pathway_df = pd.read_table(
path_to_file, header=0,
usecols=["feature", "side", "pathway"])
feature_pathway_df = feature_pathway_df.sort_values(
by=["feature", "side"])
return feature_pathway_df | [
"def",
"_load_significant_pathways_file",
"(",
"path_to_file",
")",
":",
"feature_pathway_df",
"=",
"pd",
".",
"read_table",
"(",
"path_to_file",
",",
"header",
"=",
"0",
",",
"usecols",
"=",
"[",
"\"feature\"",
",",
"\"side\"",
",",
"\"pathway\"",
"]",
")",
"feature_pathway_df",
"=",
"feature_pathway_df",
".",
"sort_values",
"(",
"by",
"=",
"[",
"\"feature\"",
",",
"\"side\"",
"]",
")",
"return",
"feature_pathway_df"
] | Read in the significant pathways file as a
pandas.DataFrame. | [
"Read",
"in",
"the",
"significant",
"pathways",
"file",
"as",
"a",
"pandas",
".",
"DataFrame",
"."
] | 9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c | https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L552-L561 | train |
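A hedged sketch of the expected input (the file name and rows are made up; only the three listed columns are read and any others are ignored):

from pathcore.network import _load_significant_pathways_file  # path per this entry

# significant_pathways.tsv -- tab-separated, with a header row:
# feature<TAB>side<TAB>pathway
# 0<TAB>pos<TAB>KEGG_GLYCOLYSIS
# 1<TAB>neg<TAB>KEGG_TCA_CYCLE
df = _load_significant_pathways_file('significant_pathways.tsv')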
greenelab/PathCORE-T | pathcore/network.py | _pathway_feature_permutation | def _pathway_feature_permutation(pathway_feature_tuples,
permutation_max_iters):
"""Permute the pathways across features for one side in the
network. Used in `permute_pathways_across_features`
Parameters
-----------
pathway_feature_tuples : list(tup(str, int))
a tuple list [(pathway, feature)] where the pathway, feature pairing
indicates that a pathway was overrepresented in that feature
permutation_max_iters : int
specify the maximum number of iterations, limiting the number of attempts
we have to generate a permutation
Returns
-----------
list(tup(str, int)), the list of pathway, feature pairings after the
permutation
"""
pathways, features = [list(elements_at_position)
for elements_at_position in
zip(*pathway_feature_tuples)]
original_pathways = pathways[:]
random.shuffle(pathways)
feature_block_locations = {}
i = 0
while i < len(pathways):
starting_index = i
current_feature = features[i]
pathway_set = set()
# input is grouped by feature, so we want to keep track of the start
# and end of a given "block" of the same feature--this corresponds
# to all the pathways overrepresented in that feature.
while i < len(pathways) and features[i] == current_feature:
# check the results of the permutation. if `pathway_set` does
# not contain the current pathway, we are maintaining the
# necessary invariants in our permutation thus far.
if pathways[i] not in pathway_set:
pathway_set.add(pathways[i])
else:
k = 0
random_pathway = None
while True:
# select another random pathway from the list
# and get the feature to which it belongs
j = random.choice(range(0, len(pathways)))
random_pathway = pathways[j]
random_feature = features[j]
if (random_pathway != pathways[i] and
random_pathway not in pathway_set):
# if this is a feature we have not already seen,
# we are done.
if random_feature not in feature_block_locations:
break
# otherwise, look at the indices that correspond
# to that feature's block of pathways
feature_block_start, feature_block_end = \
feature_block_locations[random_feature]
pathway_block = pathways[feature_block_start:
feature_block_end]
# make sure that the current pathway is not in
# that block--ensures that we maintain the invariant
# after the swap
if pathways[i] not in pathway_block:
break
k += 1
if k > permutation_max_iters:
print("Permutation step: reached the maximum "
"number of iterations {0}.".format(
permutation_max_iters))
return None
pathway_set.add(random_pathway)
pathways[j] = pathways[i]
pathways[i] = random_pathway
i += 1
ending_index = i
feature_block_locations[current_feature] = (
starting_index, ending_index)
if original_pathways == pathways:
return None
return list(zip(pathways, features)) | python | def _pathway_feature_permutation(pathway_feature_tuples,
permutation_max_iters):
"""Permute the pathways across features for one side in the
network. Used in `permute_pathways_across_features`
Parameters
-----------
pathway_feature_tuples : list(tup(str, int))
a tuple list [(pathway, feature)] where the pathway, feature pairing
indicates that a pathway was overrepresented in that feature
permutation_max_iters : int
specify the maximum number of iterations; this limits the number of
attempts we have to generate a valid permutation
Returns
-----------
list(tup(str, int)), the list of pathway, feature pairings after the
permutation
"""
pathways, features = [list(elements_at_position)
for elements_at_position in
zip(*pathway_feature_tuples)]
original_pathways = pathways[:]
random.shuffle(pathways)
feature_block_locations = {}
i = 0
while i < len(pathways):
starting_index = i
current_feature = features[i]
pathway_set = set()
# input is grouped by feature, so we want to keep track of the start
# and end of a given "block" of the same feature--this corresponds
# to all the pathways overrepresented in that feature.
while i < len(pathways) and features[i] == current_feature:
# check the results of the permutation. if `pathway_set` does
# not contain the current pathway, we are maintaining the
# necessary invariants in our permutation thus far.
if pathways[i] not in pathway_set:
pathway_set.add(pathways[i])
else:
k = 0
random_pathway = None
while True:
# select another random pathway from the list
# and get the feature to which it belongs
j = random.choice(range(0, len(pathways)))
random_pathway = pathways[j]
random_feature = features[j]
if (random_pathway != pathways[i] and
random_pathway not in pathway_set):
# if this is a feature we have not already seen,
# we are done.
if random_feature not in feature_block_locations:
break
# otherwise, look at the indices that correspond
# to that feature's block of pathways
feature_block_start, feature_block_end = \
feature_block_locations[random_feature]
pathway_block = pathways[feature_block_start:
feature_block_end]
# make sure that the current pathway is not in
# that block--ensures that we maintain the invariant
# after the swap
if pathways[i] not in pathway_block:
break
k += 1
if k > permutation_max_iters:
print("Permutation step: reached the maximum "
"number of iterations {0}.".format(
permutation_max_iters))
return None
pathway_set.add(random_pathway)
pathways[j] = pathways[i]
pathways[i] = random_pathway
i += 1
ending_index = i
feature_block_locations[current_feature] = (
starting_index, ending_index)
if original_pathways == pathways:
return None
return list(zip(pathways, features)) | [
"def",
"_pathway_feature_permutation",
"(",
"pathway_feature_tuples",
",",
"permutation_max_iters",
")",
":",
"pathways",
",",
"features",
"=",
"[",
"list",
"(",
"elements_at_position",
")",
"for",
"elements_at_position",
"in",
"zip",
"(",
"*",
"pathway_feature_tuples",
")",
"]",
"original_pathways",
"=",
"pathways",
"[",
":",
"]",
"random",
".",
"shuffle",
"(",
"pathways",
")",
"feature_block_locations",
"=",
"{",
"}",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"pathways",
")",
":",
"starting_index",
"=",
"i",
"current_feature",
"=",
"features",
"[",
"i",
"]",
"pathway_set",
"=",
"set",
"(",
")",
"# input is grouped by feature, so we want to keep track of the start",
"# and end of a given \"block\" of the same feature--this corresponds",
"# to all the pathways overrepresented in that feature.",
"while",
"i",
"<",
"len",
"(",
"pathways",
")",
"and",
"features",
"[",
"i",
"]",
"==",
"current_feature",
":",
"# check the results of the permutation. if `pathway_set` does",
"# not contain the current pathway, we are maintaining the",
"# necessary invariants in our permutation thus far.",
"if",
"pathways",
"[",
"i",
"]",
"not",
"in",
"pathway_set",
":",
"pathway_set",
".",
"add",
"(",
"pathways",
"[",
"i",
"]",
")",
"else",
":",
"k",
"=",
"0",
"random_pathway",
"=",
"None",
"while",
"True",
":",
"# select another random pathway from the list",
"# and get the feature to which it belongs",
"j",
"=",
"random",
".",
"choice",
"(",
"range",
"(",
"0",
",",
"len",
"(",
"pathways",
")",
")",
")",
"random_pathway",
"=",
"pathways",
"[",
"j",
"]",
"random_feature",
"=",
"features",
"[",
"j",
"]",
"if",
"(",
"random_pathway",
"!=",
"pathways",
"[",
"i",
"]",
"and",
"random_pathway",
"not",
"in",
"pathway_set",
")",
":",
"# if this is a feature we have not already seen,",
"# we are done.",
"if",
"random_feature",
"not",
"in",
"feature_block_locations",
":",
"break",
"# otherwise, look at the indices that correspond",
"# to that feature's block of pathways",
"feature_block_start",
",",
"feature_block_end",
"=",
"feature_block_locations",
"[",
"random_feature",
"]",
"pathway_block",
"=",
"pathways",
"[",
"feature_block_start",
":",
"feature_block_end",
"]",
"# make sure that the current pathway is not in",
"# that block--ensures that we maintain the invariant",
"# after the swap",
"if",
"pathways",
"[",
"i",
"]",
"not",
"in",
"pathway_block",
":",
"break",
"k",
"+=",
"1",
"if",
"k",
">",
"permutation_max_iters",
":",
"print",
"(",
"\"Permutation step: reached the maximum \"",
"\"number of iterations {0}.\"",
".",
"format",
"(",
"permutation_max_iters",
")",
")",
"return",
"None",
"pathway_set",
".",
"add",
"(",
"random_pathway",
")",
"pathways",
"[",
"j",
"]",
"=",
"pathways",
"[",
"i",
"]",
"pathways",
"[",
"i",
"]",
"=",
"random_pathway",
"i",
"+=",
"1",
"ending_index",
"=",
"i",
"feature_block_locations",
"[",
"current_feature",
"]",
"=",
"(",
"starting_index",
",",
"ending_index",
")",
"if",
"original_pathways",
"==",
"pathways",
":",
"return",
"None",
"return",
"list",
"(",
"zip",
"(",
"pathways",
",",
"features",
")",
")"
] | Permute the pathways across features for one side in the
network. Used in `permute_pathways_across_features`
Parameters
-----------
pathway_feature_tuples : list(tup(str, int))
a tuple list [(pathway, feature)] where the pathway, feature pairing
indicates that a pathway was overrepresented in that feature
permutation_max_iters : int
specify the maximum number of iterations; this limits the number of
attempts we have to generate a valid permutation
Returns
-----------
list(tup(str, int)), the list of pathway, feature pairings after the
permutation | [
"Permute",
"the",
"pathways",
"across",
"features",
"for",
"one",
"side",
"in",
"the",
"network",
".",
"Used",
"in",
"permute_pathways_across_features"
] | 9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c | https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L568-L652 | train |
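A small standalone check of the invariant the permutation must preserve: within each feature's block, no pathway may appear twice. The pairs below are hypothetical; the input is assumed to be grouped by feature, as the function itself requires.

    from itertools import groupby
    from operator import itemgetter

    def pathways_unique_per_feature(pairs):
        # pairs: list of (pathway, feature) tuples, grouped by feature.
        for _, block in groupby(pairs, key=itemgetter(1)):
            pathways = [pathway for pathway, _ in block]
            if len(pathways) != len(set(pathways)):
                return False
        return True

    assert pathways_unique_per_feature([("pwA", 0), ("pwB", 0), ("pwA", 1)])
    assert not pathways_unique_per_feature([("pwA", 0), ("pwA", 0)])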
greenelab/PathCORE-T | pathcore/network.py | CoNetwork.weight_by_edge_odds_ratios | def weight_by_edge_odds_ratios(self,
edges_expected_weight,
flag_as_significant):
"""Applied during the permutation test. Update the edges in the
network to be weighted by their odds ratios. The odds ratio measures
how unexpected the observed edge weight is based on the expected
weight.
Parameters
-----------
edges_expected_weight : list(tup(tup(int, int), float))
A tuple list of (edge id, edge expected weight) generated from the
permutation test step.
flag_as_significant : [set|list](tup(int, int))
A set or list of edge ids that are considered significant against
the null model of random associations generated in the permutation
test
"""
for edge_id, expected_weight in edges_expected_weight:
edge_obj = self.edges[edge_id]
edge_obj.weight /= expected_weight
if edge_id in flag_as_significant:
edge_obj.significant = True
else:
edge_obj.significant = False | python | def weight_by_edge_odds_ratios(self,
edges_expected_weight,
flag_as_significant):
"""Applied during the permutation test. Update the edges in the
network to be weighted by their odds ratios. The odds ratio measures
how unexpected the observed edge weight is based on the expected
weight.
Parameters
-----------
edges_expected_weight : list(tup(tup(int, int), float))
A tuple list of (edge id, edge expected weight) generated from the
permutation test step.
flag_as_significant : [set|list](tup(int, int))
A set or list of edge ids that are considered significant against
the null model of random associations generated in the permutation
test
"""
for edge_id, expected_weight in edges_expected_weight:
edge_obj = self.edges[edge_id]
edge_obj.weight /= expected_weight
if edge_id in flag_as_significant:
edge_obj.significant = True
else:
edge_obj.significant = False | [
"def",
"weight_by_edge_odds_ratios",
"(",
"self",
",",
"edges_expected_weight",
",",
"flag_as_significant",
")",
":",
"for",
"edge_id",
",",
"expected_weight",
"in",
"edges_expected_weight",
":",
"edge_obj",
"=",
"self",
".",
"edges",
"[",
"edge_id",
"]",
"edge_obj",
".",
"weight",
"/=",
"expected_weight",
"if",
"edge_id",
"in",
"flag_as_significant",
":",
"edge_obj",
".",
"significant",
"=",
"True",
"else",
":",
"edge_obj",
".",
"significant",
"=",
"False"
] | Applied during the permutation test. Update the edges in the
network to be weighted by their odds ratios. The odds ratio measures
how unexpected the observed edge weight is based on the expected
weight.
Parameters
-----------
edges_expected_weight : list(tup(tup(int, int), float))
A tuple list of (edge id, edge expected weight) generated from the
permutation test step.
flag_as_significant : [set|list](tup(int, int))
A set or list of edge ids that are considered significant against
the null model of random associations generated in the permutation
test | [
"Applied",
"during",
"the",
"permutation",
"test",
".",
"Update",
"the",
"edges",
"in",
"the",
"network",
"to",
"be",
"weighted",
"by",
"their",
"odds",
"ratios",
".",
"The",
"odds",
"ratio",
"measures",
"how",
"unexpected",
"the",
"observed",
"edge",
"weight",
"is",
"based",
"on",
"the",
"expected",
"weight",
"."
] | 9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c | https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L223-L247 | train |
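An illustrative re-implementation of the weighting rule on plain dicts rather than the CoNetwork API: each observed edge weight is divided by its permutation-derived expectation, and the edge is flagged if it beat the null model. All values are hypothetical.

    observed = {(0, 1): 8.0, (0, 2): 3.0}      # observed co-occurrence weights
    expected = [((0, 1), 2.0), ((0, 2), 2.5)]  # expected weights from permutations
    significant = {(0, 1)}                     # edges that beat the null model

    odds_ratios = {}
    for edge_id, expected_weight in expected:
        odds_ratios[edge_id] = (observed[edge_id] / expected_weight,
                                edge_id in significant)
    print(odds_ratios)  # {(0, 1): (4.0, True), (0, 2): (1.2, False)}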
greenelab/PathCORE-T | pathcore/network.py | CoNetwork.aggregate | def aggregate(self, merge):
"""Combine this network with another network. The aggregation step
takes the union of the edges in the two networks, where we take the
sum of weights for edges common to both networks.
Parameters
-----------
merge : CoNetwork
the CoNetwork object being merged into the current network.
"""
self.features = set()
self.n_features += merge.n_features
vertex_id_conversion = self.convert_pathway_mapping(merge.pathways)
for edge_id, edge in merge.edges.items():
edge_key = self.remapped_edge(
vertex_id_conversion, edge_id)
if edge_key in self.edges:
if self.edges[edge_key].which_features:
self.edges[edge_key].which_features = []
self.edges[edge_key].weight += edge.weight
else:
vertex0_id, vertex1_id = edge_key
new_edge_obj = Edge(vertex0_id, vertex1_id, [])
new_edge_obj.weight = edge.weight
self.edges[edge_key] = new_edge_obj
self._add_edge_to_vertex(vertex0_id, new_edge_obj)
self._add_edge_to_vertex(vertex1_id, new_edge_obj) | python | def aggregate(self, merge):
"""Combine this network with another network. The aggregation step
takes the union of the edges in the two networks, where we take the
sum of weights for edges common to both networks.
Parameters
-----------
merge : CoNetwork
the CoNetwork object being merged into the current network.
"""
self.features = set()
self.n_features += merge.n_features
vertex_id_conversion = self.convert_pathway_mapping(merge.pathways)
for edge_id, edge in merge.edges.items():
edge_key = self.remapped_edge(
vertex_id_conversion, edge_id)
if edge_key in self.edges:
if self.edges[edge_key].which_features:
self.edges[edge_key].which_features = []
self.edges[edge_key].weight += edge.weight
else:
vertex0_id, vertex1_id = edge_key
new_edge_obj = Edge(vertex0_id, vertex1_id, [])
new_edge_obj.weight = edge.weight
self.edges[edge_key] = new_edge_obj
self._add_edge_to_vertex(vertex0_id, new_edge_obj)
self._add_edge_to_vertex(vertex1_id, new_edge_obj) | [
"def",
"aggregate",
"(",
"self",
",",
"merge",
")",
":",
"self",
".",
"features",
"=",
"set",
"(",
")",
"self",
".",
"n_features",
"+=",
"merge",
".",
"n_features",
"vertex_id_conversion",
"=",
"self",
".",
"convert_pathway_mapping",
"(",
"merge",
".",
"pathways",
")",
"for",
"edge_id",
",",
"edge",
"in",
"merge",
".",
"edges",
".",
"items",
"(",
")",
":",
"edge_key",
"=",
"self",
".",
"remapped_edge",
"(",
"vertex_id_conversion",
",",
"edge_id",
")",
"if",
"edge_key",
"in",
"self",
".",
"edges",
":",
"if",
"self",
".",
"edges",
"[",
"edge_key",
"]",
".",
"which_features",
":",
"self",
".",
"edges",
"[",
"edge_key",
"]",
".",
"which_features",
"=",
"[",
"]",
"self",
".",
"edges",
"[",
"edge_key",
"]",
".",
"weight",
"+=",
"edge",
".",
"weight",
"else",
":",
"vertex0_id",
",",
"vertex1_id",
"=",
"edge_key",
"new_edge_obj",
"=",
"Edge",
"(",
"vertex0_id",
",",
"vertex1_id",
",",
"[",
"]",
")",
"new_edge_obj",
".",
"weight",
"=",
"edge",
".",
"weight",
"self",
".",
"edges",
"[",
"edge_key",
"]",
"=",
"new_edge_obj",
"self",
".",
"_add_edge_to_vertex",
"(",
"vertex0_id",
",",
"new_edge_obj",
")",
"self",
".",
"_add_edge_to_vertex",
"(",
"vertex1_id",
",",
"new_edge_obj",
")"
] | Combine this network with another network. The aggregation step
takes the union of the edges in the two networks, where we take the
sum of weights for edges common to both networks.
Parameters
-----------
merge : CoNetwork
the CoNetwork object being merged into the current network. | [
"Combine",
"this",
"network",
"with",
"another",
"network",
".",
"The",
"aggregation",
"step",
"takes",
"the",
"union",
"of",
"the",
"edges",
"in",
"the",
"two",
"networks",
"where",
"we",
"take",
"the",
"sum",
"of",
"weights",
"for",
"edges",
"common",
"to",
"both",
"networks",
"."
] | 9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c | https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L249-L276 | train |
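A sketch of the aggregation rule on bare weight dictionaries, assuming pathway-name pairs are used directly as edge keys so the vertex-id remapping that `aggregate` performs via `convert_pathway_mapping` is unnecessary. `collections.Counter` addition gives exactly the union-with-summed-weights behavior.

    from collections import Counter

    network_a = Counter({("pwA", "pwB"): 2.0, ("pwA", "pwC"): 1.0})
    network_b = Counter({("pwA", "pwB"): 3.0, ("pwB", "pwC"): 4.0})

    merged = network_a + network_b  # union of edges; shared edges sum weights
    assert merged[("pwA", "pwB")] == 5.0
    assert merged[("pwB", "pwC")] == 4.0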
greenelab/PathCORE-T | pathcore/network.py | CoNetwork.edge_tuple | def edge_tuple(self, vertex0_id, vertex1_id):
"""To avoid duplicate edges where the vertex ids are reversed,
we maintain that the vertex ids are ordered so that the corresponding
pathway names are alphabetical.
Parameters
-----------
vertex0_id : int
one vertex in the edge
vertex1_id : int
the other vertex in the edge
Returns
-----------
tup(int, int)|None, the edge id or None if the vertices do not
exist in the network or they map to the same pathway (there should not
be any self-loops in the network)
"""
pw0 = self.__getitem__(vertex0_id)
pw1 = self.__getitem__(vertex1_id)
if not pw0 or not pw1:
return None
if pw0 < pw1:
return (vertex0_id, vertex1_id)
elif pw0 > pw1:
return (vertex1_id, vertex0_id)
else:
return None | python | def edge_tuple(self, vertex0_id, vertex1_id):
"""To avoid duplicate edges where the vertex ids are reversed,
we maintain that the vertex ids are ordered so that the corresponding
pathway names are alphabetical.
Parameters
-----------
vertex0_id : int
one vertex in the edge
vertex1_id : int
the other vertex in the edge
Returns
-----------
tup(int, int)|None, the edge id or None if the vertices do not
exist in the network or they map to the same pathway (there should not
be any self-loops in the network)
"""
pw0 = self.__getitem__(vertex0_id)
pw1 = self.__getitem__(vertex1_id)
if not pw0 or not pw1:
return None
if pw0 < pw1:
return (vertex0_id, vertex1_id)
elif pw0 > pw1:
return (vertex1_id, vertex0_id)
else:
return None | [
"def",
"edge_tuple",
"(",
"self",
",",
"vertex0_id",
",",
"vertex1_id",
")",
":",
"pw0",
"=",
"self",
".",
"__getitem__",
"(",
"vertex0_id",
")",
"pw1",
"=",
"self",
".",
"__getitem__",
"(",
"vertex1_id",
")",
"if",
"not",
"pw0",
"or",
"not",
"pw1",
":",
"return",
"None",
"if",
"pw0",
"<",
"pw1",
":",
"return",
"(",
"vertex0_id",
",",
"vertex1_id",
")",
"elif",
"pw0",
">",
"pw1",
":",
"return",
"(",
"vertex1_id",
",",
"vertex0_id",
")",
"else",
":",
"return",
"None"
] | To avoid duplicate edges where the vertex ids are reversed,
we maintain that the vertex ids are ordered so that the corresponding
pathway names are alphabetical.
Parameters
-----------
vertex0_id : int
one vertex in the edge
vertex1_id : int
the other vertex in the edge
Returns
-----------
tup(int, int)|None, the edge id or None if the vertices do not
exist in the network or they map to the same pathway (there should not
be any self-loops in the network) | [
"To",
"avoid",
"duplicate",
"edges",
"where",
"the",
"vertex",
"ids",
"are",
"reversed",
"we",
"maintain",
"that",
"the",
"vertex",
"ids",
"are",
"ordered",
"so",
"that",
"the",
"corresponding",
"pathway",
"names",
"are",
"alphabetical",
"."
] | 9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c | https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L332-L360 | train |
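A standalone sketch of the ordering rule with a hypothetical vertex-to-pathway mapping: vertex ids are ordered so the paired pathway names are alphabetical, and unknown vertices or self-loops yield None.

    pathway_of = {0: "cell cycle", 1: "apoptosis"}  # hypothetical mapping

    def edge_tuple(v0, v1):
        pw0, pw1 = pathway_of.get(v0), pathway_of.get(v1)
        if pw0 is None or pw1 is None or pw0 == pw1:
            return None
        return (v0, v1) if pw0 < pw1 else (v1, v0)

    assert edge_tuple(0, 1) == (1, 0)  # "apoptosis" sorts before "cell cycle"
    assert edge_tuple(0, 0) is None    # self-loops are rejected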
greenelab/PathCORE-T | pathcore/network.py | CoNetwork.add_pathway | def add_pathway(self, pathway):
"""Updates `self.pathways` and `self.n_pathways.`
Parameters
-----------
pathway : str
the pathway to add to the network.
"""
if pathway not in self.pathways:
self.pathways[pathway] = self.n_pathways
self.n_pathways += 1
return self.pathways[pathway] | python | def add_pathway(self, pathway):
"""Updates `self.pathways` and `self.n_pathways.`
Parameters
-----------
pathway : str
the pathway to add to the network.
"""
if pathway not in self.pathways:
self.pathways[pathway] = self.n_pathways
self.n_pathways += 1
return self.pathways[pathway] | [
"def",
"add_pathway",
"(",
"self",
",",
"pathway",
")",
":",
"if",
"pathway",
"not",
"in",
"self",
".",
"pathways",
":",
"self",
".",
"pathways",
"[",
"pathway",
"]",
"=",
"self",
".",
"n_pathways",
"self",
".",
"n_pathways",
"+=",
"1",
"return",
"self",
".",
"pathways",
"[",
"pathway",
"]"
] | Updates `self.pathways` and `self.n_pathways.`
Parameters
-----------
pathway : str
the pathway to add to the network. | [
"Updates",
"self",
".",
"pathways",
"and",
"self",
".",
"n_pathways",
"."
] | 9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c | https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L362-L373 | train |
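The method follows the usual assign-an-id-on-first-sight (interning) pattern; here is the same pattern as a tiny standalone sketch over a bare dict and counter.

    pathways, n_pathways = {}, 0

    def add_pathway(pathway):
        global n_pathways
        if pathway not in pathways:
            pathways[pathway] = n_pathways
            n_pathways += 1
        return pathways[pathway]

    assert add_pathway("pwA") == 0
    assert add_pathway("pwB") == 1
    assert add_pathway("pwA") == 0  # a repeated pathway keeps its first id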
greenelab/PathCORE-T | pathcore/network.py | CoNetwork.get_edge_pathways | def get_edge_pathways(self, edge_id):
"""Get the pathways associated with an edge.
Parameters
-----------
edge_id : tup(int, int)
Returns
-----------
tup(str, str)|None, the edge as a pair of pathways if the edge id
is in this network
"""
vertex0_id, vertex1_id = edge_id
pw0 = self.get_pathway_from_vertex_id(vertex0_id)
pw1 = self.get_pathway_from_vertex_id(vertex1_id)
if not pw0 or not pw1:
return None
return (pw0, pw1) | python | def get_edge_pathways(self, edge_id):
"""Get the pathways associated with an edge.
Parameters
-----------
edge_id : tup(int, int)
Returns
-----------
tup(str, str)|None, the edge as a pair of pathways if the edge id
is in this network
"""
vertex0_id, vertex1_id = edge_id
pw0 = self.get_pathway_from_vertex_id(vertex0_id)
pw1 = self.get_pathway_from_vertex_id(vertex1_id)
if not pw0 or not pw1:
return None
return (pw0, pw1) | [
"def",
"get_edge_pathways",
"(",
"self",
",",
"edge_id",
")",
":",
"vertex0_id",
",",
"vertex1_id",
"=",
"edge_id",
"pw0",
"=",
"self",
".",
"get_pathway_from_vertex_id",
"(",
"vertex0_id",
")",
"pw1",
"=",
"self",
".",
"get_pathway_from_vertex_id",
"(",
"vertex1_id",
")",
"if",
"not",
"pw0",
"or",
"not",
"pw1",
":",
"return",
"None",
"return",
"(",
"pw0",
",",
"pw1",
")"
] | Get the pathways associated with an edge.
Parameters
-----------
edge_id : tup(int, int)
Returns
-----------
tup(str, str)|None, the edge as a pair of pathways if the edge id
is in this network | [
"Get",
"the",
"pathways",
"associated",
"with",
"an",
"edge",
"."
] | 9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c | https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L388-L405 | train |
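A sketch of the same id-to-name lookup on a plain dict, with hypothetical data; either unknown vertex makes the whole lookup return None.

    id_to_pathway = {0: "pwA", 1: "pwB"}  # hypothetical vertex id -> pathway

    def get_edge_pathways(edge_id):
        v0, v1 = edge_id
        pw0, pw1 = id_to_pathway.get(v0), id_to_pathway.get(v1)
        if pw0 is None or pw1 is None:
            return None
        return (pw0, pw1)

    assert get_edge_pathways((0, 1)) == ("pwA", "pwB")
    assert get_edge_pathways((0, 9)) is None  # unknown vertex id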
greenelab/PathCORE-T | pathcore/network.py | CoNetwork.get_vertex_obj_from_pathway | def get_vertex_obj_from_pathway(self, pathway):
"""Get the vertex object that corresponds to a pathway name
Parameters
-----------
pathway : str
Returns
-----------
Vertex|None, the Vertex obj if the pathway is in this network
"""
if pathway in self.pathways:
vertex_id = self.pathways[pathway]
return self.vertices[vertex_id]
else:
return None | python | def get_vertex_obj_from_pathway(self, pathway):
"""Get the vertex object that corresponds to a pathway name
Parameters
-----------
pathway : str
Returns
-----------
Vertex|None, the Vertex obj if the pathway is in this network
"""
if pathway in self.pathways:
vertex_id = self.pathways[pathway]
return self.vertices[vertex_id]
else:
return None | [
"def",
"get_vertex_obj_from_pathway",
"(",
"self",
",",
"pathway",
")",
":",
"if",
"pathway",
"in",
"self",
".",
"pathways",
":",
"vertex_id",
"=",
"self",
".",
"pathways",
"[",
"pathway",
"]",
"return",
"self",
".",
"vertices",
"[",
"vertex_id",
"]",
"else",
":",
"return",
"None"
] | Get the vertex object that corresponds to a pathway name
Parameters
-----------
pathway : str
Returns
-----------
Vertex|None, the Vertex obj if the pathway is in this network | [
"Get",
"the",
"vertex",
"object",
"that",
"corresponds",
"to",
"a",
"pathway",
"name"
] | 9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c | https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L422-L437 | train |
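The forward direction of the lookup above, sketched on plain dicts with hypothetical data: pathway name to vertex id to vertex object.

    pathways = {"pwA": 0}     # hypothetical pathway -> vertex id
    vertices = {0: object()}  # stand-in for the Vertex objects

    def get_vertex_obj_from_pathway(pathway):
        vertex_id = pathways.get(pathway)
        return vertices[vertex_id] if vertex_id is not None else None

    assert get_vertex_obj_from_pathway("pwA") is vertices[0]
    assert get_vertex_obj_from_pathway("missing") is None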
greenelab/PathCORE-T | pathcore/network.py | CoNetwork.get_adjacent_pathways | def get_adjacent_pathways(self, pathway):
"""Get the pathways adjacent to this pathway in the network
Parameters
-----------
pathway : str
Returns
-----------
list(str), a list of pathways adjacent to the input pathway
"""
vertex_id = self.pathways[pathway]
adjacent = self.vertices[vertex_id].get_adjacent_vertex_ids()
adjacent_pathways = []
for adjacent_id in adjacent:
adjacent_pathways.append(self.get_pathway_from_vertex_id(
adjacent_id))
return adjacent_pathways | python | def get_adjacent_pathways(self, pathway):
"""Get the pathways adjacent to this pathway in the network
Parameters
-----------
pathway : str
Returns
-----------
list(str), a list of pathways adjacent to the input pathway
"""
vertex_id = self.pathways[pathway]
adjacent = self.vertices[vertex_id].get_adjacent_vertex_ids()
adjacent_pathways = []
for adjacent_id in adjacent:
adjacent_pathways.append(self.get_pathway_from_vertex_id(
adjacent_id))
return adjacent_pathways | [
"def",
"get_adjacent_pathways",
"(",
"self",
",",
"pathway",
")",
":",
"vertex_id",
"=",
"self",
".",
"pathways",
"[",
"pathway",
"]",
"adjacent",
"=",
"self",
".",
"vertices",
"[",
"vertex_id",
"]",
".",
"get_adjacent_vertex_ids",
"(",
")",
"adjacent_pathways",
"=",
"[",
"]",
"for",
"adjacent_id",
"in",
"adjacent",
":",
"adjacent_pathways",
".",
"append",
"(",
"self",
".",
"get_pathway_from_vertex_id",
"(",
"adjacent_id",
")",
")",
"return",
"adjacent_pathways"
] | Get the pathways adjacent to this pathway in the network
Parameters
-----------
pathway : str
Returns
-----------
list(str), a list of pathways adjacent to the input pathway | [
"Get",
"the",
"pathways",
"adjacent",
"to",
"this",
"pathway",
"in",
"the",
"network"
] | 9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c | https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L439-L456 | train |
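A sketch of the adjacency traversal over a plain adjacency dict; the small graph below is hypothetical.

    adjacency = {0: {1, 2}, 1: {0}, 2: {0}}         # vertex id -> neighbor ids
    id_to_pathway = {0: "pwA", 1: "pwB", 2: "pwC"}  # vertex id -> pathway name

    def get_adjacent_pathways(vertex_id):
        return [id_to_pathway[v] for v in adjacency[vertex_id]]

    assert sorted(get_adjacent_pathways(0)) == ["pwB", "pwC"]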
greenelab/PathCORE-T | pathcore/network.py | CoNetwork.to_dataframe | def to_dataframe(self, drop_weights_below=0, whitelist=None):
""" Conversion of the network to a pandas.DataFrame.
Parameters
-----------
drop_weights_below : int (default=0)
specify an edge weight threshold - remove all edges with weight
below this value
whitelist : [set|list](tup(int, int))|None (default=None)
option to pass in a set/list of edge ids (tup(int, int)) that should
be kept in the resulting dataframe
Returns
-----------
pandas.DataFrame
a pandas.DataFrame containing the network edge information.
columns = [pw0, pw1, weight]. an additional "features" column is
returned if this network is not an aggregate of multiple networks.
"""
network_df_cols = ["pw0", "pw1", "weight"]
if self.features:
network_df_cols.append("features")
network_df = pd.DataFrame(columns=network_df_cols)
idx = 0
edge_pathways = set()
for (v0, v1), edge_obj in self.edges.items():
if (edge_obj.weight > drop_weights_below and
(whitelist is None or (v0, v1) in whitelist)):
row = [self.__getitem__(v0),
self.__getitem__(v1),
edge_obj.weight]
edge_pathways.add(v0)
edge_pathways.add(v1)
if self.features:
features = edge_obj.features_to_string()
row.append(features)
network_df.loc[idx] = row
idx += 1 # faster to append by index.
network_df = network_df.sort_values(by=["weight"],
ascending=False)
print("The pathway co-occurrence network "
"contains {0} pathways.".format(
len(edge_pathways)))
return network_df | python | def to_dataframe(self, drop_weights_below=0, whitelist=None):
""" Conversion of the network to a pandas.DataFrame.
Parameters
-----------
drop_weights_below : int (default=0)
specify an edge weight threshold - remove all edges with weight
below this value
whitelist : [set|list](tup(int, int))|None (default=None)
option to pass in a set/list of edge ids (tup(int, int)) that should
be kept in the resulting dataframe
Returns
-----------
pandas.DataFrame
a pandas.DataFrame containing the network edge information.
columns = [pw0, pw1, weight]. an additional "features" column is
returned if this network is not an aggregate of multiple networks.
"""
network_df_cols = ["pw0", "pw1", "weight"]
if self.features:
network_df_cols.append("features")
network_df = pd.DataFrame(columns=network_df_cols)
idx = 0
edge_pathways = set()
for (v0, v1), edge_obj in self.edges.items():
if (edge_obj.weight > drop_weights_below and
(whitelist is None or (v0, v1) in whitelist)):
row = [self.__getitem__(v0),
self.__getitem__(v1),
edge_obj.weight]
edge_pathways.add(v0)
edge_pathways.add(v1)
if self.features:
features = edge_obj.features_to_string()
row.append(features)
network_df.loc[idx] = row
idx += 1 # faster to append by index.
network_df = network_df.sort_values(by=["weight"],
ascending=False)
print("The pathway co-occurrence network "
"contains {0} pathways.".format(
len(edge_pathways)))
return network_df | [
"def",
"to_dataframe",
"(",
"self",
",",
"drop_weights_below",
"=",
"0",
",",
"whitelist",
"=",
"None",
")",
":",
"network_df_cols",
"=",
"[",
"\"pw0\"",
",",
"\"pw1\"",
",",
"\"weight\"",
"]",
"if",
"self",
".",
"features",
":",
"network_df_cols",
".",
"append",
"(",
"\"features\"",
")",
"network_df",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"network_df_cols",
")",
"idx",
"=",
"0",
"edge_pathways",
"=",
"set",
"(",
")",
"for",
"(",
"v0",
",",
"v1",
")",
",",
"edge_obj",
"in",
"self",
".",
"edges",
".",
"items",
"(",
")",
":",
"if",
"(",
"edge_obj",
".",
"weight",
">",
"drop_weights_below",
"and",
"(",
"whitelist",
"is",
"None",
"or",
"(",
"v0",
",",
"v1",
")",
"in",
"whitelist",
")",
")",
":",
"row",
"=",
"[",
"self",
".",
"__getitem__",
"(",
"v0",
")",
",",
"self",
".",
"__getitem__",
"(",
"v1",
")",
",",
"edge_obj",
".",
"weight",
"]",
"edge_pathways",
".",
"add",
"(",
"v0",
")",
"edge_pathways",
".",
"add",
"(",
"v1",
")",
"if",
"self",
".",
"features",
":",
"features",
"=",
"edge_obj",
".",
"features_to_string",
"(",
")",
"row",
".",
"append",
"(",
"features",
")",
"network_df",
".",
"loc",
"[",
"idx",
"]",
"=",
"row",
"idx",
"+=",
"1",
"# faster to append by index.",
"network_df",
"=",
"network_df",
".",
"sort_values",
"(",
"by",
"=",
"[",
"\"weight\"",
"]",
",",
"ascending",
"=",
"False",
")",
"print",
"(",
"\"The pathway co-occurrence network \"",
"\"contains {0} pathways.\"",
".",
"format",
"(",
"len",
"(",
"edge_pathways",
")",
")",
")",
"return",
"network_df"
] | Conversion of the network to a pandas.DataFrame.
Parameters
-----------
drop_weights_below : int (default=0)
specify an edge weight threshold - remove all edges with weight
below this value
whitelist : [set|list](tup(int, int))|None (default=None)
option to pass in a set/list of edge ids (tup(int, int)) that should
be kept in the resulting dataframe
Returns
-----------
pandas.DataFrame
a pandas.DataFrame containing the network edge information.
columns = [pw0, pw1, weight]. an additional "features" column is
returned if this network is not an aggregate of multiple networks. | [
"Conversion",
"of",
"the",
"network",
"to",
"a",
"pandas",
".",
"DataFrame",
"."
] | 9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c | https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L458-L501 | train |
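A sketch of the same edge-table construction on bare data. Collecting rows in a list and building the DataFrame once is a common alternative to the row-by-row `.loc[idx]` appends in the method; the edges and threshold below are hypothetical.

    import pandas as pd

    edges = {("pwA", "pwB"): 5.0, ("pwA", "pwC"): 0.5}  # hypothetical edges
    drop_weights_below = 1.0

    rows = [(pw0, pw1, weight) for (pw0, pw1), weight in edges.items()
            if weight > drop_weights_below]
    network_df = (pd.DataFrame(rows, columns=["pw0", "pw1", "weight"])
                  .sort_values(by=["weight"], ascending=False))
    print(network_df)  # only the ("pwA", "pwB") edge survives the threshold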
greenelab/PathCORE-T | pathcore/network.py | CoNetwork._add_edge_to_vertex | def _add_edge_to_vertex(self, vertex_id, edge):
"""Adds the edge to the Vertex object's `edges` dictionary
"""
connected_to = edge.connected_to(vertex_id)
if vertex_id not in self.vertices:
vertex_obj = Vertex(vertex_id)
self.vertices[vertex_id] = vertex_obj
self.vertices[vertex_id].edges[connected_to] = edge.weight | python | def _add_edge_to_vertex(self, vertex_id, edge):
"""Adds the edge to the Vertex object's `edges` dictionary
"""
connected_to = edge.connected_to(vertex_id)
if vertex_id not in self.vertices:
vertex_obj = Vertex(vertex_id)
self.vertices[vertex_id] = vertex_obj
self.vertices[vertex_id].edges[connected_to] = edge.weight | [
"def",
"_add_edge_to_vertex",
"(",
"self",
",",
"vertex_id",
",",
"edge",
")",
":",
"connected_to",
"=",
"edge",
".",
"connected_to",
"(",
"vertex_id",
")",
"if",
"vertex_id",
"not",
"in",
"self",
".",
"vertices",
":",
"vertex_obj",
"=",
"Vertex",
"(",
"vertex_id",
")",
"self",
".",
"vertices",
"[",
"vertex_id",
"]",
"=",
"vertex_obj",
"self",
".",
"vertices",
"[",
"vertex_id",
"]",
".",
"edges",
"[",
"connected_to",
"]",
"=",
"edge",
".",
"weight"
] | Adds the edge to the Vertex object's `edges` dictionary | [
"Adds",
"the",
"edge",
"to",
"the",
"Vertex",
"object",
"s",
"edges",
"dictionary"
] | 9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c | https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L503-L510 | train |
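The helper's create-vertex-on-first-sight bookkeeping, sketched with a plain dict of neighbor-weight dicts; `dict.setdefault` would express the same thing in one line.

    vertices = {}

    def add_edge_to_vertex(vertex_id, neighbor_id, weight):
        if vertex_id not in vertices:
            vertices[vertex_id] = {}  # create the vertex record lazily
        vertices[vertex_id][neighbor_id] = weight

    add_edge_to_vertex(0, 1, 2.5)
    add_edge_to_vertex(0, 2, 1.0)
    assert vertices == {0: {1: 2.5, 2: 1.0}}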