code | docstring | text |
---|---|---|
def getquerydict(self, sep='&', encoding='utf-8', errors='strict'):
"""Split the query component into individual `name=value` pairs
separated by `sep` and return a dictionary of query variables.
The dictionary keys are the unique query variable names and
the values are lists of values for each name.
"""
dict = collections.defaultdict(list)
for name, value in self.getquerylist(sep, encoding, errors):
dict[name].append(value)
return dict | Split the query component into individual `name=value` pairs
separated by `sep` and return a dictionary of query variables.
The dictionary keys are the unique query variable names and
the values are lists of values for each name. | Below is the instruction that describes the task:
### Input:
Split the query component into individual `name=value` pairs
separated by `sep` and return a dictionary of query variables.
The dictionary keys are the unique query variable names and
the values are lists of values for each name.
### Response:
def getquerydict(self, sep='&', encoding='utf-8', errors='strict'):
"""Split the query component into individual `name=value` pairs
separated by `sep` and return a dictionary of query variables.
The dictionary keys are the unique query variable names and
the values are lists of values for each name.
"""
dict = collections.defaultdict(list)
for name, value in self.getquerylist(sep, encoding, errors):
dict[name].append(value)
return dict |
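For reference, a minimal, self-contained sketch of the splitting behaviour described above. The real method lives on a URL-like object and delegates to self.getquerylist(), so the standalone helper and sample query below are illustrative assumptions, not part of the original API.

import collections

def split_query(query, sep='&'):
    # emulate getquerylist(): produce (name, value) pairs from the raw query string
    pairs = (item.split('=', 1) for item in query.split(sep) if item)
    result = collections.defaultdict(list)
    for name, value in pairs:
        result[name].append(value)
    return result

print(dict(split_query('a=1&b=2&a=3')))  # {'a': ['1', '3'], 'b': ['2']}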
def move_file(originals, destination):
"""
Move file from original path to destination path.
:type originals: Array of str
:param originals: The original path
:type destination: str
:param destination: The destination path
"""
for original in originals:
if os.path.exists(original):
shutil.move(original, destination) | Move file from original path to destination path.
:type originals: Array of str
:param originals: The original path
:type destination: str
:param destination: The destination path | Below is the instruction that describes the task:
### Input:
Move file from original path to destination path.
:type originals: Array of str
:param originals: The original path
:type destination: str
:param destination: The destination path
### Response:
def move_file(originals, destination):
"""
Move file from original path to destination path.
:type originals: Array of str
:param originals: The original path
:type destination: str
:param destination: The destination path
"""
for original in originals:
if os.path.exists(original):
shutil.move(original, destination) |
def activationFunctionASIG(self, x):
"""
Determine the activation of a node based on that node's net input.
"""
def act(v):
if v < -15.0: return 0.0
elif v > 15.0: return 1.0
else: return 1.0 / (1.0 + Numeric.exp(-v))
return Numeric.array(list(map(act, x)), 'f') | Determine the activation of a node based on that node's net input. | Below is the instruction that describes the task:
### Input:
Determine the activation of a node based on that node's net input.
### Response:
def activationFunctionASIG(self, x):
"""
Determine the activation of a node based on that node's net input.
"""
def act(v):
if v < -15.0: return 0.0
elif v > 15.0: return 1.0
else: return 1.0 / (1.0 + Numeric.exp(-v))
return Numeric.array(list(map(act, x)), 'f') |
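A rough NumPy equivalent of the clipped logistic activation above, for readers unfamiliar with the legacy Numeric package. Note that np.clip only approximates the hard 0.0/1.0 cut-off of the original, so this is a sketch rather than a drop-in replacement.

import numpy as np

def activation_asig(x):
    # clip net inputs to +/-15 so exp() cannot overflow, then apply the logistic
    v = np.clip(np.asarray(x, dtype='f'), -15.0, 15.0)
    return 1.0 / (1.0 + np.exp(-v))

print(activation_asig([-20.0, 0.0, 20.0]))  # approximately [0.0, 0.5, 1.0]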
def as_xml(self,parent):
"""Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`"""
if self.value in ("public","private","confidental"):
n=parent.newChild(None,self.name.upper(),None)
n.newChild(None,self.value.upper(),None)
return n
return None | Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode` | Below is the instruction that describes the task:
### Input:
Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`
### Response:
def as_xml(self,parent):
"""Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`"""
if self.value in ("public","private","confidental"):
n=parent.newChild(None,self.name.upper(),None)
n.newChild(None,self.value.upper(),None)
return n
return None |
def refresh(self):
"""
Refresh this model from the server.
Updates attributes with the server-defined values. This is useful where the Model
instance came from a partial response (eg. a list query) and additional details
are required.
Existing attribute values will be overwritten.
"""
r = self._client.request('GET', self.url)
return self._deserialize(r.json(), self._manager) | Refresh this model from the server.
Updates attributes with the server-defined values. This is useful where the Model
instance came from a partial response (eg. a list query) and additional details
are required.
Existing attribute values will be overwritten. | Below is the instruction that describes the task:
### Input:
Refresh this model from the server.
Updates attributes with the server-defined values. This is useful where the Model
instance came from a partial response (eg. a list query) and additional details
are required.
Existing attribute values will be overwritten.
### Response:
def refresh(self):
"""
Refresh this model from the server.
Updates attributes with the server-defined values. This is useful where the Model
instance came from a partial response (eg. a list query) and additional details
are required.
Existing attribute values will be overwritten.
"""
r = self._client.request('GET', self.url)
return self._deserialize(r.json(), self._manager) |
def copy_all_a(input_a, *other_inputs, **kwargs):
"""Copy all readings in input a into the output.
All other inputs are skipped so that after this function runs there are no
readings left in any of the input walkers when the function finishes, even
if it generated no output readings.
Returns:
list(IOTileReading)
"""
output = []
while input_a.count() > 0:
output.append(input_a.pop())
for input_x in other_inputs:
input_x.skip_all()
return output | Copy all readings in input a into the output.
All other inputs are skipped so that after this function runs there are no
readings left in any of the input walkers when the function finishes, even
if it generated no output readings.
Returns:
list(IOTileReading) | Below is the instruction that describes the task:
### Input:
Copy all readings in input a into the output.
All other inputs are skipped so that after this function runs there are no
readings left in any of the input walkers when the function finishes, even
if it generated no output readings.
Returns:
list(IOTileReading)
### Response:
def copy_all_a(input_a, *other_inputs, **kwargs):
"""Copy all readings in input a into the output.
All other inputs are skipped so that after this function runs there are no
readings left in any of the input walkers when the function finishes, even
if it generated no output readings.
Returns:
list(IOTileReading)
"""
output = []
while input_a.count() > 0:
output.append(input_a.pop())
for input_x in other_inputs:
input_x.skip_all()
return output |
def active_pixmap(self, value):
"""
Setter for **self.__active_pixmap** attribute.
:param value: Attribute value.
:type value: QPixmap
"""
if value is not None:
assert type(value) is QPixmap, "'{0}' attribute: '{1}' type is not 'QPixmap'!".format(
"active_pixmap", value)
self.__active_pixmap = value | Setter for **self.__active_pixmap** attribute.
:param value: Attribute value.
:type value: QPixmap | Below is the instruction that describes the task:
### Input:
Setter for **self.__active_pixmap** attribute.
:param value: Attribute value.
:type value: QPixmap
### Response:
def active_pixmap(self, value):
"""
Setter for **self.__active_pixmap** attribute.
:param value: Attribute value.
:type value: QPixmap
"""
if value is not None:
assert type(value) is QPixmap, "'{0}' attribute: '{1}' type is not 'QPixmap'!".format(
"active_pixmap", value)
self.__active_pixmap = value |
async def updateTrigger(self, iden, query):
'''
Change an existing trigger's query
'''
trig = self.cell.triggers.get(iden)
self._trig_auth_check(trig.get('useriden'))
self.cell.triggers.mod(iden, query) | Change an existing trigger's query | Below is the instruction that describes the task:
### Input:
Change an existing trigger's query
### Response:
async def updateTrigger(self, iden, query):
'''
Change an existing trigger's query
'''
trig = self.cell.triggers.get(iden)
self._trig_auth_check(trig.get('useriden'))
self.cell.triggers.mod(iden, query) |
def find_executable(self):
'''Find an executable node, which means a node that has not been completed
and has no input dependency.'''
if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('DAG', 'find_executable')
for node in self.nodes():
# if it has not been executed
if node._status is None:
with_dependency = False
for edge in self.in_edges(node):
if edge[0]._status != 'completed':
with_dependency = True
break
if not with_dependency:
return node
# if no node could be found, let us try pending ones
pending_jobs = [
x for x in self.nodes() if x._status == 'signature_pending'
]
if pending_jobs:
try:
notifier = ActivityNotifier(
f'Waiting for {len(pending_jobs)} pending job{"s: e.g." if len(pending_jobs) > 1 else ":"} output {short_repr(pending_jobs[0]._signature[0])} with signature file {pending_jobs[0]._signature[1] + "_"}. You can manually remove this lock file if you are certain that no other process is working on the output.'
)
while True:
for node in pending_jobs:
# if it has not been executed
lock = fasteners.InterProcessLock(node._signature[1] +
'_')
if lock.acquire(blocking=False):
lock.release()
node._status = None
return node
time.sleep(0.1)
except Exception as e:
env.logger.error(e)
finally:
notifier.stop()
return None | Find an executable node, which means a node that has not been completed
and has no input dependency. | Below is the instruction that describes the task:
### Input:
Find an executable node, which means a node that has not been completed
and has no input dependency.
### Response:
def find_executable(self):
'''Find an executable node, which means a node that has not been completed
and has no input dependency.'''
if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('DAG', 'find_executable')
for node in self.nodes():
# if it has not been executed
if node._status is None:
with_dependency = False
for edge in self.in_edges(node):
if edge[0]._status != 'completed':
with_dependency = True
break
if not with_dependency:
return node
# if no node could be found, let us try pending ones
pending_jobs = [
x for x in self.nodes() if x._status == 'signature_pending'
]
if pending_jobs:
try:
notifier = ActivityNotifier(
f'Waiting for {len(pending_jobs)} pending job{"s: e.g." if len(pending_jobs) > 1 else ":"} output {short_repr(pending_jobs[0]._signature[0])} with signature file {pending_jobs[0]._signature[1] + "_"}. You can manually remove this lock file if you are certain that no other process is working on the output.'
)
while True:
for node in pending_jobs:
# if it has not been executed
lock = fasteners.InterProcessLock(node._signature[1] +
'_')
if lock.acquire(blocking=False):
lock.release()
node._status = None
return node
time.sleep(0.1)
except Exception as e:
env.logger.error(e)
finally:
notifier.stop()
return None |
def _build_index(self):
"""Itera todos los datasets, distribucioens y fields indexandolos."""
datasets_index = {}
distributions_index = {}
fields_index = {}
# iterate over all datasets
for dataset_index, dataset in enumerate(self.datasets):
if "identifier" in dataset:
datasets_index[dataset["identifier"]] = {
"dataset_index": dataset_index
}
# iterate over the dataset's distributions
for distribution_index, distribution in enumerate(
dataset.get("distribution", [])):
if "identifier" in distribution:
distributions_index[distribution["identifier"]] = {
"distribution_index": distribution_index,
"dataset_identifier": dataset["identifier"]
}
# iterate over the distribution's fields
for field_index, field in enumerate(
distribution.get("field", [])):
if "id" in field:
fields_index[field["id"]] = {
"field_index":
field_index,
"dataset_identifier":
dataset["identifier"],
"distribution_identifier":
distribution["identifier"]
}
setattr(self, "_distributions_index", distributions_index)
setattr(self, "_datasets_index", datasets_index)
setattr(self, "_fields_index", fields_index) | Itera todos los datasets, distribucioens y fields indexandolos. | Below is the the instruction that describes the task:
### Input:
Iterate over all datasets, distributions and fields, indexing them.
### Response:
def _build_index(self):
"""Itera todos los datasets, distribucioens y fields indexandolos."""
datasets_index = {}
distributions_index = {}
fields_index = {}
# iterate over all datasets
for dataset_index, dataset in enumerate(self.datasets):
if "identifier" in dataset:
datasets_index[dataset["identifier"]] = {
"dataset_index": dataset_index
}
# iterate over the dataset's distributions
for distribution_index, distribution in enumerate(
dataset.get("distribution", [])):
if "identifier" in distribution:
distributions_index[distribution["identifier"]] = {
"distribution_index": distribution_index,
"dataset_identifier": dataset["identifier"]
}
# iterate over the distribution's fields
for field_index, field in enumerate(
distribution.get("field", [])):
if "id" in field:
fields_index[field["id"]] = {
"field_index":
field_index,
"dataset_identifier":
dataset["identifier"],
"distribution_identifier":
distribution["identifier"]
}
setattr(self, "_distributions_index", distributions_index)
setattr(self, "_datasets_index", datasets_index)
setattr(self, "_fields_index", fields_index) |
def export(self, swf, shape, **export_opts):
""" Exports the specified shape of the SWF to SVG.
@param swf The SWF.
@param shape Which shape to export, either by characterId(int) or as a Tag object.
"""
# If `shape` is given as int, find corresponding shape tag.
if isinstance(shape, Tag):
shape_tag = shape
else:
shapes = [x for x in swf.all_tags_of_type((TagDefineShape, TagDefineSprite)) if x.characterId == shape]
if len(shapes):
shape_tag = shapes[0]
else:
raise Exception("Shape %s not found" % shape)
from swf.movie import SWF
# find a typical use of this shape
example_place_objects = [x for x in swf.all_tags_of_type(TagPlaceObject) if x.hasCharacter and x.characterId == shape_tag.characterId]
if len(example_place_objects):
place_object = example_place_objects[0]
characters = swf.build_dictionary()
ids_to_export = place_object.get_dependencies()
ids_exported = set()
tags_to_export = []
# this had better form a dag!
while len(ids_to_export):
id = ids_to_export.pop()
if id in ids_exported or id not in characters:
continue
tag = characters[id]
ids_to_export.update(tag.get_dependencies())
tags_to_export.append(tag)
ids_exported.add(id)
tags_to_export.reverse()
tags_to_export.append(place_object)
else:
place_object = TagPlaceObject()
place_object.hasCharacter = True
place_object.characterId = shape_tag.characterId
tags_to_export = [ shape_tag, place_object ]
stunt_swf = SWF()
stunt_swf.tags = tags_to_export
return super(SingleShapeSVGExporterMixin, self).export(stunt_swf, **export_opts) | Exports the specified shape of the SWF to SVG.
@param swf The SWF.
@param shape Which shape to export, either by characterId(int) or as a Tag object. | Below is the instruction that describes the task:
### Input:
Exports the specified shape of the SWF to SVG.
@param swf The SWF.
@param shape Which shape to export, either by characterId(int) or as a Tag object.
### Response:
def export(self, swf, shape, **export_opts):
""" Exports the specified shape of the SWF to SVG.
@param swf The SWF.
@param shape Which shape to export, either by characterId(int) or as a Tag object.
"""
# If `shape` is given as int, find corresponding shape tag.
if isinstance(shape, Tag):
shape_tag = shape
else:
shapes = [x for x in swf.all_tags_of_type((TagDefineShape, TagDefineSprite)) if x.characterId == shape]
if len(shapes):
shape_tag = shapes[0]
else:
raise Exception("Shape %s not found" % shape)
from swf.movie import SWF
# find a typical use of this shape
example_place_objects = [x for x in swf.all_tags_of_type(TagPlaceObject) if x.hasCharacter and x.characterId == shape_tag.characterId]
if len(example_place_objects):
place_object = example_place_objects[0]
characters = swf.build_dictionary()
ids_to_export = place_object.get_dependencies()
ids_exported = set()
tags_to_export = []
# this had better form a dag!
while len(ids_to_export):
id = ids_to_export.pop()
if id in ids_exported or id not in characters:
continue
tag = characters[id]
ids_to_export.update(tag.get_dependencies())
tags_to_export.append(tag)
ids_exported.add(id)
tags_to_export.reverse()
tags_to_export.append(place_object)
else:
place_object = TagPlaceObject()
place_object.hasCharacter = True
place_object.characterId = shape_tag.characterId
tags_to_export = [ shape_tag, place_object ]
stunt_swf = SWF()
stunt_swf.tags = tags_to_export
return super(SingleShapeSVGExporterMixin, self).export(stunt_swf, **export_opts) |
def get_ht_op(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n766.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
protection = ('no', 'nonmember', 20, 'non-HT mixed')
sta_chan_width = (20, 'any')
answers = {
'primary channel': data[0],
'secondary channel offset': ht_secondary_offset[data[1] & 0x3],
'STA channel width': sta_chan_width[(data[1] & 0x4) >> 2],
'RIFS': (data[1] & 0x8) >> 3,
'HT protection': protection[data[2] & 0x3],
'non-GF present': (data[2] & 0x4) >> 2,
'OBSS non-GF present': (data[2] & 0x10) >> 4,
'dual beacon': (data[4] & 0x40) >> 6,
'dual CTS protection': (data[4] & 0x80) >> 7,
'STBC beacon': data[5] & 0x1,
'L-SIG TXOP Prot': (data[5] & 0x2) >> 1,
'PCO active': (data[5] & 0x4) >> 2,
'PCO phase': (data[5] & 0x8) >> 3,
}
return answers | http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n766.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict. | Below is the instruction that describes the task:
### Input:
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n766.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
### Response:
def get_ht_op(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n766.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
protection = ('no', 'nonmember', 20, 'non-HT mixed')
sta_chan_width = (20, 'any')
answers = {
'primary channel': data[0],
'secondary channel offset': ht_secondary_offset[data[1] & 0x3],
'STA channel width': sta_chan_width[(data[1] & 0x4) >> 2],
'RIFS': (data[1] & 0x8) >> 3,
'HT protection': protection[data[2] & 0x3],
'non-GF present': (data[2] & 0x4) >> 2,
'OBSS non-GF present': (data[2] & 0x10) >> 4,
'dual beacon': (data[4] & 0x40) >> 6,
'dual CTS protection': (data[4] & 0x80) >> 7,
'STBC beacon': data[5] & 0x1,
'L-SIG TXOP Prot': (data[5] & 0x2) >> 1,
'PCO active': (data[5] & 0x4) >> 2,
'PCO phase': (data[5] & 0x8) >> 3,
}
return answers |
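A small worked example of the bit masking above on a made-up element body; the bytes are illustrative rather than a real capture, and the module-level ht_secondary_offset table is left out because its contents are not shown here.

data = bytearray([36, 0x05, 0x00, 0x00, 0x00, 0x00])
sta_chan_width = (20, 'any')
print('primary channel:', data[0])                                  # 36
print('STA channel width:', sta_chan_width[(data[1] & 0x4) >> 2])   # 'any'
print('RIFS:', (data[1] & 0x8) >> 3)                                # 0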
def model_installed(name):
"""Check if spaCy language model is installed.
From https://github.com/explosion/spaCy/blob/master/spacy/util.py
:param name:
:return:
"""
data_path = util.get_data_path()
if not data_path or not data_path.exists():
raise IOError(f"Can't find spaCy data path: {data_path}")
if name in {d.name for d in data_path.iterdir()}:
return True
if Spacy.is_package(name): # installed as package
return True
if Path(name).exists(): # path to model data directory
return True
return False | Check if spaCy language model is installed.
From https://github.com/explosion/spaCy/blob/master/spacy/util.py
:param name:
:return: | Below is the instruction that describes the task:
### Input:
Check if spaCy language model is installed.
From https://github.com/explosion/spaCy/blob/master/spacy/util.py
:param name:
:return:
### Response:
def model_installed(name):
"""Check if spaCy language model is installed.
From https://github.com/explosion/spaCy/blob/master/spacy/util.py
:param name:
:return:
"""
data_path = util.get_data_path()
if not data_path or not data_path.exists():
raise IOError(f"Can't find spaCy data path: {data_path}")
if name in {d.name for d in data_path.iterdir()}:
return True
if Spacy.is_package(name): # installed as package
return True
if Path(name).exists(): # path to model data directory
return True
return False |
def idle_all_workers(self):
'''Set the global mode to :attr:`IDLE` and wait for workers to stop.
This can wait arbitrarily long before returning. The worst
case in "normal" usage involves waiting five minutes for a
"lost" job to expire; a well-behaved but very-long-running job
can extend its own lease further, and this function will not
return until that job finishes (if ever).
.. deprecated:: 0.4.5
There isn't an obvious use case for this function, and its
"maybe wait forever for something out of my control" nature
makes it hard to use in real code. Polling all of the work
specs and their :meth:`num_pending` in application code if
you really needed this operation would have the same
semantics and database load.
'''
self.set_mode(self.IDLE)
while 1:
num_pending = dict()
for work_spec_name in self.registry.pull(NICE_LEVELS).keys():
num_pending[work_spec_name] = self.num_pending(work_spec_name)
if sum(num_pending.values()) == 0:
break
logger.warn('waiting for pending work_units: %r', num_pending)
time.sleep(1) | Set the global mode to :attr:`IDLE` and wait for workers to stop.
This can wait arbitrarily long before returning. The worst
case in "normal" usage involves waiting five minutes for a
"lost" job to expire; a well-behaved but very-long-running job
can extend its own lease further, and this function will not
return until that job finishes (if ever).
.. deprecated:: 0.4.5
There isn't an obvious use case for this function, and its
"maybe wait forever for something out of my control" nature
makes it hard to use in real code. Polling all of the work
specs and their :meth:`num_pending` in application code if
you really needed this operation would have the same
semantics and database load. | Below is the instruction that describes the task:
### Input:
Set the global mode to :attr:`IDLE` and wait for workers to stop.
This can wait arbitrarily long before returning. The worst
case in "normal" usage involves waiting five minutes for a
"lost" job to expire; a well-behaved but very-long-running job
can extend its own lease further, and this function will not
return until that job finishes (if ever).
.. deprecated:: 0.4.5
There isn't an obvious use case for this function, and its
"maybe wait forever for something out of my control" nature
makes it hard to use in real code. Polling all of the work
specs and their :meth:`num_pending` in application code if
you really needed this operation would have the same
semantics and database load.
### Response:
def idle_all_workers(self):
'''Set the global mode to :attr:`IDLE` and wait for workers to stop.
This can wait arbitrarily long before returning. The worst
case in "normal" usage involves waiting five minutes for a
"lost" job to expire; a well-behaved but very-long-running job
can extend its own lease further, and this function will not
return until that job finishes (if ever).
.. deprecated:: 0.4.5
There isn't an obvious use case for this function, and its
"maybe wait forever for something out of my control" nature
makes it hard to use in real code. Polling all of the work
specs and their :meth:`num_pending` in application code if
you really needed this operation would have the same
semantics and database load.
'''
self.set_mode(self.IDLE)
while 1:
num_pending = dict()
for work_spec_name in self.registry.pull(NICE_LEVELS).keys():
num_pending[work_spec_name] = self.num_pending(work_spec_name)
if sum(num_pending.values()) == 0:
break
logger.warn('waiting for pending work_units: %r', num_pending)
time.sleep(1) |
def get_client_kwargs(self):
"""Get kwargs for use with the methods in :mod:`nailgun.client`.
This method returns a dict of attributes that can be unpacked and used
as kwargs via the ``**`` operator. For example::
cfg = ServerConfig.get()
client.get(cfg.url + '/api/v2', **cfg.get_client_kwargs())
This method is useful because client code may not know which attributes
should be passed from a ``ServerConfig`` object to one of the
``nailgun.client`` functions. Consider that the example above could
also be written like this::
cfg = ServerConfig.get()
client.get(cfg.url + '/api/v2', auth=cfg.auth, verify=cfg.verify)
But this latter approach is more fragile. It will break if ``cfg`` does
not have an ``auth`` or ``verify`` attribute.
"""
config = vars(self).copy()
config.pop('url')
config.pop('version', None)
return config | Get kwargs for use with the methods in :mod:`nailgun.client`.
This method returns a dict of attributes that can be unpacked and used
as kwargs via the ``**`` operator. For example::
cfg = ServerConfig.get()
client.get(cfg.url + '/api/v2', **cfg.get_client_kwargs())
This method is useful because client code may not know which attributes
should be passed from a ``ServerConfig`` object to one of the
``nailgun.client`` functions. Consider that the example above could
also be written like this::
cfg = ServerConfig.get()
client.get(cfg.url + '/api/v2', auth=cfg.auth, verify=cfg.verify)
But this latter approach is more fragile. It will break if ``cfg`` does
not have an ``auth`` or ``verify`` attribute. | Below is the instruction that describes the task:
### Input:
Get kwargs for use with the methods in :mod:`nailgun.client`.
This method returns a dict of attributes that can be unpacked and used
as kwargs via the ``**`` operator. For example::
cfg = ServerConfig.get()
client.get(cfg.url + '/api/v2', **cfg.get_client_kwargs())
This method is useful because client code may not know which attributes
should be passed from a ``ServerConfig`` object to one of the
``nailgun.client`` functions. Consider that the example above could
also be written like this::
cfg = ServerConfig.get()
client.get(cfg.url + '/api/v2', auth=cfg.auth, verify=cfg.verify)
But this latter approach is more fragile. It will break if ``cfg`` does
not have an ``auth`` or ``verify`` attribute.
### Response:
def get_client_kwargs(self):
"""Get kwargs for use with the methods in :mod:`nailgun.client`.
This method returns a dict of attributes that can be unpacked and used
as kwargs via the ``**`` operator. For example::
cfg = ServerConfig.get()
client.get(cfg.url + '/api/v2', **cfg.get_client_kwargs())
This method is useful because client code may not know which attributes
should be passed from a ``ServerConfig`` object to one of the
``nailgun.client`` functions. Consider that the example above could
also be written like this::
cfg = ServerConfig.get()
client.get(cfg.url + '/api/v2', auth=cfg.auth, verify=cfg.verify)
But this latter approach is more fragile. It will break if ``cfg`` does
not have an ``auth`` or ``verify`` attribute.
"""
config = vars(self).copy()
config.pop('url')
config.pop('version', None)
return config |
def _tiles_from_bbox(bbox, zoom_level):
"""
* Returns all tiles for the specified bounding box
"""
if isinstance(bbox, dict):
point_min = Point.from_latitude_longitude(latitude=bbox['tl'], longitude=bbox['tr'])
point_max = Point.from_latitude_longitude(latitude=bbox['bl'], longitude=bbox['br'])
elif isinstance(bbox, list):
point_min = Point.from_latitude_longitude(latitude=bbox[1], longitude=bbox[0])
point_max = Point.from_latitude_longitude(latitude=bbox[3], longitude=bbox[2])
else:
raise RuntimeError("bbox must bei either a dict or a list")
tile_min = Tile.for_point(point_min, zoom_level)
tile_max = Tile.for_point(point_max, zoom_level)
tiles = []
for x in range(tile_min.tms_x, tile_max.tms_x + 1):
for y in range(tile_min.tms_y, tile_max.tms_y + 1):
tiles.append(Tile.from_tms(tms_x=x, tms_y=y, zoom=zoom_level))
return tiles | * Returns all tiles for the specified bounding box | Below is the instruction that describes the task:
### Input:
* Returns all tiles for the specified bounding box
### Response:
def _tiles_from_bbox(bbox, zoom_level):
"""
* Returns all tiles for the specified bounding box
"""
if isinstance(bbox, dict):
point_min = Point.from_latitude_longitude(latitude=bbox['tl'], longitude=bbox['tr'])
point_max = Point.from_latitude_longitude(latitude=bbox['bl'], longitude=bbox['br'])
elif isinstance(bbox, list):
point_min = Point.from_latitude_longitude(latitude=bbox[1], longitude=bbox[0])
point_max = Point.from_latitude_longitude(latitude=bbox[3], longitude=bbox[2])
else:
raise RuntimeError("bbox must bei either a dict or a list")
tile_min = Tile.for_point(point_min, zoom_level)
tile_max = Tile.for_point(point_max, zoom_level)
tiles = []
for x in range(tile_min.tms_x, tile_max.tms_x + 1):
for y in range(tile_min.tms_y, tile_max.tms_y + 1):
tiles.append(Tile.from_tms(tms_x=x, tms_y=y, zoom=zoom_level))
return tiles |
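A hypothetical call, assuming the Point and Tile helpers this module imports (their API resembles pygeotile). A list bbox is read as [lon_min, lat_min, lon_max, lat_max], while a dict uses the tl/tr/bl/br keys shown above; the coordinates here are made up for illustration.

# bounding box roughly around central Berlin at zoom level 14 (illustrative values)
tiles = _tiles_from_bbox([13.35, 52.49, 13.43, 52.54], zoom_level=14)
print(len(tiles))  # number of TMS tiles covering the box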
def serialize(self, obj):
"""Serialize an object for sending to the front-end."""
if hasattr(obj, '_jsid'):
return {'immutable': False, 'value': obj._jsid}
else:
obj_json = {'immutable': True}
try:
json.dumps(obj)
obj_json['value'] = obj
except:
pass
if callable(obj):
guid = str(uuid.uuid4())
callback_registry[guid] = obj
obj_json['callback'] = guid
return obj_json | Serialize an object for sending to the front-end. | Below is the instruction that describes the task:
### Input:
Serialize an object for sending to the front-end.
### Response:
def serialize(self, obj):
"""Serialize an object for sending to the front-end."""
if hasattr(obj, '_jsid'):
return {'immutable': False, 'value': obj._jsid}
else:
obj_json = {'immutable': True}
try:
json.dumps(obj)
obj_json['value'] = obj
except:
pass
if callable(obj):
guid = str(uuid.uuid4())
callback_registry[guid] = obj
obj_json['callback'] = guid
return obj_json |
def delete_unique_identity(db, uuid):
"""Remove a unique identity from the registry.
Function that removes from the registry, the unique identity
that matches with uuid. Data related to this identity will be
also removed.
It checks first whether the unique identity is already on the registry.
When it is found, the unique identity is removed. Otherwise, it will
raise a 'NotFoundError' exception.
:param db: database manager
:param uuid: unique identifier assigned to the unique identity set
for being removed
:raises NotFoundError: raised when the unique identity does not exist
in the registry.
"""
with db.connect() as session:
uidentity = find_unique_identity(session, uuid)
if not uidentity:
raise NotFoundError(entity=uuid)
delete_unique_identity_db(session, uidentity) | Remove a unique identity from the registry.
Function that removes from the registry, the unique identity
that matches with uuid. Data related to this identity will be
also removed.
It checks first whether the unique identity is already on the registry.
When it is found, the unique identity is removed. Otherwise, it will
raise a 'NotFoundError' exception.
:param db: database manager
:param uuid: unique identifier assigned to the unique identity set
for being removed
:raises NotFoundError: raised when the unique identity does not exist
in the registry. | Below is the instruction that describes the task:
### Input:
Remove a unique identity from the registry.
Function that removes from the registry, the unique identity
that matches with uuid. Data related to this identity will be
also removed.
It checks first whether the unique identity is already on the registry.
When it is found, the unique identity is removed. Otherwise, it will
raise a 'NotFoundError' exception.
:param db: database manager
:param uuid: unique identifier assigned to the unique identity set
for being removed
:raises NotFoundError: raised when the unique identity does not exist
in the registry.
### Response:
def delete_unique_identity(db, uuid):
"""Remove a unique identity from the registry.
Function that removes from the registry, the unique identity
that matches with uuid. Data related to this identity will be
also removed.
It checks first whether the unique identity is already on the registry.
When it is found, the unique identity is removed. Otherwise, it will
raise a 'NotFoundError' exception.
:param db: database manager
:param uuid: unique identifier assigned to the unique identity set
for being removed
:raises NotFoundError: raised when the unique identity does not exist
in the registry.
"""
with db.connect() as session:
uidentity = find_unique_identity(session, uuid)
if not uidentity:
raise NotFoundError(entity=uuid)
delete_unique_identity_db(session, uidentity) |
def hasAttribute(self, attr: str) -> bool:
"""Return True if this node has ``attr``."""
if attr == 'class':
return bool(self.classList)
return attr in self.attributes | Return True if this node has ``attr``. | Below is the instruction that describes the task:
### Input:
Return True if this node has ``attr``.
### Response:
def hasAttribute(self, attr: str) -> bool:
"""Return True if this node has ``attr``."""
if attr == 'class':
return bool(self.classList)
return attr in self.attributes |
def sha_hash(self) -> str:
"""
Return uppercase hex sha256 hash from signed raw document
:return:
"""
return hashlib.sha256(self.signed_raw().encode("ascii")).hexdigest().upper() | Return uppercase hex sha256 hash from signed raw document
:return: | Below is the instruction that describes the task:
### Input:
Return uppercase hex sha256 hash from signed raw document
:return:
### Response:
def sha_hash(self) -> str:
"""
Return uppercase hex sha256 hash from signed raw document
:return:
"""
return hashlib.sha256(self.signed_raw().encode("ascii")).hexdigest().upper() |
def match_modules(allowed_modules):
"""Creates a matcher that matches a list/set/tuple of allowed modules."""
cleaned_allowed_modules = [
utils.mod_to_mod_name(tmp_mod)
for tmp_mod in allowed_modules
]
cleaned_split_allowed_modules = [
tmp_mod.split(".")
for tmp_mod in cleaned_allowed_modules
]
cleaned_allowed_modules = []
del cleaned_allowed_modules
def matcher(cause):
cause_cls = None
cause_type_name = cause.exception_type_names[0]
# Rip off the class name (usually at the end).
cause_type_name_pieces = cause_type_name.split(".")
cause_type_name_mod_pieces = cause_type_name_pieces[0:-1]
# Do any modules provided match the provided causes module?
mod_match = any(
utils.array_prefix_matches(mod_pieces,
cause_type_name_mod_pieces)
for mod_pieces in cleaned_split_allowed_modules)
if mod_match:
cause_cls = importutils.import_class(cause_type_name)
cause_cls = ensure_base_exception(cause_type_name, cause_cls)
return cause_cls
return matcher | Creates a matcher that matches a list/set/tuple of allowed modules. | Below is the instruction that describes the task:
### Input:
Creates a matcher that matches a list/set/tuple of allowed modules.
### Response:
def match_modules(allowed_modules):
"""Creates a matcher that matches a list/set/tuple of allowed modules."""
cleaned_allowed_modules = [
utils.mod_to_mod_name(tmp_mod)
for tmp_mod in allowed_modules
]
cleaned_split_allowed_modules = [
tmp_mod.split(".")
for tmp_mod in cleaned_allowed_modules
]
cleaned_allowed_modules = []
del cleaned_allowed_modules
def matcher(cause):
cause_cls = None
cause_type_name = cause.exception_type_names[0]
# Rip off the class name (usually at the end).
cause_type_name_pieces = cause_type_name.split(".")
cause_type_name_mod_pieces = cause_type_name_pieces[0:-1]
# Do any modules provided match the provided causes module?
mod_match = any(
utils.array_prefix_matches(mod_pieces,
cause_type_name_mod_pieces)
for mod_pieces in cleaned_split_allowed_modules)
if mod_match:
cause_cls = importutils.import_class(cause_type_name)
cause_cls = ensure_base_exception(cause_type_name, cause_cls)
return cause_cls
return matcher |
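A self-contained sketch of the module-prefix test the matcher performs. utils.array_prefix_matches is assumed to behave roughly like the helper below; that assumption, and the example names, are not taken from the original code.

def array_prefix_matches(prefix, pieces):
    # does the dotted-name prefix match the start of the cause's module path?
    return pieces[:len(prefix)] == prefix

cause_mod = "myapp.errors.NotFound".split(".")[:-1]               # ['myapp', 'errors']
allowed = [m.split(".") for m in ("myapp", "otherlib.exc")]
print(any(array_prefix_matches(m, cause_mod) for m in allowed))   # True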
def state(self, container_id=None, sudo=None, sync_socket=None):
''' get the state of an OciImage, if it exists. The optional states that
can be returned are created, running, stopped or (not existing).
Equivalent command line example:
singularity oci state <container_ID>
Parameters
==========
container_id: the id to get the state of.
sudo: Add sudo to the command. If the container was created by root,
you need sudo to interact and get its state.
sync_socket: the path to the unix socket for state synchronization
Returns
=======
state: a parsed json of the container state, if exists. If the
container is not found, None is returned.
'''
sudo = self._get_sudo(sudo)
container_id = self.get_container_id(container_id)
# singularity oci state
cmd = self._init_command('state')
if sync_socket != None:
cmd = cmd + ['--sync-socket', sync_socket]
# Finally, add the container_id
cmd.append(container_id)
# Get the instance state
result = self._run_command(cmd, sudo=sudo, quiet=True)
if result != None:
# If successful, a string is returned to parse
if isinstance(result, str):
return json.loads(result) | get the state of an OciImage, if it exists. The optional states that
can be returned are created, running, stopped or (not existing).
Equivalent command line example:
singularity oci state <container_ID>
Parameters
==========
container_id: the id to get the state of.
sudo: Add sudo to the command. If the container was created by root,
you need sudo to interact and get its state.
sync_socket: the path to the unix socket for state synchronization
Returns
=======
state: a parsed json of the container state, if exists. If the
container is not found, None is returned. | Below is the instruction that describes the task:
### Input:
get the state of an OciImage, if it exists. The optional states that
can be returned are created, running, stopped or (not existing).
Equivalent command line example:
singularity oci state <container_ID>
Parameters
==========
container_id: the id to get the state of.
sudo: Add sudo to the command. If the container was created by root,
you need sudo to interact and get its state.
sync_socket: the path to the unix socket for state synchronization
Returns
=======
state: a parsed json of the container state, if exists. If the
container is not found, None is returned.
### Response:
def state(self, container_id=None, sudo=None, sync_socket=None):
''' get the state of an OciImage, if it exists. The optional states that
can be returned are created, running, stopped or (not existing).
Equivalent command line example:
singularity oci state <container_ID>
Parameters
==========
container_id: the id to get the state of.
sudo: Add sudo to the command. If the container was created by root,
you need sudo to interact and get its state.
sync_socket: the path to the unix socket for state synchronization
Returns
=======
state: a parsed json of the container state, if exists. If the
container is not found, None is returned.
'''
sudo = self._get_sudo(sudo)
container_id = self.get_container_id(container_id)
# singularity oci state
cmd = self._init_command('state')
if sync_socket != None:
cmd = cmd + ['--sync-socket', sync_socket]
# Finally, add the container_id
cmd.append(container_id)
# Get the instance state
result = self._run_command(cmd, sudo=sudo, quiet=True)
if result != None:
# If successful, a string is returned to parse
if isinstance(result, str):
return json.loads(result) |
def check_entitlement(doi):
"""Check whether IP and credentials enable access to content for a doi.
This function uses the entitlement endpoint of the Elsevier API to check
whether an article is available to a given institution. Note that this
feature of the API is itself not available for all institution keys.
"""
if doi.lower().startswith('doi:'):
doi = doi[4:]
url = '%s/%s' % (elsevier_entitlement_url, doi)
params = {'httpAccept': 'text/xml'}
res = requests.get(url, params, headers=ELSEVIER_KEYS)
if not res.status_code == 200:
logger.error('Could not check entitlements for article %s: '
'status code %d' % (doi, res.status_code))
logger.error('Response content: %s' % res.text)
return False
return True | Check whether IP and credentials enable access to content for a doi.
This function uses the entitlement endpoint of the Elsevier API to check
whether an article is available to a given institution. Note that this
feature of the API is itself not available for all institution keys. | Below is the instruction that describes the task:
### Input:
Check whether IP and credentials enable access to content for a doi.
This function uses the entitlement endpoint of the Elsevier API to check
whether an article is available to a given institution. Note that this
feature of the API is itself not available for all institution keys.
### Response:
def check_entitlement(doi):
"""Check whether IP and credentials enable access to content for a doi.
This function uses the entitlement endpoint of the Elsevier API to check
whether an article is available to a given institution. Note that this
feature of the API is itself not available for all institution keys.
"""
if doi.lower().startswith('doi:'):
doi = doi[4:]
url = '%s/%s' % (elsevier_entitlement_url, doi)
params = {'httpAccept': 'text/xml'}
res = requests.get(url, params, headers=ELSEVIER_KEYS)
if not res.status_code == 200:
logger.error('Could not check entitlements for article %s: '
'status code %d' % (doi, res.status_code))
logger.error('Response content: %s' % res.text)
return False
return True |
def allows_simple_recursion(self):
"""Check recursion level and extern status."""
rec_level = self.aggregate.config["recursionlevel"]
if rec_level >= 0 and self.recursion_level >= rec_level:
log.debug(LOG_CHECK, "... no, maximum recursion level reached.")
return False
if self.extern[0]:
log.debug(LOG_CHECK, "... no, extern.")
return False
return True | Check recursion level and extern status. | Below is the the instruction that describes the task:
### Input:
Check recursion level and extern status. | Below is the instruction that describes the task:
### Response:
def allows_simple_recursion(self):
"""Check recursion level and extern status."""
rec_level = self.aggregate.config["recursionlevel"]
if rec_level >= 0 and self.recursion_level >= rec_level:
log.debug(LOG_CHECK, "... no, maximum recursion level reached.")
return False
if self.extern[0]:
log.debug(LOG_CHECK, "... no, extern.")
return False
return True |
def post(self, path, query=None, data=None, redirects=True):
"""
POST request wrapper for :func:`request()`
"""
return self.request('POST', path, query, data, redirects) | POST request wrapper for :func:`request()` | Below is the the instruction that describes the task:
### Input:
POST request wrapper for :func:`request()`
### Response:
def post(self, path, query=None, data=None, redirects=True):
"""
POST request wrapper for :func:`request()`
"""
return self.request('POST', path, query, data, redirects) |
def as_list(data, use_pandas=True, header=True):
"""
Convert an H2O data object into a python-specific object.
WARNING! This will pull all data local!
If Pandas is available (and use_pandas is True), then pandas will be used to parse the
data frame. Otherwise, a list-of-lists populated by character data will be returned (so
the types of data will all be str).
:param data: an H2O data object.
:param use_pandas: If True, try to use pandas for reading in the data.
:param header: If True, return column names as first element in list
:returns: List of lists (Rows x Columns).
"""
assert_is_type(data, H2OFrame)
assert_is_type(use_pandas, bool)
assert_is_type(header, bool)
return H2OFrame.as_data_frame(data, use_pandas=use_pandas, header=header) | Convert an H2O data object into a python-specific object.
WARNING! This will pull all data local!
If Pandas is available (and use_pandas is True), then pandas will be used to parse the
data frame. Otherwise, a list-of-lists populated by character data will be returned (so
the types of data will all be str).
:param data: an H2O data object.
:param use_pandas: If True, try to use pandas for reading in the data.
:param header: If True, return column names as first element in list
:returns: List of lists (Rows x Columns). | Below is the instruction that describes the task:
### Input:
Convert an H2O data object into a python-specific object.
WARNING! This will pull all data local!
If Pandas is available (and use_pandas is True), then pandas will be used to parse the
data frame. Otherwise, a list-of-lists populated by character data will be returned (so
the types of data will all be str).
:param data: an H2O data object.
:param use_pandas: If True, try to use pandas for reading in the data.
:param header: If True, return column names as first element in list
:returns: List of lists (Rows x Columns).
### Response:
def as_list(data, use_pandas=True, header=True):
"""
Convert an H2O data object into a python-specific object.
WARNING! This will pull all data local!
If Pandas is available (and use_pandas is True), then pandas will be used to parse the
data frame. Otherwise, a list-of-lists populated by character data will be returned (so
the types of data will all be str).
:param data: an H2O data object.
:param use_pandas: If True, try to use pandas for reading in the data.
:param header: If True, return column names as first element in list
:returns: List of lists (Rows x Columns).
"""
assert_is_type(data, H2OFrame)
assert_is_type(use_pandas, bool)
assert_is_type(header, bool)
return H2OFrame.as_data_frame(data, use_pandas=use_pandas, header=header) |
def from_ofxparse(data, institution):
"""Instantiate :py:class:`ofxclient.Account` subclass from ofxparse
module
:param data: an ofxparse account
:type data: An :py:class:`ofxparse.Account` object
:param institution: The parent institution of the account
:type institution: :py:class:`ofxclient.Institution` object
"""
description = data.desc if hasattr(data, 'desc') else None
if data.type == AccountType.Bank:
return BankAccount(
institution=institution,
number=data.account_id,
routing_number=data.routing_number,
account_type=data.account_type,
description=description)
elif data.type == AccountType.CreditCard:
return CreditCardAccount(
institution=institution,
number=data.account_id,
description=description)
elif data.type == AccountType.Investment:
return BrokerageAccount(
institution=institution,
number=data.account_id,
broker_id=data.brokerid,
description=description)
raise ValueError("unknown account type: %s" % data.type) | Instantiate :py:class:`ofxclient.Account` subclass from ofxparse
module
:param data: an ofxparse account
:type data: An :py:class:`ofxparse.Account` object
:param institution: The parent institution of the account
:type institution: :py:class:`ofxclient.Institution` object | Below is the instruction that describes the task:
### Input:
Instantiate :py:class:`ofxclient.Account` subclass from ofxparse
module
:param data: an ofxparse account
:type data: An :py:class:`ofxparse.Account` object
:param institution: The parent institution of the account
:type institution: :py:class:`ofxclient.Institution` object
### Response:
def from_ofxparse(data, institution):
"""Instantiate :py:class:`ofxclient.Account` subclass from ofxparse
module
:param data: an ofxparse account
:type data: An :py:class:`ofxparse.Account` object
:param institution: The parent institution of the account
:type institution: :py:class:`ofxclient.Institution` object
"""
description = data.desc if hasattr(data, 'desc') else None
if data.type == AccountType.Bank:
return BankAccount(
institution=institution,
number=data.account_id,
routing_number=data.routing_number,
account_type=data.account_type,
description=description)
elif data.type == AccountType.CreditCard:
return CreditCardAccount(
institution=institution,
number=data.account_id,
description=description)
elif data.type == AccountType.Investment:
return BrokerageAccount(
institution=institution,
number=data.account_id,
broker_id=data.brokerid,
description=description)
raise ValueError("unknown account type: %s" % data.type) |
def get_json(self, uri_path, http_method='GET', query_parameters=None, body=None, headers=None):
"""
Fetches the JSON returned, after making the call and checking for errors.
:param uri_path: Endpoint to be used to make a request.
:param http_method: HTTP method to be used.
:param query_parameters: Parameters to be added to the request.
:param body: Optional body, if required.
:param headers: Optional headers, if required.
:return: JSON
"""
query_parameters = query_parameters or {}
headers = headers or {}
# Add credentials to the request
query_parameters = self.add_credentials(query_parameters)
# Build the request uri with parameters
uri = self.build_request(uri_path, query_parameters)
if http_method in ('POST', 'PUT', 'DELETE') and 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
response, content = self.client.request(
uri=uri,
method=http_method,
body=body,
headers=headers
)
# Check for known errors that could be returned
self.check_status(content, response)
return json.loads(content.decode('utf-8')) | Fetches the JSON returned, after making the call and checking for errors.
:param uri_path: Endpoint to be used to make a request.
:param http_method: HTTP method to be used.
:param query_parameters: Parameters to be added to the request.
:param body: Optional body, if required.
:param headers: Optional headers, if required.
:return: JSON | Below is the instruction that describes the task:
### Input:
Fetches the JSON returned, after making the call and checking for errors.
:param uri_path: Endpoint to be used to make a request.
:param http_method: HTTP method to be used.
:param query_parameters: Parameters to be added to the request.
:param body: Optional body, if required.
:param headers: Optional headers, if required.
:return: JSON
### Response:
def get_json(self, uri_path, http_method='GET', query_parameters=None, body=None, headers=None):
"""
Fetches the JSON returned, after making the call and checking for errors.
:param uri_path: Endpoint to be used to make a request.
:param http_method: HTTP method to be used.
:param query_parameters: Parameters to be added to the request.
:param body: Optional body, if required.
:param headers: Optional headers, if required.
:return: JSON
"""
query_parameters = query_parameters or {}
headers = headers or {}
# Add credentials to the request
query_parameters = self.add_credentials(query_parameters)
# Build the request uri with parameters
uri = self.build_request(uri_path, query_parameters)
if http_method in ('POST', 'PUT', 'DELETE') and 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
response, content = self.client.request(
uri=uri,
method=http_method,
body=body,
headers=headers
)
# Check for known errors that could be returned
self.check_status(content, response)
return json.loads(content.decode('utf-8')) |
def _sumSeries(a: float, b: float, steps: int) -> float:
"""
Return value of the following polynomial.
.. math::
a * (e^(b*steps) - 1) / (e^b - 1)
:param a: multiplier
:param b: exponent multiplier
:param steps: the number of steps
"""
return a * (exp(b * steps) - 1) / (exp(b) - 1) | Return value of the following polynomial.
.. math::
a * (e^(b*steps) - 1) / (e^b - 1)
:param a: multiplier
:param b: exponent multiplier
:param steps: the number of steps | Below is the instruction that describes the task:
### Input:
Return value of the following polynomial.
.. math::
a * (e^(b*steps) - 1) / (e^b - 1)
:param a: multiplier
:param b: exponent multiplier
:param steps: the number of steps
### Response:
def _sumSeries(a: float, b: float, steps: int) -> float:
"""
Return value of the following polynomial.
.. math::
a * (e^(b*steps) - 1) / (e^b - 1)
:param a: multiplier
:param b: exponent multiplier
:param steps: the number of steps
"""
return a * (exp(b * steps) - 1) / (exp(b) - 1) |
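A quick numerical check that the closed form above equals the explicit geometric sum a * sum_{k=0}^{steps-1} e^(b*k), which is the series the formula collapses; the sample values are arbitrary.

from math import exp, isclose

a, b, steps = 2.0, 0.3, 5
closed = a * (exp(b * steps) - 1) / (exp(b) - 1)
explicit = sum(a * exp(b * k) for k in range(steps))
print(isclose(closed, explicit))  # True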
def disable(name, no_block=False, root=None, **kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Disable the named service to not start when the system boots
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: 2017.7.0
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
'''
_check_for_unit_changes(name)
if name in _get_sysv_services(root):
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
service_exec = _get_service_exec()
if service_exec.endswith('/update-rc.d'):
cmd.extend([service_exec, '-f', name, 'remove'])
elif service_exec.endswith('/chkconfig'):
cmd.extend([service_exec, name, 'off'])
return __salt__['cmd.retcode'](cmd,
python_shell=False,
ignore_retcode=True) == 0
# Using cmd.run_all instead of cmd.retcode here to make unit tests easier
return __salt__['cmd.run_all'](
_systemctl_cmd('disable', name, systemd_scope=True, no_block=no_block,
root=root),
python_shell=False,
ignore_retcode=True)['retcode'] == 0 | .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Disable the named service to not start when the system boots
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: 2017.7.0
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name> | Below is the instruction that describes the task:
### Input:
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Disable the named service to not start when the system boots
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: 2017.7.0
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
### Response:
def disable(name, no_block=False, root=None, **kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Disable the named service to not start when the system boots
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: 2017.7.0
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
'''
_check_for_unit_changes(name)
if name in _get_sysv_services(root):
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
service_exec = _get_service_exec()
if service_exec.endswith('/update-rc.d'):
cmd.extend([service_exec, '-f', name, 'remove'])
elif service_exec.endswith('/chkconfig'):
cmd.extend([service_exec, name, 'off'])
return __salt__['cmd.retcode'](cmd,
python_shell=False,
ignore_retcode=True) == 0
# Using cmd.run_all instead of cmd.retcode here to make unit tests easier
return __salt__['cmd.run_all'](
_systemctl_cmd('disable', name, systemd_scope=True, no_block=no_block,
root=root),
python_shell=False,
ignore_retcode=True)['retcode'] == 0 |
def hex(self):
"""Return a hexadecimal representation of a BigFloat."""
sign = '-' if self._sign() else ''
e = self._exponent()
if isinstance(e, six.string_types):
return sign + e
m = self._significand()
_, digits, _ = _mpfr_get_str2(
16,
0,
m,
ROUND_TIES_TO_EVEN,
)
# only print the number of digits that are actually necessary
n = 1 + (self.precision - 1) // 4
assert all(c == '0' for c in digits[n:])
result = '%s0x0.%sp%+d' % (sign, digits[:n], e)
return result | Return a hexadecimal representation of a BigFloat. | Below is the the instruction that describes the task:
### Input:
Return a hexadecimal representation of a BigFloat.
### Response:
def hex(self):
"""Return a hexadecimal representation of a BigFloat."""
sign = '-' if self._sign() else ''
e = self._exponent()
if isinstance(e, six.string_types):
return sign + e
m = self._significand()
_, digits, _ = _mpfr_get_str2(
16,
0,
m,
ROUND_TIES_TO_EVEN,
)
# only print the number of digits that are actually necessary
n = 1 + (self.precision - 1) // 4
assert all(c == '0' for c in digits[n:])
result = '%s0x0.%sp%+d' % (sign, digits[:n], e)
return result |
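A short usage sketch of the method above, assuming the bigfloat package that defines it is installed (the digit count in the output depends on the working precision):
from bigfloat import BigFloat, precision

with precision(24):
    x = BigFloat(2.25)
    # prints sign, the significand as a hexadecimal fraction, and a binary exponent;
    # at 24-bit precision 1 + (24 - 1) // 4 == 6 hex digits are kept
    print(x.hex())   # e.g. '0x0.900000p+2'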
def estimate_skeleton(indep_test_func, data_matrix, alpha, **kwargs):
"""Estimate a skeleton graph from the statistis information.
Args:
indep_test_func: the function name for a conditional
independency test.
data_matrix: data (as a numpy array).
alpha: the significance level.
kwargs:
'max_reach': maximum value of l (see the code). The
value depends on the underlying distribution.
'method': if 'stable' given, use stable-PC algorithm
(see [Colombo2014]).
'init_graph': initial structure of skeleton graph
(as a networkx.Graph). If not specified,
a complete graph is used.
other parameters may be passed depending on the
indep_test_func()s.
Returns:
g: a skeleton graph (as a networkx.Graph).
sep_set: a separation set (as an 2D-array of set()).
[Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent
constraint-based causal structure learning. In The Journal of Machine
Learning Research, Vol. 15, pp. 3741-3782, 2014.
"""
def method_stable(kwargs):
return ('method' in kwargs) and kwargs['method'] == "stable"
node_ids = range(data_matrix.shape[1])
node_size = data_matrix.shape[1]
sep_set = [[set() for i in range(node_size)] for j in range(node_size)]
if 'init_graph' in kwargs:
g = kwargs['init_graph']
if not isinstance(g, nx.Graph):
raise ValueError
elif not g.number_of_nodes() == len(node_ids):
raise ValueError('init_graph not matching data_matrix shape')
for (i, j) in combinations(node_ids, 2):
if not g.has_edge(i, j):
sep_set[i][j] = None
sep_set[j][i] = None
else:
g = _create_complete_graph(node_ids)
l = 0
while True:
cont = False
remove_edges = []
for (i, j) in permutations(node_ids, 2):
adj_i = list(g.neighbors(i))
if j not in adj_i:
continue
else:
adj_i.remove(j)
if len(adj_i) >= l:
_logger.debug('testing %s and %s' % (i,j))
_logger.debug('neighbors of %s are %s' % (i, str(adj_i)))
if len(adj_i) < l:
continue
for k in combinations(adj_i, l):
_logger.debug('indep prob of %s and %s with subset %s'
% (i, j, str(k)))
p_val = indep_test_func(data_matrix, i, j, set(k),
**kwargs)
_logger.debug('p_val is %s' % str(p_val))
if p_val > alpha:
if g.has_edge(i, j):
_logger.debug('p: remove edge (%s, %s)' % (i, j))
if method_stable(kwargs):
remove_edges.append((i, j))
else:
g.remove_edge(i, j)
sep_set[i][j] |= set(k)
sep_set[j][i] |= set(k)
break
cont = True
l += 1
if method_stable(kwargs):
g.remove_edges_from(remove_edges)
if cont is False:
break
if ('max_reach' in kwargs) and (l > kwargs['max_reach']):
break
return (g, sep_set) | Estimate a skeleton graph from the statistical information.
Args:
indep_test_func: the function name for a conditional
independency test.
data_matrix: data (as a numpy array).
alpha: the significance level.
kwargs:
'max_reach': maximum value of l (see the code). The
value depends on the underlying distribution.
'method': if 'stable' given, use stable-PC algorithm
(see [Colombo2014]).
'init_graph': initial structure of skeleton graph
(as a networkx.Graph). If not specified,
a complete graph is used.
other parameters may be passed depending on the
indep_test_func()s.
Returns:
g: a skeleton graph (as a networkx.Graph).
sep_set: a separation set (as an 2D-array of set()).
[Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent
constraint-based causal structure learning. In The Journal of Machine
Learning Research, Vol. 15, pp. 3741-3782, 2014. | Below is the the instruction that describes the task:
### Input:
Estimate a skeleton graph from the statistical information.
Args:
indep_test_func: the function name for a conditional
independency test.
data_matrix: data (as a numpy array).
alpha: the significance level.
kwargs:
'max_reach': maximum value of l (see the code). The
value depends on the underlying distribution.
'method': if 'stable' given, use stable-PC algorithm
(see [Colombo2014]).
'init_graph': initial structure of skeleton graph
(as a networkx.Graph). If not specified,
a complete graph is used.
other parameters may be passed depending on the
indep_test_func()s.
Returns:
g: a skeleton graph (as a networkx.Graph).
sep_set: a separation set (as an 2D-array of set()).
[Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent
constraint-based causal structure learning. In The Journal of Machine
Learning Research, Vol. 15, pp. 3741-3782, 2014.
### Response:
def estimate_skeleton(indep_test_func, data_matrix, alpha, **kwargs):
"""Estimate a skeleton graph from the statistis information.
Args:
indep_test_func: the function name for a conditional
independency test.
data_matrix: data (as a numpy array).
alpha: the significance level.
kwargs:
'max_reach': maximum value of l (see the code). The
value depends on the underlying distribution.
'method': if 'stable' given, use stable-PC algorithm
(see [Colombo2014]).
'init_graph': initial structure of skeleton graph
(as a networkx.Graph). If not specified,
a complete graph is used.
other parameters may be passed depending on the
indep_test_func()s.
Returns:
g: a skeleton graph (as a networkx.Graph).
sep_set: a separation set (as an 2D-array of set()).
[Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent
constraint-based causal structure learning. In The Journal of Machine
Learning Research, Vol. 15, pp. 3741-3782, 2014.
"""
def method_stable(kwargs):
return ('method' in kwargs) and kwargs['method'] == "stable"
node_ids = range(data_matrix.shape[1])
node_size = data_matrix.shape[1]
sep_set = [[set() for i in range(node_size)] for j in range(node_size)]
if 'init_graph' in kwargs:
g = kwargs['init_graph']
if not isinstance(g, nx.Graph):
raise ValueError
elif not g.number_of_nodes() == len(node_ids):
raise ValueError('init_graph not matching data_matrix shape')
for (i, j) in combinations(node_ids, 2):
if not g.has_edge(i, j):
sep_set[i][j] = None
sep_set[j][i] = None
else:
g = _create_complete_graph(node_ids)
l = 0
while True:
cont = False
remove_edges = []
for (i, j) in permutations(node_ids, 2):
adj_i = list(g.neighbors(i))
if j not in adj_i:
continue
else:
adj_i.remove(j)
if len(adj_i) >= l:
_logger.debug('testing %s and %s' % (i,j))
_logger.debug('neighbors of %s are %s' % (i, str(adj_i)))
if len(adj_i) < l:
continue
for k in combinations(adj_i, l):
_logger.debug('indep prob of %s and %s with subset %s'
% (i, j, str(k)))
p_val = indep_test_func(data_matrix, i, j, set(k),
**kwargs)
_logger.debug('p_val is %s' % str(p_val))
if p_val > alpha:
if g.has_edge(i, j):
_logger.debug('p: remove edge (%s, %s)' % (i, j))
if method_stable(kwargs):
remove_edges.append((i, j))
else:
g.remove_edge(i, j)
sep_set[i][j] |= set(k)
sep_set[j][i] |= set(k)
break
cont = True
l += 1
if method_stable(kwargs):
g.remove_edges_from(remove_edges)
if cont is False:
break
if ('max_reach' in kwargs) and (l > kwargs['max_reach']):
break
return (g, sep_set) |
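A minimal driver for estimate_skeleton(). The conditional-independence test below is a stand-in written for illustration (a Fisher z-test on partial correlations), not part of the original module, and the call assumes the function and its module dependencies (networkx, itertools, logging) are importable:
import numpy as np
from scipy import stats

def ci_test_gauss(data, x, y, zs, **kwargs):
    # p-value for X independent of Y given Z, via Fisher's z-transform
    zs, n = list(zs), data.shape[0]
    if zs:
        # residualize x and y on the conditioning set before correlating
        Z = np.column_stack([data[:, zs], np.ones(n)])
        rx = data[:, x] - Z @ np.linalg.lstsq(Z, data[:, x], rcond=None)[0]
        ry = data[:, y] - Z @ np.linalg.lstsq(Z, data[:, y], rcond=None)[0]
    else:
        rx, ry = data[:, x], data[:, y]
    r = np.clip(np.corrcoef(rx, ry)[0, 1], -0.999999, 0.999999)
    z = 0.5 * np.log((1 + r) / (1 - r)) * np.sqrt(n - len(zs) - 3)
    return 2 * stats.norm.sf(abs(z))

rng = np.random.RandomState(0)
x0 = rng.normal(size=1000)
x1 = x0 + rng.normal(size=1000)            # chain: x0 -> x1 -> x2
x2 = x1 + rng.normal(size=1000)
g, sep_set = estimate_skeleton(ci_test_gauss, np.column_stack([x0, x1, x2]), alpha=0.01)
print(sorted(g.edges()))                   # expected skeleton: [(0, 1), (1, 2)]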
def _netcdf2pandas(self, netcdf_data, query_variables, start, end):
"""
Transforms data from netcdf to pandas DataFrame.
Parameters
----------
data: netcdf
Data returned from UNIDATA NCSS query.
query_variables: list
The variables requested.
start: Timestamp
The start time
end: Timestamp
The end time
Returns
-------
pd.DataFrame
"""
# set self.time
try:
time_var = 'time'
self.set_time(netcdf_data.variables[time_var])
except KeyError:
# which model does this dumb thing?
time_var = 'time1'
self.set_time(netcdf_data.variables[time_var])
data_dict = {}
for key, data in netcdf_data.variables.items():
# the `if` accounts for the possibility of an extra variable being returned
if key not in query_variables:
continue
squeezed = data[:].squeeze()
if squeezed.ndim == 1:
data_dict[key] = squeezed
elif squeezed.ndim == 2:
for num, data_level in enumerate(squeezed.T):
data_dict[key + '_' + str(num)] = data_level
else:
raise ValueError('cannot parse ndim > 2')
data = pd.DataFrame(data_dict, index=self.time)
# sometimes data is returned as hours since T0
# where T0 is before start. Then the hours between
# T0 and start are added *after* end. So sort and slice
# to remove the garbage
data = data.sort_index().loc[start:end]
return data | Transforms data from netcdf to pandas DataFrame.
Parameters
----------
data: netcdf
Data returned from UNIDATA NCSS query.
query_variables: list
The variables requested.
start: Timestamp
The start time
end: Timestamp
The end time
Returns
-------
pd.DataFrame | Below is the the instruction that describes the task:
### Input:
Transforms data from netcdf to pandas DataFrame.
Parameters
----------
data: netcdf
Data returned from UNIDATA NCSS query.
query_variables: list
The variables requested.
start: Timestamp
The start time
end: Timestamp
The end time
Returns
-------
pd.DataFrame
### Response:
def _netcdf2pandas(self, netcdf_data, query_variables, start, end):
"""
Transforms data from netcdf to pandas DataFrame.
Parameters
----------
data: netcdf
Data returned from UNIDATA NCSS query.
query_variables: list
The variables requested.
start: Timestamp
The start time
end: Timestamp
The end time
Returns
-------
pd.DataFrame
"""
# set self.time
try:
time_var = 'time'
self.set_time(netcdf_data.variables[time_var])
except KeyError:
# which model does this dumb thing?
time_var = 'time1'
self.set_time(netcdf_data.variables[time_var])
data_dict = {}
for key, data in netcdf_data.variables.items():
# the `if` accounts for the possibility of an extra variable being returned
if key not in query_variables:
continue
squeezed = data[:].squeeze()
if squeezed.ndim == 1:
data_dict[key] = squeezed
elif squeezed.ndim == 2:
for num, data_level in enumerate(squeezed.T):
data_dict[key + '_' + str(num)] = data_level
else:
raise ValueError('cannot parse ndim > 2')
data = pd.DataFrame(data_dict, index=self.time)
# sometimes data is returned as hours since T0
# where T0 is before start. Then the hours between
# T0 and start are added *after* end. So sort and slice
# to remove the garbage
data = data.sort_index().loc[start:end]
return data |
def binary(self):
"""
return encoded representation
"""
creation_size = len(self.creation)
if creation_size == 1:
return (
b_chr(_TAG_PID_EXT) +
self.node.binary() + self.id + self.serial + self.creation
)
elif creation_size == 4:
return (
b_chr(_TAG_NEW_PID_EXT) +
self.node.binary() + self.id + self.serial + self.creation
)
else:
raise OutputException('unknown pid type') | return encoded representation | Below is the the instruction that describes the task:
### Input:
return encoded representation
### Response:
def binary(self):
"""
return encoded representation
"""
creation_size = len(self.creation)
if creation_size == 1:
return (
b_chr(_TAG_PID_EXT) +
self.node.binary() + self.id + self.serial + self.creation
)
elif creation_size == 4:
return (
b_chr(_TAG_NEW_PID_EXT) +
self.node.binary() + self.id + self.serial + self.creation
)
else:
raise OutputException('unknown pid type') |
def get_pipe(self, object_type):
"""
Returns a generator that maps the input of the pipe to an elasticsearch object.
Will call id_to_object if it cannot serialize the data from json.
"""
for line in sys.stdin:
try:
data = json.loads(line.strip())
obj = object_type(**data)
yield obj
except ValueError:
yield self.id_to_object(line.strip()) | Returns a generator that maps the input of the pipe to an elasticsearch object.
Will call id_to_object if it cannot serialize the data from json. | Below is the the instruction that describes the task:
### Input:
Returns a generator that maps the input of the pipe to an elasticsearch object.
Will call id_to_object if it cannot serialize the data from json.
### Response:
def get_pipe(self, object_type):
"""
Returns a generator that maps the input of the pipe to an elasticsearch object.
Will call id_to_object if it cannot serialize the data from json.
"""
for line in sys.stdin:
try:
data = json.loads(line.strip())
obj = object_type(**data)
yield obj
except ValueError:
yield self.id_to_object(line.strip()) |
def is_unicode_string(string):
"""
Return ``True`` if the given string is a Unicode string,
that is, of type ``unicode`` in Python 2 or ``str`` in Python 3.
Return ``None`` if ``string`` is ``None``.
:param str string: the string to be checked
:rtype: bool
"""
if string is None:
return None
if PY2:
return isinstance(string, unicode)
return isinstance(string, str) | Return ``True`` if the given string is a Unicode string,
that is, of type ``unicode`` in Python 2 or ``str`` in Python 3.
Return ``None`` if ``string`` is ``None``.
:param str string: the string to be checked
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Return ``True`` if the given string is a Unicode string,
that is, of type ``unicode`` in Python 2 or ``str`` in Python 3.
Return ``None`` if ``string`` is ``None``.
:param str string: the string to be checked
:rtype: bool
### Response:
def is_unicode_string(string):
"""
Return ``True`` if the given string is a Unicode string,
that is, of type ``unicode`` in Python 2 or ``str`` in Python 3.
Return ``None`` if ``string`` is ``None``.
:param str string: the string to be checked
:rtype: bool
"""
if string is None:
return None
if PY2:
return isinstance(string, unicode)
return isinstance(string, str) |
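Quick checks of the helper above (Python 3 behaviour shown; under Python 2 the same calls distinguish unicode from byte strings):
print(is_unicode_string("caf\u00e9"))     # True
print(is_unicode_string(b"caf\xc3\xa9"))  # False (a byte string)
print(is_unicode_string(None))            # None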
def stop(self):
"""
Stops the dependency manager (must be called before clear())
:return: The removed bindings (list) or None
"""
self._context.remove_service_listener(self)
if self.services:
return [
(service, reference)
for reference, service in self.services.items()
]
return None | Stops the dependency manager (must be called before clear())
:return: The removed bindings (list) or None | Below is the the instruction that describes the task:
### Input:
Stops the dependency manager (must be called before clear())
:return: The removed bindings (list) or None
### Response:
def stop(self):
"""
Stops the dependency manager (must be called before clear())
:return: The removed bindings (list) or None
"""
self._context.remove_service_listener(self)
if self.services:
return [
(service, reference)
for reference, service in self.services.items()
]
return None |
def get_medium_url(self):
"""Returns the medium size image URL."""
if self.is_gif():
return self.get_absolute_url()
return '%s%s-%s.jpg' % (settings.MEDIA_URL, self.get_name(), 'medium') | Returns the medium size image URL. | Below is the the instruction that describes the task:
### Input:
Returns the medium size image URL.
### Response:
def get_medium_url(self):
"""Returns the medium size image URL."""
if self.is_gif():
return self.get_absolute_url()
return '%s%s-%s.jpg' % (settings.MEDIA_URL, self.get_name(), 'medium') |
def columns(self):
"""获取用户专栏.
:return: 用户专栏,返回生成器
:rtype: Column.Iterable
"""
from .column import Column
if self.url is None or self.post_num == 0:
return
soup = BeautifulSoup(self._session.get(self.url + 'posts').text)
column_list = soup.find('div', class_='column-list')
column_tags = column_list.find_all('div', class_='item')
for column_tag in column_tags:
name = column_tag['title']
url = column_tag['data-href']
numbers = column_tag.find('span', class_='des').text.split('•')
follower_num = int(re_get_number.match(numbers[0]).group(1))
if len(numbers) == 1:
post_num = 0
else:
post_num = int(
re_get_number.match(numbers[1]).group(1))
yield Column(url, name, follower_num, post_num,
session=self._session) | Get the user's columns.
:return: the user's columns, returned as a generator
:rtype: Column.Iterable | Below is the the instruction that describes the task:
### Input:
Get the user's columns.
:return: the user's columns, returned as a generator
:rtype: Column.Iterable
### Response:
def columns(self):
"""获取用户专栏.
:return: 用户专栏,返回生成器
:rtype: Column.Iterable
"""
from .column import Column
if self.url is None or self.post_num == 0:
return
soup = BeautifulSoup(self._session.get(self.url + 'posts').text)
column_list = soup.find('div', class_='column-list')
column_tags = column_list.find_all('div', class_='item')
for column_tag in column_tags:
name = column_tag['title']
url = column_tag['data-href']
numbers = column_tag.find('span', class_='des').text.split('•')
follower_num = int(re_get_number.match(numbers[0]).group(1))
if len(numbers) == 1:
post_num = 0
else:
post_num = int(
re_get_number.match(numbers[1]).group(1))
yield Column(url, name, follower_num, post_num,
session=self._session) |
def _format_repo_args(comment=None, component=None, distribution=None,
uploaders_file=None, saltenv='base'):
'''
Format the common arguments for creating or editing a repository.
:param str comment: The description of the repository.
:param str component: The default component to use when publishing.
:param str distribution: The default distribution to use when publishing.
:param str uploaders_file: The repository upload restrictions config.
:param str saltenv: The environment the file resides in.
:return: A list of the arguments formatted as aptly arguments.
:rtype: list
'''
ret = list()
cached_uploaders_path = None
settings = {'comment': comment, 'component': component,
'distribution': distribution}
if uploaders_file:
cached_uploaders_path = __salt__['cp.cache_file'](uploaders_file, saltenv)
if not cached_uploaders_path:
log.error('Unable to get cached copy of file: %s', uploaders_file)
return False
for setting in settings:
if settings[setting] is not None:
ret.append('-{}={}'.format(setting, settings[setting]))
if cached_uploaders_path:
ret.append('-uploaders-file={}'.format(cached_uploaders_path))
return ret | Format the common arguments for creating or editing a repository.
:param str comment: The description of the repository.
:param str component: The default component to use when publishing.
:param str distribution: The default distribution to use when publishing.
:param str uploaders_file: The repository upload restrictions config.
:param str saltenv: The environment the file resides in.
:return: A list of the arguments formatted as aptly arguments.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Format the common arguments for creating or editing a repository.
:param str comment: The description of the repository.
:param str component: The default component to use when publishing.
:param str distribution: The default distribution to use when publishing.
:param str uploaders_file: The repository upload restrictions config.
:param str saltenv: The environment the file resides in.
:return: A list of the arguments formatted as aptly arguments.
:rtype: list
### Response:
def _format_repo_args(comment=None, component=None, distribution=None,
uploaders_file=None, saltenv='base'):
'''
Format the common arguments for creating or editing a repository.
:param str comment: The description of the repository.
:param str component: The default component to use when publishing.
:param str distribution: The default distribution to use when publishing.
:param str uploaders_file: The repository upload restrictions config.
:param str saltenv: The environment the file resides in.
:return: A list of the arguments formatted as aptly arguments.
:rtype: list
'''
ret = list()
cached_uploaders_path = None
settings = {'comment': comment, 'component': component,
'distribution': distribution}
if uploaders_file:
cached_uploaders_path = __salt__['cp.cache_file'](uploaders_file, saltenv)
if not cached_uploaders_path:
log.error('Unable to get cached copy of file: %s', uploaders_file)
return False
for setting in settings:
if settings[setting] is not None:
ret.append('-{}={}'.format(setting, settings[setting]))
if cached_uploaders_path:
ret.append('-uploaders-file={}'.format(cached_uploaders_path))
return ret |
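An illustrative call that omits the uploaders file, so no Salt fileserver access is needed; the flag order can vary because the settings are iterated from a plain dict, hence the sort:
args = _format_repo_args(comment='Example repo', component='main', distribution='xenial')
print(sorted(args))
# ['-comment=Example repo', '-component=main', '-distribution=xenial']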
def prepare_timestamp_micros(data, schema):
"""Converts datetime.datetime to int timestamp with microseconds"""
if isinstance(data, datetime.datetime):
if data.tzinfo is not None:
delta = (data - epoch)
return int(delta.total_seconds() * MCS_PER_SECOND)
t = int(time.mktime(data.timetuple())) * MCS_PER_SECOND + \
data.microsecond
return t
else:
return data | Converts datetime.datetime to int timestamp with microseconds | Below is the the instruction that describes the task:
### Input:
Converts datetime.datetime to int timestamp with microseconds
### Response:
def prepare_timestamp_micros(data, schema):
"""Converts datetime.datetime to int timestamp with microseconds"""
if isinstance(data, datetime.datetime):
if data.tzinfo is not None:
delta = (data - epoch)
return int(delta.total_seconds() * MCS_PER_SECOND)
t = int(time.mktime(data.timetuple())) * MCS_PER_SECOND + \
data.microsecond
return t
else:
return data |
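A worked example of the timezone-aware branch; the epoch and MCS_PER_SECOND constants below mirror what the surrounding module is assumed to define (Unix epoch, 10**6 microseconds per second), while the naive branch goes through time.mktime and so depends on the local timezone:
import datetime

MCS_PER_SECOND = 1000000
epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)

aware = datetime.datetime(1970, 1, 1, 0, 0, 1, 500, tzinfo=datetime.timezone.utc)
delta = aware - epoch
print(int(delta.total_seconds() * MCS_PER_SECOND))   # 1000500, i.e. 1 s + 500 us past the epoch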
def load_nii(strPathIn, varSzeThr=5000.0):
"""
Load nii file.
Parameters
----------
strPathIn : str
Path to nii file to load.
varSzeThr : float
If the nii file is larger than this threshold (in MB), the file is
loaded volume-by-volume in order to prevent memory overflow. Default
threshold is 5000 MB.
Returns
-------
aryNii : np.array
Array containing nii data. 32 bit floating point precision.
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
Notes
-----
If the nii file is larger than the specified threshold (`varSzeThr`), the
file is loaded volume-by-volume in order to prevent memory overflow. The
reason for this is that nibabel imports data at float64 precision, which
can lead to a memory overflow even for relatively small files.
"""
# Load nii file (this does not load the data into memory yet):
objNii = nb.load(strPathIn)
# Get size of nii file:
varNiiSze = os.path.getsize(strPathIn)
# Convert to MB:
varNiiSze = np.divide(float(varNiiSze), 1000000.0)
# Load volume-by-volume or all at once, depending on file size:
if np.greater(varNiiSze, float(varSzeThr)):
# Load large nii file
print(('---------Large file size ('
+ str(np.around(varNiiSze))
+ ' MB), reading volume-by-volume'))
# Get image dimensions:
tplSze = objNii.shape
# Create empty array for nii data:
aryNii = np.zeros(tplSze, dtype=np.float32)
# Loop through volumes:
for idxVol in range(tplSze[3]):
aryNii[..., idxVol] = np.asarray(
objNii.dataobj[..., idxVol]).astype(np.float32)
else:
# Load small nii file
# Load nii file (this doesn't load the data into memory yet):
objNii = nb.load(strPathIn)
# Load data into array:
aryNii = np.asarray(objNii.dataobj).astype(np.float32)
# Get headers:
objHdr = objNii.header
# Get 'affine':
aryAff = objNii.affine
# Output nii data (as numpy array), header, and 'affine':
return aryNii, objHdr, aryAff | Load nii file.
Parameters
----------
strPathIn : str
Path to nii file to load.
varSzeThr : float
If the nii file is larger than this threshold (in MB), the file is
loaded volume-by-volume in order to prevent memory overflow. Default
threshold is 5000 MB.
Returns
-------
aryNii : np.array
Array containing nii data. 32 bit floating point precision.
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
Notes
-----
If the nii file is larger than the specified threshold (`varSzeThr`), the
file is loaded volume-by-volume in order to prevent memory overflow. The
reason for this is that nibabel imports data at float64 precision, which
can lead to a memory overflow even for relatively small files. | Below is the the instruction that describes the task:
### Input:
Load nii file.
Parameters
----------
strPathIn : str
Path to nii file to load.
varSzeThr : float
If the nii file is larger than this threshold (in MB), the file is
loaded volume-by-volume in order to prevent memory overflow. Default
threshold is 5000 MB.
Returns
-------
aryNii : np.array
Array containing nii data. 32 bit floating point precision.
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
Notes
-----
If the nii file is larger than the specified threshold (`varSzeThr`), the
file is loaded volume-by-volume in order to prevent memory overflow. The
reason for this is that nibabel imports data at float64 precision, which
can lead to a memory overflow even for relatively small files.
### Response:
def load_nii(strPathIn, varSzeThr=5000.0):
"""
Load nii file.
Parameters
----------
strPathIn : str
Path to nii file to load.
varSzeThr : float
If the nii file is larger than this threshold (in MB), the file is
loaded volume-by-volume in order to prevent memory overflow. Default
threshold is 5000 MB.
Returns
-------
aryNii : np.array
Array containing nii data. 32 bit floating point precision.
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
Notes
-----
If the nii file is larger than the specified threshold (`varSzeThr`), the
file is loaded volume-by-volume in order to prevent memory overflow. The
reason for this is that nibabel imports data at float64 precision, which
can lead to a memory overflow even for relatively small files.
"""
# Load nii file (this does not load the data into memory yet):
objNii = nb.load(strPathIn)
# Get size of nii file:
varNiiSze = os.path.getsize(strPathIn)
# Convert to MB:
varNiiSze = np.divide(float(varNiiSze), 1000000.0)
# Load volume-by-volume or all at once, depending on file size:
if np.greater(varNiiSze, float(varSzeThr)):
# Load large nii file
print(('---------Large file size ('
+ str(np.around(varNiiSze))
+ ' MB), reading volume-by-volume'))
# Get image dimensions:
tplSze = objNii.shape
# Create empty array for nii data:
aryNii = np.zeros(tplSze, dtype=np.float32)
# Loop through volumes:
for idxVol in range(tplSze[3]):
aryNii[..., idxVol] = np.asarray(
objNii.dataobj[..., idxVol]).astype(np.float32)
else:
# Load small nii file
# Load nii file (this doesn't load the data into memory yet):
objNii = nb.load(strPathIn)
# Load data into array:
aryNii = np.asarray(objNii.dataobj).astype(np.float32)
# Get headers:
objHdr = objNii.header
# Get 'affine':
aryAff = objNii.affine
# Output nii data (as numpy array), header, and 'affine':
return aryNii, objHdr, aryAff |
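A small round-trip sketch for load_nii(), writing a toy 4-D file with nibabel first (file name and shape are arbitrary):
import nibabel as nb
import numpy as np

toy = np.random.rand(4, 4, 4, 10).astype(np.float32)
nb.save(nb.Nifti1Image(toy, affine=np.eye(4)), 'toy.nii.gz')

aryNii, objHdr, aryAff = load_nii('toy.nii.gz')     # well under the size threshold
print(aryNii.shape, aryNii.dtype)                   # (4, 4, 4, 10) float32
print(aryAff.shape)                                 # (4, 4)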
def load(cls, data):
"""Construct a Constant class from it's dict data.
.. versionadded:: 0.0.2
"""
if len(data) == 1:
for key, value in data.items():
if "__classname__" not in value: # pragma: no cover
raise ValueError
name = key
bases = (Constant,)
attrs = dict()
for k, v in value.items():
if isinstance(v, dict):
if "__classname__" in v:
attrs[k] = cls.load({k: v})
else:
attrs[k] = v
else:
attrs[k] = v
return type(name, bases, attrs)
else: # pragma: no cover
raise ValueError | Construct a Constant class from its dict data.
.. versionadded:: 0.0.2 | Below is the the instruction that describes the task:
### Input:
Construct a Constant class from its dict data.
.. versionadded:: 0.0.2
### Response:
def load(cls, data):
"""Construct a Constant class from it's dict data.
.. versionadded:: 0.0.2
"""
if len(data) == 1:
for key, value in data.items():
if "__classname__" not in value: # pragma: no cover
raise ValueError
name = key
bases = (Constant,)
attrs = dict()
for k, v in value.items():
if isinstance(v, dict):
if "__classname__" in v:
attrs[k] = cls.load({k: v})
else:
attrs[k] = v
else:
attrs[k] = v
return type(name, bases, attrs)
else: # pragma: no cover
raise ValueError |
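A round-trip sketch for the classmethod above; the nested dict mimics the shape the docstring implies, and Constant is assumed to be importable from the same module:
data = {
    "Color": {
        "__classname__": "Color",
        "RED": 1,
        "GREEN": 2,
        "Dark": {"__classname__": "Dark", "BLACK": 0},
    }
}

Color = Constant.load(data)
print(Color.RED, Color.GREEN)   # 1 2
print(Color.Dark.BLACK)         # 0 (nested dicts with __classname__ become nested classes)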
def chord_task(*args, **kwargs):
u"""
Override of the default task decorator to specify use of this backend.
"""
given_backend = kwargs.get(u'backend', None)
if not isinstance(given_backend, ChordableDjangoBackend):
kwargs[u'backend'] = ChordableDjangoBackend(kwargs.get('app', current_app))
return task(*args, **kwargs) | u"""
Override of the default task decorator to specify use of this backend. | Below is the the instruction that describes the task:
### Input:
u"""
Override of the default task decorator to specify use of this backend.
### Response:
def chord_task(*args, **kwargs):
u"""
Override of the default task decorator to specify use of this backend.
"""
given_backend = kwargs.get(u'backend', None)
if not isinstance(given_backend, ChordableDjangoBackend):
kwargs[u'backend'] = ChordableDjangoBackend(kwargs.get('app', current_app))
return task(*args, **kwargs) |
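A hedged usage sketch: the wrapper is applied exactly like Celery's stock task decorator, so a task meant to report through the chordable backend can be declared as follows (the task body is illustrative):
@chord_task(bind=True)
def add(self, x, y):
    # runs like a normal Celery task, but its result backend is a ChordableDjangoBackend
    return x + y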
def _initialize(self):
"""
Initialize model and layers.
"""
meta = getattr(self, ModelBase._meta_attr)
# read modelfile, convert JSON and load/update model
if self.param_file is not None:
self._load()
LOGGER.debug('model:\n%r', self.model)
# initialize layers
# FIXME: move import inside loop for custom layers in different modules
mod = importlib.import_module(meta.layers_mod, meta.layers_pkg)
src_model = {}
for layer, value in self.model.iteritems():
# from layers module get the layer's class definition
layer_cls = getattr(mod, meta.layer_cls_names[layer]) # class def
self.layers[layer] = layer_cls # add layer class def to model
# check if model layers are classes
src_value = {} # layer value generated from source classes
for src in value['sources']:
# check if source has keyword arguments
try:
src, kwargs = src
except (TypeError, ValueError):
kwargs = {} # no keyword arguments
# skip if not a source class
if isinstance(src, basestring):
continue
# generate layer value from source class
src_value[src.__name__] = {'module': src.__module__,
'package': None}
# update layer keyword arguments
src_value[src.__name__].update(kwargs)
# use layer values generated from source class
if src_value:
value = src_model[layer] = src_value
else:
srcmod, srcpkg = value.get('module'), value.get('package')
try:
value = dict(value['sources'])
except ValueError:
value = dict.fromkeys(value['sources'], {})
for src in value.viewkeys():
if srcmod is not None:
value[src]['module'] = srcmod
if srcpkg is not None:
value[src]['package'] = srcpkg
# set layer attribute with model data
setattr(self, layer, layer_cls(value))
# update model with layer values generated from source classes
if src_model:
self.model.update(src_model)
self._update()
self._state = 'initialized' | Initialize model and layers. | Below is the the instruction that describes the task:
### Input:
Initialize model and layers.
### Response:
def _initialize(self):
"""
Initialize model and layers.
"""
meta = getattr(self, ModelBase._meta_attr)
# read modelfile, convert JSON and load/update model
if self.param_file is not None:
self._load()
LOGGER.debug('model:\n%r', self.model)
# initialize layers
# FIXME: move import inside loop for custom layers in different modules
mod = importlib.import_module(meta.layers_mod, meta.layers_pkg)
src_model = {}
for layer, value in self.model.iteritems():
# from layers module get the layer's class definition
layer_cls = getattr(mod, meta.layer_cls_names[layer]) # class def
self.layers[layer] = layer_cls # add layer class def to model
# check if model layers are classes
src_value = {} # layer value generated from source classes
for src in value['sources']:
# check if source has keyword arguments
try:
src, kwargs = src
except (TypeError, ValueError):
kwargs = {} # no keyword arguments
# skip if not a source class
if isinstance(src, basestring):
continue
# generate layer value from source class
src_value[src.__name__] = {'module': src.__module__,
'package': None}
# update layer keyword arguments
src_value[src.__name__].update(kwargs)
# use layer values generated from source class
if src_value:
value = src_model[layer] = src_value
else:
srcmod, srcpkg = value.get('module'), value.get('package')
try:
value = dict(value['sources'])
except ValueError:
value = dict.fromkeys(value['sources'], {})
for src in value.viewkeys():
if srcmod is not None:
value[src]['module'] = srcmod
if srcpkg is not None:
value[src]['package'] = srcpkg
# set layer attribute with model data
setattr(self, layer, layer_cls(value))
# update model with layer values generated from source classes
if src_model:
self.model.update(src_model)
self._update()
self._state = 'initialized' |
def nanopub_to_edges(nanopub: dict = {}, rules: List[str] = [], orthologize_targets: list = []):
"""Process nanopub into edges and load into EdgeStore
Args:
nanopub: BEL Nanopub
rules: list of compute rules to process
orthologize_targets: list of species in TAX:<int> format
Returns:
list: of edges
Edge object:
{
"edge": {
"subject": {
"name": subj_canon,
"name_lc": subj_canon.lower(),
"label": subj_lbl,
"label_lc": subj_lbl.lower(),
"components": subj_components,
},
"relation": { # relation _key is based on a hash
"relation": edge_ast.bel_relation,
"edge_hash": edge_hash,
"edge_dt": edge_dt,
"nanopub_url": nanopub_url,
"nanopub_id": nanopub_id,
"citation": citation,
"subject_canon": subj_canon,
"subject": subj_lbl,
"object_canon": obj_canon,
"object": obj_lbl,
"annotations": nanopub['annotations'],
"metadata": nanopub['metadata'],
"public_flag": True, # will be added when groups/permissions feature is finished,
"edge_types": edge_types,
},
'object': {
"name": obj_canon,
"name_lc": obj_canon.lower(),
"label": obj_lbl,
"label_lc": obj_lbl.lower(),
"components": obj_components,
}
}
}
"""
# Collect input values ####################################################
nanopub_url = nanopub.get("source_url", "")
edge_dt = utils.dt_utc_formatted() # don't want this in relation_id
# Extract BEL Version and make sure we can process this
if nanopub["nanopub"]["type"]["name"].upper() == "BEL":
bel_version = nanopub["nanopub"]["type"]["version"]
versions = bel.lang.bel_specification.get_bel_versions()
if bel_version not in versions:
log.error(
f"Do not know this BEL Version: {bel_version}, these are the ones I can process: {versions.keys()}"
)
return []
else:
log.error(
f"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}"
)
return []
# Required for BEL parsing/canonicalization/orthologization
api_url = config["bel_api"]["servers"]["api_url"]
try:
citation_string = normalize_nanopub_citation(nanopub)
except Exception as e:
log.error(f"Could not create citation string for {nanopub_url}")
citation_string = ""
if orthologize_targets == []:
if config["bel_api"].get("edges", None):
orthologize_targets = config["bel_api"]["edges"].get("orthologize_targets", [])
# orig_species_id = [anno['id'] for anno in nanopub['nanopub']['annotations'] if anno['type'] == 'Species']
# if orig_species_id:
# orig_species_id = orig_species_id[0]
master_annotations = copy.deepcopy(nanopub["nanopub"]["annotations"])
master_metadata = copy.deepcopy(nanopub["nanopub"]["metadata"])
master_metadata.pop("gd_abstract", None)
nanopub_type = nanopub["nanopub"]["metadata"].get("nanopub_type")
# Create Edge Assertion Info ##############################################
# r = generate_assertion_edge_info(nanopub['nanopub']['assertions'], orig_species_id, orthologize_targets, bel_version, api_url, nanopub_type)
r = generate_assertion_edge_info(
nanopub["nanopub"]["assertions"], orthologize_targets, bel_version, api_url, nanopub_type
)
edge_info_list = r["edge_info_list"]
# Build Edges #############################################################
edges = []
errors = []
for edge_info in edge_info_list:
annotations = copy.deepcopy(master_annotations)
metadata = copy.deepcopy(master_metadata)
errors.extend(edge_info["errors"])
if not edge_info.get("canonical"):
continue
# TODO - remove this
# if edge_info.get('species_id', False):
# annotations = orthologize_context(edge_info['species_id'], annotations)
edge_hash = utils._create_hash(
f'{edge_info["canonical"]["subject"]} {edge_info["canonical"]["relation"]} {edge_info["canonical"]["object"]}'
)
edge = {
"edge": {
"subject": {
"name": edge_info["canonical"]["subject"],
"name_lc": edge_info["canonical"]["subject"].lower(),
"label": edge_info["decanonical"]["subject"],
"label_lc": edge_info["decanonical"]["subject"].lower(),
"components": edge_info["subject_comp"],
},
"relation": {
"relation": edge_info["canonical"]["relation"],
"edge_hash": edge_hash,
"edge_dt": edge_dt,
"nanopub_url": nanopub_url,
"nanopub_id": nanopub["nanopub"]["id"],
"citation": citation_string,
"subject_canon": edge_info["canonical"]["subject"],
"subject": edge_info["decanonical"]["subject"],
"object_canon": edge_info["canonical"]["object"],
"object": edge_info["decanonical"]["object"],
"annotations": copy.deepcopy(annotations),
"metadata": copy.deepcopy(metadata),
"public_flag": True,
"edge_types": edge_info["edge_types"],
"species_id": edge_info["species_id"],
"species_label": edge_info["species_label"],
},
"object": {
"name": edge_info["canonical"]["object"],
"name_lc": edge_info["canonical"]["object"].lower(),
"label": edge_info["decanonical"]["object"],
"label_lc": edge_info["decanonical"]["object"].lower(),
"components": edge_info["object_comp"],
},
}
}
edges.append(copy.deepcopy(edge))
return {
"edges": edges,
"nanopub_id": nanopub["nanopub"]["id"],
"nanopub_url": nanopub_url,
"success": True,
"errors": errors,
} | Process nanopub into edges and load into EdgeStore
Args:
nanopub: BEL Nanopub
rules: list of compute rules to process
orthologize_targets: list of species in TAX:<int> format
Returns:
list: of edges
Edge object:
{
"edge": {
"subject": {
"name": subj_canon,
"name_lc": subj_canon.lower(),
"label": subj_lbl,
"label_lc": subj_lbl.lower(),
"components": subj_components,
},
"relation": { # relation _key is based on a hash
"relation": edge_ast.bel_relation,
"edge_hash": edge_hash,
"edge_dt": edge_dt,
"nanopub_url": nanopub_url,
"nanopub_id": nanopub_id,
"citation": citation,
"subject_canon": subj_canon,
"subject": subj_lbl,
"object_canon": obj_canon,
"object": obj_lbl,
"annotations": nanopub['annotations'],
"metadata": nanopub['metadata'],
"public_flag": True, # will be added when groups/permissions feature is finished,
"edge_types": edge_types,
},
'object': {
"name": obj_canon,
"name_lc": obj_canon.lower(),
"label": obj_lbl,
"label_lc": obj_lbl.lower(),
"components": obj_components,
}
}
} | Below is the the instruction that describes the task:
### Input:
Process nanopub into edges and load into EdgeStore
Args:
nanopub: BEL Nanopub
rules: list of compute rules to process
orthologize_targets: list of species in TAX:<int> format
Returns:
list: of edges
Edge object:
{
"edge": {
"subject": {
"name": subj_canon,
"name_lc": subj_canon.lower(),
"label": subj_lbl,
"label_lc": subj_lbl.lower(),
"components": subj_components,
},
"relation": { # relation _key is based on a hash
"relation": edge_ast.bel_relation,
"edge_hash": edge_hash,
"edge_dt": edge_dt,
"nanopub_url": nanopub_url,
"nanopub_id": nanopub_id,
"citation": citation,
"subject_canon": subj_canon,
"subject": subj_lbl,
"object_canon": obj_canon,
"object": obj_lbl,
"annotations": nanopub['annotations'],
"metadata": nanopub['metadata'],
"public_flag": True, # will be added when groups/permissions feature is finished,
"edge_types": edge_types,
},
'object': {
"name": obj_canon,
"name_lc": obj_canon.lower(),
"label": obj_lbl,
"label_lc": obj_lbl.lower(),
"components": obj_components,
}
}
}
### Response:
def nanopub_to_edges(nanopub: dict = {}, rules: List[str] = [], orthologize_targets: list = []):
"""Process nanopub into edges and load into EdgeStore
Args:
nanopub: BEL Nanopub
rules: list of compute rules to process
orthologize_targets: list of species in TAX:<int> format
Returns:
list: of edges
Edge object:
{
"edge": {
"subject": {
"name": subj_canon,
"name_lc": subj_canon.lower(),
"label": subj_lbl,
"label_lc": subj_lbl.lower(),
"components": subj_components,
},
"relation": { # relation _key is based on a hash
"relation": edge_ast.bel_relation,
"edge_hash": edge_hash,
"edge_dt": edge_dt,
"nanopub_url": nanopub_url,
"nanopub_id": nanopub_id,
"citation": citation,
"subject_canon": subj_canon,
"subject": subj_lbl,
"object_canon": obj_canon,
"object": obj_lbl,
"annotations": nanopub['annotations'],
"metadata": nanopub['metadata'],
"public_flag": True, # will be added when groups/permissions feature is finished,
"edge_types": edge_types,
},
'object': {
"name": obj_canon,
"name_lc": obj_canon.lower(),
"label": obj_lbl,
"label_lc": obj_lbl.lower(),
"components": obj_components,
}
}
}
"""
# Collect input values ####################################################
nanopub_url = nanopub.get("source_url", "")
edge_dt = utils.dt_utc_formatted() # don't want this in relation_id
# Extract BEL Version and make sure we can process this
if nanopub["nanopub"]["type"]["name"].upper() == "BEL":
bel_version = nanopub["nanopub"]["type"]["version"]
versions = bel.lang.bel_specification.get_bel_versions()
if bel_version not in versions:
log.error(
f"Do not know this BEL Version: {bel_version}, these are the ones I can process: {versions.keys()}"
)
return []
else:
log.error(
f"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}"
)
return []
# Required for BEL parsing/canonicalization/orthologization
api_url = config["bel_api"]["servers"]["api_url"]
try:
citation_string = normalize_nanopub_citation(nanopub)
except Exception as e:
log.error(f"Could not create citation string for {nanopub_url}")
citation_string = ""
if orthologize_targets == []:
if config["bel_api"].get("edges", None):
orthologize_targets = config["bel_api"]["edges"].get("orthologize_targets", [])
# orig_species_id = [anno['id'] for anno in nanopub['nanopub']['annotations'] if anno['type'] == 'Species']
# if orig_species_id:
# orig_species_id = orig_species_id[0]
master_annotations = copy.deepcopy(nanopub["nanopub"]["annotations"])
master_metadata = copy.deepcopy(nanopub["nanopub"]["metadata"])
master_metadata.pop("gd_abstract", None)
nanopub_type = nanopub["nanopub"]["metadata"].get("nanopub_type")
# Create Edge Assertion Info ##############################################
# r = generate_assertion_edge_info(nanopub['nanopub']['assertions'], orig_species_id, orthologize_targets, bel_version, api_url, nanopub_type)
r = generate_assertion_edge_info(
nanopub["nanopub"]["assertions"], orthologize_targets, bel_version, api_url, nanopub_type
)
edge_info_list = r["edge_info_list"]
# Build Edges #############################################################
edges = []
errors = []
for edge_info in edge_info_list:
annotations = copy.deepcopy(master_annotations)
metadata = copy.deepcopy(master_metadata)
errors.extend(edge_info["errors"])
if not edge_info.get("canonical"):
continue
# TODO - remove this
# if edge_info.get('species_id', False):
# annotations = orthologize_context(edge_info['species_id'], annotations)
edge_hash = utils._create_hash(
f'{edge_info["canonical"]["subject"]} {edge_info["canonical"]["relation"]} {edge_info["canonical"]["object"]}'
)
edge = {
"edge": {
"subject": {
"name": edge_info["canonical"]["subject"],
"name_lc": edge_info["canonical"]["subject"].lower(),
"label": edge_info["decanonical"]["subject"],
"label_lc": edge_info["decanonical"]["subject"].lower(),
"components": edge_info["subject_comp"],
},
"relation": {
"relation": edge_info["canonical"]["relation"],
"edge_hash": edge_hash,
"edge_dt": edge_dt,
"nanopub_url": nanopub_url,
"nanopub_id": nanopub["nanopub"]["id"],
"citation": citation_string,
"subject_canon": edge_info["canonical"]["subject"],
"subject": edge_info["decanonical"]["subject"],
"object_canon": edge_info["canonical"]["object"],
"object": edge_info["decanonical"]["object"],
"annotations": copy.deepcopy(annotations),
"metadata": copy.deepcopy(metadata),
"public_flag": True,
"edge_types": edge_info["edge_types"],
"species_id": edge_info["species_id"],
"species_label": edge_info["species_label"],
},
"object": {
"name": edge_info["canonical"]["object"],
"name_lc": edge_info["canonical"]["object"].lower(),
"label": edge_info["decanonical"]["object"],
"label_lc": edge_info["decanonical"]["object"].lower(),
"components": edge_info["object_comp"],
},
}
}
edges.append(copy.deepcopy(edge))
return {
"edges": edges,
"nanopub_id": nanopub["nanopub"]["id"],
"nanopub_url": nanopub_url,
"success": True,
"errors": errors,
} |
def normal(nmr_distributions, nmr_samples, mean=0, std=1, ctype='float', seed=None):
"""Draw random samples from the Gaussian distribution.
Args:
nmr_distributions (int): the number of unique continuous_distributions to create
nmr_samples (int): The number of samples to draw
mean (float or ndarray): The mean of the distribution
std (float or ndarray): The standard deviation of the distribution
ctype (str): the C type of the output samples
seed (float): the seed for the RNG
Returns:
ndarray: A two dimensional numpy array as (nmr_distributions, nmr_samples).
"""
if is_scalar(mean):
mean = np.ones((nmr_distributions, 1)) * mean
if is_scalar(std):
std = np.ones((nmr_distributions, 1)) * std
kernel_data = {'mean': Array(mean, as_scalar=True),
'std': Array(std, as_scalar=True)}
kernel = SimpleCLFunction.from_string('''
void compute(double mean, double std, global uint* rng_state, global ''' + ctype + '''* samples){
rand123_data rand123_rng_data = rand123_initialize_data((uint[]){
rng_state[0], rng_state[1], rng_state[2], rng_state[3],
rng_state[4], rng_state[5], 0});
void* rng_data = (void*)&rand123_rng_data;
for(uint i = 0; i < ''' + str(nmr_samples) + '''; i++){
double4 randomnr = randn4(rng_data);
samples[i] = (''' + ctype + ''')(mean + randomnr.x * std);
}
}
''', dependencies=[Rand123()])
return _generate_samples(kernel, nmr_distributions, nmr_samples, ctype, kernel_data, seed=seed) | Draw random samples from the Gaussian distribution.
Args:
nmr_distributions (int): the number of unique continuous_distributions to create
nmr_samples (int): The number of samples to draw
mean (float or ndarray): The mean of the distribution
std (float or ndarray): The standard deviation or the distribution
ctype (str): the C type of the output samples
seed (float): the seed for the RNG
Returns:
ndarray: A two dimensional numpy array as (nmr_distributions, nmr_samples). | Below is the the instruction that describes the task:
### Input:
Draw random samples from the Gaussian distribution.
Args:
nmr_distributions (int): the number of unique continuous_distributions to create
nmr_samples (int): The number of samples to draw
mean (float or ndarray): The mean of the distribution
std (float or ndarray): The standard deviation of the distribution
ctype (str): the C type of the output samples
seed (float): the seed for the RNG
Returns:
ndarray: A two dimensional numpy array as (nmr_distributions, nmr_samples).
### Response:
def normal(nmr_distributions, nmr_samples, mean=0, std=1, ctype='float', seed=None):
"""Draw random samples from the Gaussian distribution.
Args:
nmr_distributions (int): the number of unique continuous_distributions to create
nmr_samples (int): The number of samples to draw
mean (float or ndarray): The mean of the distribution
std (float or ndarray): The standard deviation of the distribution
ctype (str): the C type of the output samples
seed (float): the seed for the RNG
Returns:
ndarray: A two dimensional numpy array as (nmr_distributions, nmr_samples).
"""
if is_scalar(mean):
mean = np.ones((nmr_distributions, 1)) * mean
if is_scalar(std):
std = np.ones((nmr_distributions, 1)) * std
kernel_data = {'mean': Array(mean, as_scalar=True),
'std': Array(std, as_scalar=True)}
kernel = SimpleCLFunction.from_string('''
void compute(double mean, double std, global uint* rng_state, global ''' + ctype + '''* samples){
rand123_data rand123_rng_data = rand123_initialize_data((uint[]){
rng_state[0], rng_state[1], rng_state[2], rng_state[3],
rng_state[4], rng_state[5], 0});
void* rng_data = (void*)&rand123_rng_data;
for(uint i = 0; i < ''' + str(nmr_samples) + '''; i++){
double4 randomnr = randn4(rng_data);
samples[i] = (''' + ctype + ''')(mean + randomnr.x * std);
}
}
''', dependencies=[Rand123()])
return _generate_samples(kernel, nmr_distributions, nmr_samples, ctype, kernel_data, seed=seed) |
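A sketch of drawing per-distribution samples; it assumes a usable OpenCL device (the kernel runs through the package's CL machinery) and passes the per-distribution means with the (nmr_distributions, 1) shape that the scalar branch above builds:
import numpy as np

means = np.array([[0.0], [10.0], [-5.0]])             # one mean per distribution
samples = normal(3, 5, mean=means, std=2.0, ctype='float', seed=1)
print(samples.shape)                                   # (3, 5)
print(samples.mean(axis=1))                            # roughly [0, 10, -5], up to sampling noise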
def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False):
"""Request specific Quality of Service.
This method requests a specific quality of service. The QoS
can be specified for the current channel or for all channels
on the connection. The particular properties and semantics of
a qos method always depend on the content class semantics.
Though the qos method could in principle apply to both peers,
it is currently meaningful only for the server.
:param prefetch_size: Prefetch window in octets.
The client can request that messages be sent in
advance so that when the client finishes processing a
message, the following message is already held
locally, rather than needing to be sent down the
channel. Prefetching gives a performance improvement.
This field specifies the prefetch window size in
octets. The server will send a message in advance if
it is equal to or smaller in size than the available
prefetch size (and also falls into other prefetch
limits). May be set to zero, meaning "no specific
limit", although other prefetch limits may still
apply. The ``prefetch_size`` is ignored if the
:attr:`no_ack` option is set.
:param prefetch_count: Specifies a prefetch window in terms of whole
messages. This field may be used in combination with
``prefetch_size``; A message will only be sent
in advance if both prefetch windows (and those at the
channel and connection level) allow it. The prefetch-
count is ignored if the :attr:`no_ack` option is set.
:keyword apply_global: By default the QoS settings apply to the
current channel only. If this is set, they are applied
to the entire connection.
"""
return self.backend.qos(prefetch_size, prefetch_count, apply_global) | Request specific Quality of Service.
This method requests a specific quality of service. The QoS
can be specified for the current channel or for all channels
on the connection. The particular properties and semantics of
a qos method always depend on the content class semantics.
Though the qos method could in principle apply to both peers,
it is currently meaningful only for the server.
:param prefetch_size: Prefetch window in octets.
The client can request that messages be sent in
advance so that when the client finishes processing a
message, the following message is already held
locally, rather than needing to be sent down the
channel. Prefetching gives a performance improvement.
This field specifies the prefetch window size in
octets. The server will send a message in advance if
it is equal to or smaller in size than the available
prefetch size (and also falls into other prefetch
limits). May be set to zero, meaning "no specific
limit", although other prefetch limits may still
apply. The ``prefetch_size`` is ignored if the
:attr:`no_ack` option is set.
:param prefetch_count: Specifies a prefetch window in terms of whole
messages. This field may be used in combination with
``prefetch_size``; A message will only be sent
in advance if both prefetch windows (and those at the
channel and connection level) allow it. The prefetch-
count is ignored if the :attr:`no_ack` option is set.
:keyword apply_global: By default the QoS settings apply to the
current channel only. If this is set, they are applied
to the entire connection. | Below is the the instruction that describes the task:
### Input:
Request specific Quality of Service.
This method requests a specific quality of service. The QoS
can be specified for the current channel or for all channels
on the connection. The particular properties and semantics of
a qos method always depend on the content class semantics.
Though the qos method could in principle apply to both peers,
it is currently meaningful only for the server.
:param prefetch_size: Prefetch window in octets.
The client can request that messages be sent in
advance so that when the client finishes processing a
message, the following message is already held
locally, rather than needing to be sent down the
channel. Prefetching gives a performance improvement.
This field specifies the prefetch window size in
octets. The server will send a message in advance if
it is equal to or smaller in size than the available
prefetch size (and also falls into other prefetch
limits). May be set to zero, meaning "no specific
limit", although other prefetch limits may still
apply. The ``prefetch_size`` is ignored if the
:attr:`no_ack` option is set.
:param prefetch_count: Specifies a prefetch window in terms of whole
messages. This field may be used in combination with
``prefetch_size``; A message will only be sent
in advance if both prefetch windows (and those at the
channel and connection level) allow it. The prefetch-
count is ignored if the :attr:`no_ack` option is set.
:keyword apply_global: By default the QoS settings apply to the
current channel only. If this is set, they are applied
to the entire connection.
### Response:
def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False):
"""Request specific Quality of Service.
This method requests a specific quality of service. The QoS
can be specified for the current channel or for all channels
on the connection. The particular properties and semantics of
a qos method always depend on the content class semantics.
Though the qos method could in principle apply to both peers,
it is currently meaningful only for the server.
:param prefetch_size: Prefetch window in octets.
The client can request that messages be sent in
advance so that when the client finishes processing a
message, the following message is already held
locally, rather than needing to be sent down the
channel. Prefetching gives a performance improvement.
This field specifies the prefetch window size in
octets. The server will send a message in advance if
it is equal to or smaller in size than the available
prefetch size (and also falls into other prefetch
limits). May be set to zero, meaning "no specific
limit", although other prefetch limits may still
apply. The ``prefetch_size`` is ignored if the
:attr:`no_ack` option is set.
:param prefetch_count: Specifies a prefetch window in terms of whole
messages. This field may be used in combination with
``prefetch_size``; A message will only be sent
in advance if both prefetch windows (and those at the
channel and connection level) allow it. The prefetch-
count is ignored if the :attr:`no_ack` option is set.
:keyword apply_global: By default the QoS settings apply to the
current channel only. If this is set, they are applied
to the entire connection.
"""
return self.backend.qos(prefetch_size, prefetch_count, apply_global) |
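A typical prefetch setup on an open channel; the values are illustrative and channel stands for an instance of the class this method belongs to:
# let at most 10 unacknowledged messages be in flight on this channel
channel.qos(prefetch_size=0, prefetch_count=10, apply_global=False)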
def active_service_location(doc):
"""View for getting active service by location"""
if doc.get('state') != 'deactivated':
for service_id, service in doc.get('services', {}).items():
if service.get('state') != 'deactivated':
service['id'] = service_id
service['organisation_id'] = doc['_id']
location = service.get('location', None)
if location:
yield location, service | View for getting active service by location | Below is the the instruction that describes the task:
### Input:
View for getting active service by location
### Response:
def active_service_location(doc):
"""View for getting active service by location"""
if doc.get('state') != 'deactivated':
for service_id, service in doc.get('services', {}).items():
if service.get('state') != 'deactivated':
service['id'] = service_id
service['organisation_id'] = doc['_id']
location = service.get('location', None)
if location:
yield location, service |
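Feeding one organisation document through the view function by hand (the document below is made up for illustration):
doc = {
    "_id": "org-1",
    "state": "active",
    "services": {
        "svc-1": {"state": "active", "location": "https://example.org/api"},
        "svc-2": {"state": "deactivated", "location": "https://example.org/old"},
    },
}

for key, service in active_service_location(doc):
    print(key, service["id"], service["organisation_id"])
# https://example.org/api svc-1 org-1   (the deactivated service is skipped)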
def get_edges(data, superints, splits):
"""
Gets edge trimming based on the overlap of sequences at the edges of
alignments and the tuple arg passed in for edge_trimming. Trims as
(R1 left, R1 right, R2 left, R2 right). We also trim off the restriction
site if it is present. This modifies superints, and so should be run on an
engine so it doesn't affect local copy. If this is changed to run locally
for some reason make sure we copy the superints instead.
"""
## the filtering arg and parse it into minsamp numbers
if "trim_overhang" in data.paramsdict:
edgetrims = np.array(data.paramsdict["trim_overhang"]).astype(np.int16)
else:
edgetrims = np.array(data.paramsdict["trim_loci"]).astype(np.int16)
## Cuts 3 and 4 are only for 3rad/radcap
## TODO: This is moderately hackish, it's not using cut3/4
## correctly, just assuming the length is the same as cut1/2
try:
cut1, cut2, _, _ = data.paramsdict["restriction_overhang"]
LOGGER.debug("Found 3Rad cut sites")
except ValueError:
cut1, cut2 = data.paramsdict["restriction_overhang"]
cuts = np.array([len(cut1), len(cut2)], dtype=np.int16)
## a local array for storing edge trims
edges = np.zeros((superints.shape[0], 5), dtype=np.int16)
## a local array for storing edge filtered loci, these are stored
## eventually as minsamp excludes.
edgefilter = np.zeros((superints.shape[0],), dtype=np.bool)
## TRIM GUIDE. The cut site lengths are always trimmed. In addition,
## edge overhangs are trimmed to min(4, minsamp), and then additional
## number of columns is trimmed based on edgetrims values.
## A special case, -1 value means no trim at all.
if data.paramsdict["min_samples_locus"] <= 4:
minedge = np.int16(data.paramsdict["min_samples_locus"])
else:
minedge = np.int16(max(4, data.paramsdict["min_samples_locus"]))
## convert all - to N to make this easier
nodashints = copy.deepcopy(superints)#.copy()
nodashints[nodashints == 45] = 78
## trim overhanging edges
## get the number not Ns in each site,
#ccx = np.sum(superseqs != "N", axis=1)
ccx = np.sum(nodashints != 78, axis=1, dtype=np.uint16)
efi, edg = edgetrim_numba(splits, ccx, edges, edgefilter, edgetrims, cuts, minedge)
return efi, edg | Gets edge trimming based on the overlap of sequences at the edges of
alignments and the tuple arg passed in for edge_trimming. Trims as
(R1 left, R1 right, R2 left, R2 right). We also trim off the restriction
site if it is present. This modifies superints, and so should be run on an
engine so it doesn't affect local copy. If this is changed to run locally
for some reason make sure we copy the superints instead. | Below is the instruction that describes the task:
### Input:
Gets edge trimming based on the overlap of sequences at the edges of
alignments and the tuple arg passed in for edge_trimming. Trims as
(R1 left, R1 right, R2 left, R2 right). We also trim off the restriction
site if it is present. This modifies superints, and so should be run on an
engine so it doesn't affect local copy. If this is changed to run locally
for some reason make sure we copy the superints instead.
### Response:
def get_edges(data, superints, splits):
"""
Gets edge trimming based on the overlap of sequences at the edges of
alignments and the tuple arg passed in for edge_trimming. Trims as
(R1 left, R1 right, R2 left, R2 right). We also trim off the restriction
site if it is present. This modifies superints, and so should be run on an
engine so it doesn't affect local copy. If this is changed to run locally
for some reason make sure we copy the superints instead.
"""
## the filtering arg and parse it into minsamp numbers
if "trim_overhang" in data.paramsdict:
edgetrims = np.array(data.paramsdict["trim_overhang"]).astype(np.int16)
else:
edgetrims = np.array(data.paramsdict["trim_loci"]).astype(np.int16)
## Cuts 3 and 4 are only for 3rad/radcap
## TODO: This is moderately hackish, it's not using cut3/4
## correctly, just assuming the length is the same as cut1/2
try:
cut1, cut2, _, _ = data.paramsdict["restriction_overhang"]
LOGGER.debug("Found 3Rad cut sites")
except ValueError:
cut1, cut2 = data.paramsdict["restriction_overhang"]
cuts = np.array([len(cut1), len(cut2)], dtype=np.int16)
## a local array for storing edge trims
edges = np.zeros((superints.shape[0], 5), dtype=np.int16)
## a local array for storing edge filtered loci, these are stored
## eventually as minsamp excludes.
edgefilter = np.zeros((superints.shape[0],), dtype=np.bool)
## TRIM GUIDE. The cut site lengths are always trimmed. In addition,
## edge overhangs are trimmed to min(4, minsamp), and then additional
## number of columns is trimmed based on edgetrims values.
## A special case, -1 value means no trim at all.
if data.paramsdict["min_samples_locus"] <= 4:
minedge = np.int16(data.paramsdict["min_samples_locus"])
else:
minedge = np.int16(max(4, data.paramsdict["min_samples_locus"]))
## convert all - to N to make this easier
nodashints = copy.deepcopy(superints)#.copy()
nodashints[nodashints == 45] = 78
## trim overhanging edges
## get the number not Ns in each site,
#ccx = np.sum(superseqs != "N", axis=1)
ccx = np.sum(nodashints != 78, axis=1, dtype=np.uint16)
efi, edg = edgetrim_numba(splits, ccx, edges, edgefilter, edgetrims, cuts, minedge)
return efi, edg |
def open_channel(self):
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
_logger.info('Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open) | Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika. | Below is the instruction that describes the task:
### Input:
Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
### Response:
def open_channel(self):
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
_logger.info('Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open) |
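
A hedged sketch of the surrounding pika consumer pattern this method assumes; on_channel_open simply records the channel pika hands back:

def on_channel_open(self, channel):
    """Invoked by pika once the Channel.Open RPC completes."""
    _logger.info('Channel opened')
    self._channel = channel
    # Typical next step: declare exchanges and queues on the new channel.
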
def update_nb_metadata(nb_path=None, title=None, summary=None, keywords='fastai', overwrite=True, **kwargs):
"Creates jekyll metadata for given notebook path."
nb = read_nb(nb_path)
data = {'title': title, 'summary': summary, 'keywords': keywords, **kwargs}
data = {k:v for (k,v) in data.items() if v is not None} # remove none values
if not data: return
nb['metadata']['jekyll'] = data
write_nb(nb, nb_path)
NotebookNotary().sign(nb) | Creates jekyll metadata for given notebook path. | Below is the instruction that describes the task:
### Input:
Creates jekyll metadata for given notebook path.
### Response:
def update_nb_metadata(nb_path=None, title=None, summary=None, keywords='fastai', overwrite=True, **kwargs):
"Creates jekyll metadata for given notebook path."
nb = read_nb(nb_path)
data = {'title': title, 'summary': summary, 'keywords': keywords, **kwargs}
data = {k:v for (k,v) in data.items() if v is not None} # remove none values
if not data: return
nb['metadata']['jekyll'] = data
write_nb(nb, nb_path)
NotebookNotary().sign(nb) |
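
A hypothetical call; the notebook path is illustrative, and the keyword arguments end up under nb['metadata']['jekyll']:

update_nb_metadata('docs_src/example.ipynb',
                   title='Example page',
                   summary='A short demo summary',
                   keywords='fastai demo')
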
def course_discovery_search(search_term=None, size=20, from_=0, field_dictionary=None):
"""
Course Discovery activities against the search engine index of course details
"""
# We'll ignore the course-enrollment information in field and filter
# dictionary, and use our own logic upon enrollment dates for these
use_search_fields = ["org"]
(search_fields, _, exclude_dictionary) = SearchFilterGenerator.generate_field_filters()
use_field_dictionary = {}
use_field_dictionary.update({field: search_fields[field] for field in search_fields if field in use_search_fields})
if field_dictionary:
use_field_dictionary.update(field_dictionary)
if not getattr(settings, "SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING", False):
use_field_dictionary["enrollment_start"] = DateRange(None, datetime.utcnow())
searcher = SearchEngine.get_search_engine(getattr(settings, "COURSEWARE_INDEX_NAME", "courseware_index"))
if not searcher:
raise NoSearchEngineError("No search engine specified in settings.SEARCH_ENGINE")
results = searcher.search(
query_string=search_term,
doc_type="course_info",
size=size,
from_=from_,
# only show when enrollment start IS provided and is before now
field_dictionary=use_field_dictionary,
# show if no enrollment end is provided and has not yet been reached
filter_dictionary={"enrollment_end": DateRange(datetime.utcnow(), None)},
exclude_dictionary=exclude_dictionary,
facet_terms=course_discovery_facets(),
)
return results | Course Discovery activities against the search engine index of course details | Below is the instruction that describes the task:
### Input:
Course Discovery activities against the search engine index of course details
### Response:
def course_discovery_search(search_term=None, size=20, from_=0, field_dictionary=None):
"""
Course Discovery activities against the search engine index of course details
"""
# We'll ignore the course-enrollment information in field and filter
# dictionary, and use our own logic upon enrollment dates for these
use_search_fields = ["org"]
(search_fields, _, exclude_dictionary) = SearchFilterGenerator.generate_field_filters()
use_field_dictionary = {}
use_field_dictionary.update({field: search_fields[field] for field in search_fields if field in use_search_fields})
if field_dictionary:
use_field_dictionary.update(field_dictionary)
if not getattr(settings, "SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING", False):
use_field_dictionary["enrollment_start"] = DateRange(None, datetime.utcnow())
searcher = SearchEngine.get_search_engine(getattr(settings, "COURSEWARE_INDEX_NAME", "courseware_index"))
if not searcher:
raise NoSearchEngineError("No search engine specified in settings.SEARCH_ENGINE")
results = searcher.search(
query_string=search_term,
doc_type="course_info",
size=size,
from_=from_,
# only show when enrollment start IS provided and is before now
field_dictionary=use_field_dictionary,
# show if no enrollment end is provided and has not yet been reached
filter_dictionary={"enrollment_end": DateRange(datetime.utcnow(), None)},
exclude_dictionary=exclude_dictionary,
facet_terms=course_discovery_facets(),
)
return results |
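
A hedged usage sketch; the search term and organisation filter are illustrative and assume the courseware index described above is configured:

results = course_discovery_search(
    search_term="data science",
    size=10,
    from_=0,
    field_dictionary={"org": "SampleOrgX"},  # hypothetical organisation filter
)
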
def add_chain(self, chain, parameters=None, name=None, weights=None, posterior=None, walkers=None,
grid=False, num_eff_data_points=None, num_free_params=None, color=None, linewidth=None,
linestyle=None, kde=None, shade=None, shade_alpha=None, power=None, marker_style=None, marker_size=None,
marker_alpha=None, plot_contour=None, plot_point=None, statistics=None, cloud=None,
shade_gradient=None, bar_shade=None, bins=None, smooth=None, color_params=None,
plot_color_params=None, cmap=None, num_cloud=None):
"""
Add a chain to the consumer.
Parameters
----------
chain : str|ndarray|dict
The chain to load. Normally a ``numpy.ndarray``. If a string is found, it
interprets the string as a filename and attempts to load it in. If a ``dict``
is passed in, it assumes the dict has keys of parameter names and values of
an array of samples. Notice that using a dictionary puts the order of
parameters in the output under the control of the python ``dict.keys()`` function.
If you passed ``grid`` is set, you can pass in the parameter ranges in list form.
parameters : list[str], optional
A list of parameter names, one for each column (dimension) in the chain. This parameter
should remain ``None`` if a dictionary is given as ``chain``, as the parameter names
are taken from the dictionary keys.
name : str, optional
The name of the chain. Used when plotting multiple chains at once.
weights : ndarray, optional
If given, uses this array to weight the samples in chain
posterior : ndarray, optional
If given, records the log posterior for each sample in the chain
walkers : int, optional
How many walkers went into creating the chain. Each walker should
contribute the same number of steps, and should appear in contiguous
blocks in the final chain.
grid : boolean, optional
Whether the input is a flattened chain from a grid search instead of a Monte-Carlo
chains. Note that when this is set, `walkers` should not be set, and `weights` should
be set to the posterior evaluation for the grid point. **Be careful** when using
a coarse grid of setting a high smoothing value, as this may oversmooth the posterior
surface and give unreasonably large parameter bounds.
num_eff_data_points : int|float, optional
The number of effective (independent) data points used in the model fitting. Not required
for plotting, but required if loading in multiple chains to perform model comparison.
num_free_params : int, optional
The number of degrees of freedom in your model. Not required for plotting, but required if
loading in multiple chains to perform model comparison.
color : str(hex), optional
Provide a colour for the chain. Can be used instead of calling `configure` for convenience.
linewidth : float, optional
Provide a line width to plot the contours. Can be used instead of calling `configure` for convenience.
linestyle : str, optional
Provide a line style to plot the contour. Can be used instead of calling `configure` for convenience.
kde : bool|float, optional
Set the `kde` value for this specific chain. Can be used instead of calling `configure` for convenience.
shade : bool, optional
If set, overrides the default behaviour and plots filled contours or not. If a list of
bools is passed, you can turn shading on or off for specific chains.
shade_alpha : float, optional
Filled contour alpha value. Can be used instead of calling `configure` for convenience.
power : float, optional
The power to raise the posterior surface to. Useful for inflating or deflating uncertainty for debugging.
marker_style : str, optional
The marker style to use when plotting points. Defaults to `'.'`
marker_size : numeric, optional
Size of markers, if plotted. Defaults to `4`.
marker_alpha : numeric, optional
The alpha values when plotting markers.
plot_contour : bool, optional
Whether to plot the whole contour (as opposed to a point). Defaults to true for less than
25 concurrent chains.
plot_point : bool, optional
Whether to plot a maximum likelihood point. Defaults to true for more than 24 chains.
statistics : string, optional
Which sort of statistics to use. Defaults to `"max"` for maximum likelihood
statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`,
`"max_closest"` and `"max_central"`. In the
very, very rare case you want to enable different statistics for different
chains, you can pass in a list of strings.
cloud : bool, optional
If set, overrides the default behaviour and plots the cloud or not
shade_gradient :
bar_shade : bool, optional
If set to true, shades in confidence regions under the histogram. By default
this happens if you have less than 3 chains, but is disabled if you are comparing
more chains. You can pass a list if you wish to shade some chains but not others.
bins : int|float, optional
The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where
:math:`n` are the number of data points. Giving an integer will set the number
of bins to the given value. Giving a float will scale the number of bins, such
that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins.
Note this parameter is most useful if `kde=False` is also passed, so you
can actually see the bins and not a KDE.
smooth :
color_params : str, optional
The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set
to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain,
it will respectively use the weights, log weights, or posterior, to colour the points.
plot_color_params : bool, optional
Whether or not the colour parameter should also be plotted as a posterior surface.
cmaps : str, optional
The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can
specify a different cmap for each variable. By default ChainConsumer will cycle between several
cmaps.
num_cloud : int, optional
The number of scatter points to show when enabling `cloud` or setting one of the parameters
to colour scatter. Defaults to 15k per chain.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
"""
is_dict = False
assert chain is not None, "You cannot have a chain of None"
if isinstance(chain, str):
if chain.endswith("txt"):
chain = np.loadtxt(chain)
else:
chain = np.load(chain)
elif isinstance(chain, dict):
assert parameters is None, \
"You cannot pass a dictionary and specify parameter names"
is_dict = True
parameters = list(chain.keys())
chain = np.array([chain[p] for p in parameters]).T
elif isinstance(chain, list):
chain = np.array(chain).T
if grid:
assert walkers is None, "If grid is set, walkers should not be"
assert weights is not None, "If grid is set, you need to supply weights"
if len(weights.shape) > 1:
assert not is_dict, "We cannot construct a meshgrid from a dictionary, as the parameters" \
"are no longer ordered. Please pass in a flattened array instead."
self._logger.info("Constructing meshgrid for grid results")
meshes = np.meshgrid(*[u for u in chain.T], indexing="ij")
chain = np.vstack([m.flatten() for m in meshes]).T
weights = weights.flatten()
assert weights.size == chain[:,
0].size, "Error, given weight array size disagrees with parameter sampling"
if len(chain.shape) == 1:
chain = chain[None].T
if name is None:
name = "Chain %d" % len(self.chains)
if power is not None:
assert isinstance(power, int) or isinstance(power, float), "Power should be numeric, but is %s" % type(
power)
if self._default_parameters is None and parameters is not None:
self._default_parameters = parameters
if parameters is None:
if self._default_parameters is not None:
assert chain.shape[1] == len(self._default_parameters), \
"Chain has %d dimensions, but default parameters have %d dimensions" \
% (chain.shape[1], len(self._default_parameters))
parameters = self._default_parameters
self._logger.debug("Adding chain using default parameters")
else:
self._logger.debug("Adding chain with no parameter names")
parameters = ["%d" % x for x in range(chain.shape[1])]
else:
self._logger.debug("Adding chain with defined parameters")
assert len(parameters) <= chain.shape[1], \
"Have only %d columns in chain, but have been given %d parameters names! " \
"Please double check this." % (chain.shape[1], len(parameters))
for p in parameters:
if p not in self._all_parameters:
self._all_parameters.append(p)
# Sorry, no KDE for you on a grid.
if grid:
kde = None
if color is not None:
color = self.color_finder.get_formatted([color])[0]
c = Chain(chain, parameters, name, weights=weights, posterior=posterior, walkers=walkers,
grid=grid, num_free_params=num_free_params, num_eff_data_points=num_eff_data_points,
color=color, linewidth=linewidth, linestyle=linestyle, kde=kde, shade_alpha=shade_alpha, power=power,
marker_style=marker_style, marker_size=marker_size, marker_alpha=marker_alpha,
plot_contour=plot_contour, plot_point=plot_point, statistics=statistics, cloud=cloud,
shade=shade, shade_gradient=shade_gradient, bar_shade=bar_shade, bins=bins, smooth=smooth,
color_params=color_params, plot_color_params=plot_color_params, cmap=cmap,
num_cloud=num_cloud)
self.chains.append(c)
self._init_params()
return self | Add a chain to the consumer.
Parameters
----------
chain : str|ndarray|dict
The chain to load. Normally a ``numpy.ndarray``. If a string is found, it
interprets the string as a filename and attempts to load it in. If a ``dict``
is passed in, it assumes the dict has keys of parameter names and values of
an array of samples. Notice that using a dictionary puts the order of
parameters in the output under the control of the python ``dict.keys()`` function.
If you passed ``grid`` is set, you can pass in the parameter ranges in list form.
parameters : list[str], optional
A list of parameter names, one for each column (dimension) in the chain. This parameter
should remain ``None`` if a dictionary is given as ``chain``, as the parameter names
are taken from the dictionary keys.
name : str, optional
The name of the chain. Used when plotting multiple chains at once.
weights : ndarray, optional
If given, uses this array to weight the samples in chain
posterior : ndarray, optional
If given, records the log posterior for each sample in the chain
walkers : int, optional
How many walkers went into creating the chain. Each walker should
contribute the same number of steps, and should appear in contiguous
blocks in the final chain.
grid : boolean, optional
Whether the input is a flattened chain from a grid search instead of a Monte-Carlo
chains. Note that when this is set, `walkers` should not be set, and `weights` should
be set to the posterior evaluation for the grid point. **Be careful** when using
a coarse grid of setting a high smoothing value, as this may oversmooth the posterior
surface and give unreasonably large parameter bounds.
num_eff_data_points : int|float, optional
The number of effective (independent) data points used in the model fitting. Not required
for plotting, but required if loading in multiple chains to perform model comparison.
num_free_params : int, optional
The number of degrees of freedom in your model. Not required for plotting, but required if
loading in multiple chains to perform model comparison.
color : str(hex), optional
Provide a colour for the chain. Can be used instead of calling `configure` for convenience.
linewidth : float, optional
Provide a line width to plot the contours. Can be used instead of calling `configure` for convenience.
linestyle : str, optional
Provide a line style to plot the contour. Can be used instead of calling `configure` for convenience.
kde : bool|float, optional
Set the `kde` value for this specific chain. Can be used instead of calling `configure` for convenience.
shade : bool, optional
If set, overrides the default behaviour and plots filled contours or not. If a list of
bools is passed, you can turn shading on or off for specific chains.
shade_alpha : float, optional
Filled contour alpha value. Can be used instead of calling `configure` for convenience.
power : float, optional
The power to raise the posterior surface to. Useful for inflating or deflating uncertainty for debugging.
marker_style : str, optional
The marker style to use when plotting points. Defaults to `'.'`
marker_size : numeric, optional
Size of markers, if plotted. Defaults to `4`.
marker_alpha : numeric, optional
The alpha values when plotting markers.
plot_contour : bool, optional
Whether to plot the whole contour (as opposed to a point). Defaults to true for less than
25 concurrent chains.
plot_point : bool, optional
Whether to plot a maximum likelihood point. Defaults to true for more than 24 chains.
statistics : string, optional
Which sort of statistics to use. Defaults to `"max"` for maximum likelihood
statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`,
`"max_closest"` and `"max_central"`. In the
very, very rare case you want to enable different statistics for different
chains, you can pass in a list of strings.
cloud : bool, optional
If set, overrides the default behaviour and plots the cloud or not
shade_gradient :
bar_shade : bool, optional
If set to true, shades in confidence regions under the histogram. By default
this happens if you have less than 3 chains, but is disabled if you are comparing
more chains. You can pass a list if you wish to shade some chains but not others.
bins : int|float, optional
The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where
:math:`n` are the number of data points. Giving an integer will set the number
of bins to the given value. Giving a float will scale the number of bins, such
that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins.
Note this parameter is most useful if `kde=False` is also passed, so you
can actually see the bins and not a KDE.
smooth :
color_params : str, optional
The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set
to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain,
it will respectively use the weights, log weights, or posterior, to colour the points.
plot_color_params : bool, optional
Whether or not the colour parameter should also be plotted as a posterior surface.
cmaps : str, optional
The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can
specify a different cmap for each variable. By default ChainConsumer will cycle between several
cmaps.
num_cloud : int, optional
The number of scatter points to show when enabling `cloud` or setting one of the parameters
to colour scatter. Defaults to 15k per chain.
Returns
-------
ChainConsumer
Itself, to allow chaining calls. | Below is the instruction that describes the task:
### Input:
Add a chain to the consumer.
Parameters
----------
chain : str|ndarray|dict
The chain to load. Normally a ``numpy.ndarray``. If a string is found, it
interprets the string as a filename and attempts to load it in. If a ``dict``
is passed in, it assumes the dict has keys of parameter names and values of
an array of samples. Notice that using a dictionary puts the order of
parameters in the output under the control of the python ``dict.keys()`` function.
If you passed ``grid`` is set, you can pass in the parameter ranges in list form.
parameters : list[str], optional
A list of parameter names, one for each column (dimension) in the chain. This parameter
should remain ``None`` if a dictionary is given as ``chain``, as the parameter names
are taken from the dictionary keys.
name : str, optional
The name of the chain. Used when plotting multiple chains at once.
weights : ndarray, optional
If given, uses this array to weight the samples in chain
posterior : ndarray, optional
If given, records the log posterior for each sample in the chain
walkers : int, optional
How many walkers went into creating the chain. Each walker should
contribute the same number of steps, and should appear in contiguous
blocks in the final chain.
grid : boolean, optional
Whether the input is a flattened chain from a grid search instead of a Monte-Carlo
chains. Note that when this is set, `walkers` should not be set, and `weights` should
be set to the posterior evaluation for the grid point. **Be careful** when using
a coarse grid of setting a high smoothing value, as this may oversmooth the posterior
surface and give unreasonably large parameter bounds.
num_eff_data_points : int|float, optional
The number of effective (independent) data points used in the model fitting. Not required
for plotting, but required if loading in multiple chains to perform model comparison.
num_free_params : int, optional
The number of degrees of freedom in your model. Not required for plotting, but required if
loading in multiple chains to perform model comparison.
color : str(hex), optional
Provide a colour for the chain. Can be used instead of calling `configure` for convenience.
linewidth : float, optional
Provide a line width to plot the contours. Can be used instead of calling `configure` for convenience.
linestyle : str, optional
Provide a line style to plot the contour. Can be used instead of calling `configure` for convenience.
kde : bool|float, optional
Set the `kde` value for this specific chain. Can be used instead of calling `configure` for convenience.
shade : booloptional
If set, overrides the default behaviour and plots filled contours or not. If a list of
bools is passed, you can turn shading on or off for specific chains.
shade_alpha : float, optional
Filled contour alpha value. Can be used instead of calling `configure` for convenience.
power : float, optional
The power to raise the posterior surface to. Useful for inflating or deflating uncertainty for debugging.
marker_style : str|, optional
The marker style to use when plotting points. Defaults to `'.'`
marker_size : numeric|, optional
Size of markers, if plotted. Defaults to `4`.
marker_alpha : numeric, optional
The alpha values when plotting markers.
plot_contour : bool, optional
Whether to plot the whole contour (as opposed to a point). Defaults to true for less than
25 concurrent chains.
plot_point : bool, optional
Whether to plot a maximum likelihood point. Defaults to true for more then 24 chains.
statistics : string, optional
Which sort of statistics to use. Defaults to `"max"` for maximum likelihood
statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`,
`"max_closest"` and `"max_central"`. In the
very, very rare case you want to enable different statistics for different
chains, you can pass in a list of strings.
cloud : bool, optional
If set, overrides the default behaviour and plots the cloud or not shade_gradient :
bar_shade : bool, optional
If set to true, shades in confidence regions in under histogram. By default
this happens if you less than 3 chains, but is disabled if you are comparing
more chains. You can pass a list if you wish to shade some chains but not others.
bins : int|float, optional
The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where
:math:`n` are the number of data points. Giving an integer will set the number
of bins to the given value. Giving a float will scale the number of bins, such
that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins.
Note this parameter is most useful if `kde=False` is also passed, so you
can actually see the bins and not a KDE. smooth :
color_params : str, optional
The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set
to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain,
it will respectively use the weights, log weights, or posterior, to colour the points.
plot_color_params : bool, optional
Whether or not the colour parameter should also be plotted as a posterior surface.
cmaps : str, optional
The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can
specific a different cmap for each variable. By default ChainConsumer will cycle between several
cmaps.
num_cloud : int, optional
The number of scatter points to show when enabling `cloud` or setting one of the parameters
to colour scatter. Defaults to 15k per chain.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
### Response:
def add_chain(self, chain, parameters=None, name=None, weights=None, posterior=None, walkers=None,
grid=False, num_eff_data_points=None, num_free_params=None, color=None, linewidth=None,
linestyle=None, kde=None, shade=None, shade_alpha=None, power=None, marker_style=None, marker_size=None,
marker_alpha=None, plot_contour=None, plot_point=None, statistics=None, cloud=None,
shade_gradient=None, bar_shade=None, bins=None, smooth=None, color_params=None,
plot_color_params=None, cmap=None, num_cloud=None):
"""
Add a chain to the consumer.
Parameters
----------
chain : str|ndarray|dict
The chain to load. Normally a ``numpy.ndarray``. If a string is found, it
interprets the string as a filename and attempts to load it in. If a ``dict``
is passed in, it assumes the dict has keys of parameter names and values of
an array of samples. Notice that using a dictionary puts the order of
parameters in the output under the control of the python ``dict.keys()`` function.
If you passed ``grid`` is set, you can pass in the parameter ranges in list form.
parameters : list[str], optional
A list of parameter names, one for each column (dimension) in the chain. This parameter
should remain ``None`` if a dictionary is given as ``chain``, as the parameter names
are taken from the dictionary keys.
name : str, optional
The name of the chain. Used when plotting multiple chains at once.
weights : ndarray, optional
If given, uses this array to weight the samples in chain
posterior : ndarray, optional
If given, records the log posterior for each sample in the chain
walkers : int, optional
How many walkers went into creating the chain. Each walker should
contribute the same number of steps, and should appear in contiguous
blocks in the final chain.
grid : boolean, optional
Whether the input is a flattened chain from a grid search instead of a Monte-Carlo
chains. Note that when this is set, `walkers` should not be set, and `weights` should
be set to the posterior evaluation for the grid point. **Be careful** when using
a coarse grid of setting a high smoothing value, as this may oversmooth the posterior
surface and give unreasonably large parameter bounds.
num_eff_data_points : int|float, optional
The number of effective (independent) data points used in the model fitting. Not required
for plotting, but required if loading in multiple chains to perform model comparison.
num_free_params : int, optional
The number of degrees of freedom in your model. Not required for plotting, but required if
loading in multiple chains to perform model comparison.
color : str(hex), optional
Provide a colour for the chain. Can be used instead of calling `configure` for convenience.
linewidth : float, optional
Provide a line width to plot the contours. Can be used instead of calling `configure` for convenience.
linestyle : str, optional
Provide a line style to plot the contour. Can be used instead of calling `configure` for convenience.
kde : bool|float, optional
Set the `kde` value for this specific chain. Can be used instead of calling `configure` for convenience.
shade : bool, optional
If set, overrides the default behaviour and plots filled contours or not. If a list of
bools is passed, you can turn shading on or off for specific chains.
shade_alpha : float, optional
Filled contour alpha value. Can be used instead of calling `configure` for convenience.
power : float, optional
The power to raise the posterior surface to. Useful for inflating or deflating uncertainty for debugging.
marker_style : str, optional
The marker style to use when plotting points. Defaults to `'.'`
marker_size : numeric, optional
Size of markers, if plotted. Defaults to `4`.
marker_alpha : numeric, optional
The alpha values when plotting markers.
plot_contour : bool, optional
Whether to plot the whole contour (as opposed to a point). Defaults to true for less than
25 concurrent chains.
plot_point : bool, optional
Whether to plot a maximum likelihood point. Defaults to true for more than 24 chains.
statistics : string, optional
Which sort of statistics to use. Defaults to `"max"` for maximum likelihood
statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`,
`"max_closest"` and `"max_central"`. In the
very, very rare case you want to enable different statistics for different
chains, you can pass in a list of strings.
cloud : bool, optional
If set, overrides the default behaviour and plots the cloud or not
shade_gradient :
bar_shade : bool, optional
If set to true, shades in confidence regions under the histogram. By default
this happens if you have less than 3 chains, but is disabled if you are comparing
more chains. You can pass a list if you wish to shade some chains but not others.
bins : int|float, optional
The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where
:math:`n` are the number of data points. Giving an integer will set the number
of bins to the given value. Giving a float will scale the number of bins, such
that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins.
Note this parameter is most useful if `kde=False` is also passed, so you
can actually see the bins and not a KDE.
smooth :
color_params : str, optional
The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set
to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain,
it will respectively use the weights, log weights, or posterior, to colour the points.
plot_color_params : bool, optional
Whether or not the colour parameter should also be plotted as a posterior surface.
cmaps : str, optional
The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can
specify a different cmap for each variable. By default ChainConsumer will cycle between several
cmaps.
num_cloud : int, optional
The number of scatter points to show when enabling `cloud` or setting one of the parameters
to colour scatter. Defaults to 15k per chain.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
"""
is_dict = False
assert chain is not None, "You cannot have a chain of None"
if isinstance(chain, str):
if chain.endswith("txt"):
chain = np.loadtxt(chain)
else:
chain = np.load(chain)
elif isinstance(chain, dict):
assert parameters is None, \
"You cannot pass a dictionary and specify parameter names"
is_dict = True
parameters = list(chain.keys())
chain = np.array([chain[p] for p in parameters]).T
elif isinstance(chain, list):
chain = np.array(chain).T
if grid:
assert walkers is None, "If grid is set, walkers should not be"
assert weights is not None, "If grid is set, you need to supply weights"
if len(weights.shape) > 1:
assert not is_dict, "We cannot construct a meshgrid from a dictionary, as the parameters" \
"are no longer ordered. Please pass in a flattened array instead."
self._logger.info("Constructing meshgrid for grid results")
meshes = np.meshgrid(*[u for u in chain.T], indexing="ij")
chain = np.vstack([m.flatten() for m in meshes]).T
weights = weights.flatten()
assert weights.size == chain[:,
0].size, "Error, given weight array size disagrees with parameter sampling"
if len(chain.shape) == 1:
chain = chain[None].T
if name is None:
name = "Chain %d" % len(self.chains)
if power is not None:
assert isinstance(power, int) or isinstance(power, float), "Power should be numeric, but is %s" % type(
power)
if self._default_parameters is None and parameters is not None:
self._default_parameters = parameters
if parameters is None:
if self._default_parameters is not None:
assert chain.shape[1] == len(self._default_parameters), \
"Chain has %d dimensions, but default parameters have %d dimensions" \
% (chain.shape[1], len(self._default_parameters))
parameters = self._default_parameters
self._logger.debug("Adding chain using default parameters")
else:
self._logger.debug("Adding chain with no parameter names")
parameters = ["%d" % x for x in range(chain.shape[1])]
else:
self._logger.debug("Adding chain with defined parameters")
assert len(parameters) <= chain.shape[1], \
"Have only %d columns in chain, but have been given %d parameters names! " \
"Please double check this." % (chain.shape[1], len(parameters))
for p in parameters:
if p not in self._all_parameters:
self._all_parameters.append(p)
# Sorry, no KDE for you on a grid.
if grid:
kde = None
if color is not None:
color = self.color_finder.get_formatted([color])[0]
c = Chain(chain, parameters, name, weights=weights, posterior=posterior, walkers=walkers,
grid=grid, num_free_params=num_free_params, num_eff_data_points=num_eff_data_points,
color=color, linewidth=linewidth, linestyle=linestyle, kde=kde, shade_alpha=shade_alpha, power=power,
marker_style=marker_style, marker_size=marker_size, marker_alpha=marker_alpha,
plot_contour=plot_contour, plot_point=plot_point, statistics=statistics, cloud=cloud,
shade=shade, shade_gradient=shade_gradient, bar_shade=bar_shade, bins=bins, smooth=smooth,
color_params=color_params, plot_color_params=plot_color_params, cmap=cmap,
num_cloud=num_cloud)
self.chains.append(c)
self._init_params()
return self |
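
A short usage sketch with synthetic samples; the parameter labels and covariance are purely illustrative:

import numpy as np
from chainconsumer import ChainConsumer

samples = np.random.multivariate_normal([0.0, 1.0], [[1.0, 0.5], [0.5, 2.0]], size=100000)
c = ChainConsumer()
c.add_chain(samples, parameters=[r"$x$", r"$y$"], name="Synthetic chain")
# Further chains can be added the same way and compared on a single corner plot.
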
def detail(self, *args, **kwargs):
prefix = kwargs.pop("prefix", default_prefix)
# remove dublicates
kwargs["votes"] = list(set(kwargs["votes"]))
""" This is an example how to sort votes prior to using them in the
Object
"""
# # Sort votes
# kwargs["votes"] = sorted(
# kwargs["votes"],
# key=lambda x: float(x.split(":")[1]),
# )
return OrderedDict(
[
("memo_key", PublicKey(kwargs["memo_key"], prefix=prefix)),
("voting_account", ObjectId(kwargs["voting_account"], "account")),
("num_witness", Uint16(kwargs["num_witness"])),
("num_committee", Uint16(kwargs["num_committee"])),
("votes", Array([VoteId(o) for o in kwargs["votes"]])),
("extensions", Set([])),
]
) | This is an example how to sort votes prior to using them in the
Object | Below is the instruction that describes the task:
### Input:
This is an example how to sort votes prior to using them in the
Object
### Response:
def detail(self, *args, **kwargs):
prefix = kwargs.pop("prefix", default_prefix)
# remove dublicates
kwargs["votes"] = list(set(kwargs["votes"]))
""" This is an example how to sort votes prior to using them in the
Object
"""
# # Sort votes
# kwargs["votes"] = sorted(
# kwargs["votes"],
# key=lambda x: float(x.split(":")[1]),
# )
return OrderedDict(
[
("memo_key", PublicKey(kwargs["memo_key"], prefix=prefix)),
("voting_account", ObjectId(kwargs["voting_account"], "account")),
("num_witness", Uint16(kwargs["num_witness"])),
("num_committee", Uint16(kwargs["num_committee"])),
("votes", Array([VoteId(o) for o in kwargs["votes"]])),
("extensions", Set([])),
]
) |
def policy_factory_from_module(config, module):
"""Create a policy factory that works by config.include()'ing a module.
This function does some trickery with the Pyramid config system. Loosely,
it does config.include(module), and then sucks out information about the
authn policy that was registered. It's complicated by pyramid's delayed-
commit system, which means we have to do the work via callbacks.
"""
# Remember the policy that's active before including the module, if any.
orig_policy = config.registry.queryUtility(IAuthenticationPolicy)
# Include the module, so we get any default views etc.
config.include(module)
# That might have registered and committed a new policy object.
policy = config.registry.queryUtility(IAuthenticationPolicy)
if policy is not None and policy is not orig_policy:
return lambda: policy
# Or it might have set up a pending action to register one later.
# Find the most recent IAuthenticationPolicy action, and grab
# out the registering function so we can call it ourselves.
for action in reversed(config.action_state.actions):
# Extract the discriminator and callable. This is complicated by
# Pyramid 1.3 changing action from a tuple to a dict.
try:
discriminator = action["discriminator"]
callable = action["callable"]
except TypeError: # pragma: nocover
discriminator = action[0] # pragma: nocover
callable = action[1] # pragma: nocover
# If it's not setting the authn policy, keep looking.
if discriminator is not IAuthenticationPolicy:
continue
# Otherwise, wrap it up so we can extract the registered object.
def grab_policy(register=callable):
old_policy = config.registry.queryUtility(IAuthenticationPolicy)
register()
new_policy = config.registry.queryUtility(IAuthenticationPolicy)
config.registry.registerUtility(old_policy, IAuthenticationPolicy)
return new_policy
return grab_policy
# Or it might not have done *anything*.
# So return a null policy factory.
return lambda: None | Create a policy factory that works by config.include()'ing a module.
This function does some trickery with the Pyramid config system. Loosely,
it does config.include(module), and then sucks out information about the
authn policy that was registered. It's complicated by pyramid's delayed-
commit system, which means we have to do the work via callbacks. | Below is the instruction that describes the task:
### Input:
Create a policy factory that works by config.include()'ing a module.
This function does some trickery with the Pyramid config system. Loosely,
it does config.include(module), and then sucks out information about the
authn policy that was registered. It's complicated by pyramid's delayed-
commit system, which means we have to do the work via callbacks.
### Response:
def policy_factory_from_module(config, module):
"""Create a policy factory that works by config.include()'ing a module.
This function does some trickery with the Pyramid config system. Loosely,
it does config.include(module), and then sucks out information about the
authn policy that was registered. It's complicated by pyramid's delayed-
commit system, which means we have to do the work via callbacks.
"""
# Remember the policy that's active before including the module, if any.
orig_policy = config.registry.queryUtility(IAuthenticationPolicy)
# Include the module, so we get any default views etc.
config.include(module)
# That might have registered and committed a new policy object.
policy = config.registry.queryUtility(IAuthenticationPolicy)
if policy is not None and policy is not orig_policy:
return lambda: policy
# Or it might have set up a pending action to register one later.
# Find the most recent IAuthenticationPolicy action, and grab
# out the registering function so we can call it ourselves.
for action in reversed(config.action_state.actions):
# Extract the discriminator and callable. This is complicated by
# Pyramid 1.3 changing action from a tuple to a dict.
try:
discriminator = action["discriminator"]
callable = action["callable"]
except TypeError: # pragma: nocover
discriminator = action[0] # pragma: nocover
callable = action[1] # pragma: nocover
# If it's not setting the authn policy, keep looking.
if discriminator is not IAuthenticationPolicy:
continue
# Otherwise, wrap it up so we can extract the registered object.
def grab_policy(register=callable):
old_policy = config.registry.queryUtility(IAuthenticationPolicy)
register()
new_policy = config.registry.queryUtility(IAuthenticationPolicy)
config.registry.registerUtility(old_policy, IAuthenticationPolicy)
return new_policy
return grab_policy
# Or it might not have done *anything*.
# So return a null policy factory.
return lambda: None |
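
A hypothetical call from inside another Pyramid include; 'myapp.auth' is a placeholder for a module assumed to register an authentication policy when included:

factory = policy_factory_from_module(config, 'myapp.auth')
policy = factory()  # the IAuthenticationPolicy that module registers, or None
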
def crc7(data):
"""
Compute CRC of a whole message.
"""
crc = 0
for c in data:
crc = CRC7_TABLE[crc ^ c]
return crc | Compute CRC of a whole message. | Below is the instruction that describes the task:
### Input:
Compute CRC of a whole message.
### Response:
def crc7(data):
"""
Compute CRC of a whole message.
"""
crc = 0
for c in data:
crc = CRC7_TABLE[crc ^ c]
return crc |
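
The 256-entry lookup table is not shown above. One common construction, sketched below, keeps the SD/MMC CRC-7 polynomial x^7 + x^3 + 1 left-justified in a byte so that crc7() can index with crc ^ c directly; the real CRC7_TABLE may use a different polynomial or alignment:

def build_crc7_table(poly=0x12):  # x^7 + x^3 + 1, left-justified in 8 bits (assumed)
    table = []
    for value in range(256):
        crc = value
        for _ in range(8):
            crc = ((crc << 1) ^ poly) & 0xFF if crc & 0x80 else (crc << 1) & 0xFF
        table.append(crc)
    return table
# CRC7_TABLE = build_crc7_table()  # only if the module uses this same convention
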
def set_coords(self, x=0, y=0, z=0, t=0):
"""
set coords of agent in an arbitrary world
"""
self.coords = {}
self.coords['x'] = x
self.coords['y'] = y
self.coords['z'] = z
self.coords['t'] = t | set coords of agent in an arbitrary world | Below is the instruction that describes the task:
### Input:
set coords of agent in an arbitrary world
### Response:
def set_coords(self, x=0, y=0, z=0, t=0):
"""
set coords of agent in an arbitrary world
"""
self.coords = {}
self.coords['x'] = x
self.coords['y'] = y
self.coords['z'] = z
self.coords['t'] = t |
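
Trivial usage sketch, assuming `agent` is an instance of the class that defines the method above:

agent.set_coords(x=3, y=5, z=0, t=12)
print(agent.coords)  # {'x': 3, 'y': 5, 'z': 0, 't': 12}
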
def interpolate(self, transform, transitions=None, Y=None):
"""Interpolate new data onto a transformation of the graph data
One of either transitions or Y should be provided
Parameters
----------
transform : array-like, shape=[n_samples, n_transform_features]
transitions : array-like, optional, shape=[n_samples_y, n_samples]
Transition matrix from `Y` (not provided) to `self.data`
Y: array-like, optional, shape=[n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
Y_transform : array-like, [n_samples_y, n_features or n_pca]
Transition matrix from `Y` to `self.data`
"""
if transitions is None and Y is None:
# assume Y is self.data and use standard landmark transitions
transitions = self.transitions
return super().interpolate(transform, transitions=transitions, Y=Y) | Interpolate new data onto a transformation of the graph data
One of either transitions or Y should be provided
Parameters
----------
transform : array-like, shape=[n_samples, n_transform_features]
transitions : array-like, optional, shape=[n_samples_y, n_samples]
Transition matrix from `Y` (not provided) to `self.data`
Y: array-like, optional, shape=[n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
Y_transform : array-like, [n_samples_y, n_features or n_pca]
Transition matrix from `Y` to `self.data` | Below is the instruction that describes the task:
### Input:
Interpolate new data onto a transformation of the graph data
One of either transitions or Y should be provided
Parameters
----------
transform : array-like, shape=[n_samples, n_transform_features]
transitions : array-like, optional, shape=[n_samples_y, n_samples]
Transition matrix from `Y` (not provided) to `self.data`
Y: array-like, optional, shape=[n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
Y_transform : array-like, [n_samples_y, n_features or n_pca]
Transition matrix from `Y` to `self.data`
### Response:
def interpolate(self, transform, transitions=None, Y=None):
"""Interpolate new data onto a transformation of the graph data
One of either transitions or Y should be provided
Parameters
----------
transform : array-like, shape=[n_samples, n_transform_features]
transitions : array-like, optional, shape=[n_samples_y, n_samples]
Transition matrix from `Y` (not provided) to `self.data`
Y: array-like, optional, shape=[n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
Y_transform : array-like, [n_samples_y, n_features or n_pca]
Transition matrix from `Y` to `self.data`
"""
if transitions is None and Y is None:
# assume Y is self.data and use standard landmark transitions
transitions = self.transitions
return super().interpolate(transform, transitions=transitions, Y=Y) |
async def select_page(self, info: SQLQueryInfo, size=1, page=1) -> Tuple[Tuple[DataRecord, ...], int]:
"""
Select from database
:param info:
:param size: -1 means infinite
:param page:
:param need_count: if True, get count as second return value, otherwise -1
:return: records. count
"""
raise NotImplementedError() | Select from database
:param info:
:param size: -1 means infinite
:param page:
:param need_count: if True, get count as second return value, otherwise -1
:return: records, count | Below is the instruction that describes the task:
### Input:
Select from database
:param info:
:param size: -1 means infinite
:param page:
:param need_count: if True, get count as second return value, otherwise -1
:return: records. count
### Response:
async def select_page(self, info: SQLQueryInfo, size=1, page=1) -> Tuple[Tuple[DataRecord, ...], int]:
"""
Select from database
:param info:
:param size: -1 means infinite
:param page:
:param need_count: if True, get count as second return value, otherwise -1
:return: records, count
"""
raise NotImplementedError() |
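
The method above is abstract. A hedged sketch of what a concrete override might look like; self._db and its fetch/count calls are placeholders rather than the project's real storage layer:

async def select_page(self, info, size=1, page=1):
    limit = None if size == -1 else size
    offset = 0 if size == -1 else (page - 1) * size
    rows = await self._db.fetch(info, limit=limit, offset=offset)  # placeholder query call
    count = await self._db.count(info)                             # placeholder count call
    return tuple(rows), count
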
def touched_files(self, parent):
"""
:API: public
"""
try:
return self._scm.changed_files(from_commit=parent,
include_untracked=True,
relative_to=get_buildroot())
except Scm.ScmException as e:
raise self.WorkspaceError("Problem detecting changed files.", e) | :API: public | Below is the instruction that describes the task:
### Input:
:API: public
### Response:
def touched_files(self, parent):
"""
:API: public
"""
try:
return self._scm.changed_files(from_commit=parent,
include_untracked=True,
relative_to=get_buildroot())
except Scm.ScmException as e:
raise self.WorkspaceError("Problem detecting changed files.", e) |
def get_args(self, state, is_fp=None, sizes=None, stack_base=None):
"""
`is_fp` should be a list of booleans specifying whether each corresponding argument is floating-point -
True for fp and False for int. For a shorthand to assume that all the parameters are int, pass the number of
parameters as an int.
If you've customized this CC, you may omit this parameter entirely. If it is provided, it is used for
sanity-checking.
`sizes` is an optional list of argument sizes, in bytes. Be careful about using this if you've made explicit
the arg locations, since it might decide to combine two locations into one if an arg is too big.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
Returns a list of bitvector expressions representing the arguments of a function.
"""
if sizes is None and self.func_ty is not None:
sizes = [arg.size for arg in self.func_ty.args]
if is_fp is None:
if self.args is None:
if self.func_ty is None:
raise ValueError("You must either customize this CC or pass a value to is_fp!")
else:
arg_locs = self.arg_locs([False]*len(self.func_ty.args))
else:
arg_locs = self.args
elif type(is_fp) is int:
if self.args is not None and len(self.args) != is_fp:
raise ValueError("Bad number of args requested: got %d, expected %d" % (is_fp, len(self.args)))
arg_locs = self.arg_locs([False]*is_fp, sizes)
else:
arg_locs = self.arg_locs(is_fp, sizes)
return [loc.get_value(state, stack_base=stack_base) for loc in arg_locs] | `is_fp` should be a list of booleans specifying whether each corresponding argument is floating-point -
True for fp and False for int. For a shorthand to assume that all the parameters are int, pass the number of
parameters as an int.
If you've customized this CC, you may omit this parameter entirely. If it is provided, it is used for
sanity-checking.
`sizes` is an optional list of argument sizes, in bytes. Be careful about using this if you've made explicit
the arg locations, since it might decide to combine two locations into one if an arg is too big.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
Returns a list of bitvector expressions representing the arguments of a function. | Below is the instruction that describes the task:
### Input:
`is_fp` should be a list of booleans specifying whether each corresponding argument is floating-point -
True for fp and False for int. For a shorthand to assume that all the parameters are int, pass the number of
parameters as an int.
If you've customized this CC, you may omit this parameter entirely. If it is provided, it is used for
sanity-checking.
`sizes` is an optional list of argument sizes, in bytes. Be careful about using this if you've made explicit
the arg locations, since it might decide to combine two locations into one if an arg is too big.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
Returns a list of bitvector expressions representing the arguments of a function.
### Response:
def get_args(self, state, is_fp=None, sizes=None, stack_base=None):
"""
`is_fp` should be a list of booleans specifying whether each corresponding argument is floating-point -
True for fp and False for int. For a shorthand to assume that all the parameters are int, pass the number of
parameters as an int.
If you've customized this CC, you may omit this parameter entirely. If it is provided, it is used for
sanity-checking.
`sizes` is an optional list of argument sizes, in bytes. Be careful about using this if you've made explicit
the arg locations, since it might decide to combine two locations into one if an arg is too big.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
Returns a list of bitvector expressions representing the arguments of a function.
"""
if sizes is None and self.func_ty is not None:
sizes = [arg.size for arg in self.func_ty.args]
if is_fp is None:
if self.args is None:
if self.func_ty is None:
raise ValueError("You must either customize this CC or pass a value to is_fp!")
else:
arg_locs = self.arg_locs([False]*len(self.func_ty.args))
else:
arg_locs = self.args
elif type(is_fp) is int:
if self.args is not None and len(self.args) != is_fp:
raise ValueError("Bad number of args requested: got %d, expected %d" % (is_fp, len(self.args)))
arg_locs = self.arg_locs([False]*is_fp, sizes)
else:
arg_locs = self.arg_locs(is_fp, sizes)
return [loc.get_value(state, stack_base=stack_base) for loc in arg_locs] |
def display_name(name, obj, local):
"""
Get the display name of an object.
Keyword arguments (all required):
* ``name`` -- the name of the object as a string.
* ``obj`` -- the object itself.
* ``local`` -- a boolean value indicating whether the object is in local
scope or owned by an object.
"""
prefix = '' if local else '.'
if isinstance(obj, SeeError):
suffix = '?'
elif hasattr(obj, '__call__'):
suffix = '()'
else:
suffix = ''
return ''.join((prefix, name, suffix)) | Get the display name of an object.
Keyword arguments (all required):
* ``name`` -- the name of the object as a string.
* ``obj`` -- the object itself.
* ``local`` -- a boolean value indicating whether the object is in local
scope or owned by an object. | Below is the instruction that describes the task:
### Input:
Get the display name of an object.
Keyword arguments (all required):
* ``name`` -- the name of the object as a string.
* ``obj`` -- the object itself.
* ``local`` -- a boolean value indicating whether the object is in local
scope or owned by an object.
### Response:
def display_name(name, obj, local):
"""
Get the display name of an object.
Keyword arguments (all required):
* ``name`` -- the name of the object as a string.
* ``obj`` -- the object itself.
* ``local`` -- a boolean value indicating whether the object is in local
scope or owned by an object.
"""
prefix = '' if local else '.'
if isinstance(obj, SeeError):
suffix = '?'
elif hasattr(obj, '__call__'):
suffix = '()'
else:
suffix = ''
return ''.join((prefix, name, suffix)) |
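An illustrative call of display_name, assuming the surrounding module (which defines SeeError) is importable; the inputs are arbitrary.
# Hypothetical inputs: an owned callable attribute and a plain local value.
import math

print(display_name("sqrt", math.sqrt, local=False))   # ".sqrt()"  - owned and callable
print(display_name("total", 42, local=True))          # "total"    - local, not callable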
def mol_supplier(lines, no_halt, assign_descriptors):
"""Yields molecules generated from CTAB text
Args:
lines (iterable): CTAB text lines
no_halt (boolean):
            True: shows warning messages for invalid format and goes on.
            False: throws an exception for it and stops parsing.
assign_descriptors (boolean):
if True, default descriptors are automatically assigned.
"""
def sdf_block(lns):
mol = []
opt = []
is_mol = True
for line in lns:
if line.startswith("$$$$"):
yield mol[:], opt[:]
is_mol = True
mol.clear()
opt.clear()
elif line.startswith("M END"):
is_mol = False
elif is_mol:
mol.append(line.rstrip())
else:
opt.append(line.rstrip())
if mol:
yield mol, opt
for i, (mol, opt) in enumerate(sdf_block(lines)):
try:
c = molecule(mol)
if assign_descriptors:
molutil.assign_descriptors(c)
except ValueError as err:
if no_halt:
print("Unsupported symbol: {} (#{} in v2000reader)".format(
err, i + 1))
c = molutil.null_molecule(assign_descriptors)
else:
raise ValueError("Unsupported symbol: {}".format(err))
except RuntimeError as err:
if no_halt:
print(
"Failed to minimize ring: {} (#{} in v2000reader)".format(
err, i + 1)
)
else:
raise RuntimeError("Failed to minimize ring: {}".format(err))
except:
if no_halt:
print("Unexpected error (#{} in v2000reader)".format(i + 1))
c = molutil.null_molecule(assign_descriptors)
c.data = optional_data(opt)
yield c
continue
else:
print(traceback.format_exc())
raise Exception("Unsupported Error")
c.data = optional_data(opt)
yield c | Yields molecules generated from CTAB text
Args:
lines (iterable): CTAB text lines
no_halt (boolean):
        True: shows warning messages for invalid format and goes on.
        False: throws an exception for it and stops parsing.
assign_descriptors (boolean):
        if True, default descriptors are automatically assigned. | Below is the instruction that describes the task:
### Input:
Yields molecules generated from CTAB text
Args:
lines (iterable): CTAB text lines
no_halt (boolean):
        True: shows warning messages for invalid format and goes on.
        False: throws an exception for it and stops parsing.
assign_descriptors (boolean):
if True, default descriptors are automatically assigned.
### Response:
def mol_supplier(lines, no_halt, assign_descriptors):
"""Yields molecules generated from CTAB text
Args:
lines (iterable): CTAB text lines
no_halt (boolean):
            True: shows warning messages for invalid format and goes on.
            False: throws an exception for it and stops parsing.
assign_descriptors (boolean):
if True, default descriptors are automatically assigned.
"""
def sdf_block(lns):
mol = []
opt = []
is_mol = True
for line in lns:
if line.startswith("$$$$"):
yield mol[:], opt[:]
is_mol = True
mol.clear()
opt.clear()
elif line.startswith("M END"):
is_mol = False
elif is_mol:
mol.append(line.rstrip())
else:
opt.append(line.rstrip())
if mol:
yield mol, opt
for i, (mol, opt) in enumerate(sdf_block(lines)):
try:
c = molecule(mol)
if assign_descriptors:
molutil.assign_descriptors(c)
except ValueError as err:
if no_halt:
print("Unsupported symbol: {} (#{} in v2000reader)".format(
err, i + 1))
c = molutil.null_molecule(assign_descriptors)
else:
raise ValueError("Unsupported symbol: {}".format(err))
except RuntimeError as err:
if no_halt:
print(
"Failed to minimize ring: {} (#{} in v2000reader)".format(
err, i + 1)
)
else:
raise RuntimeError("Failed to minimize ring: {}".format(err))
except:
if no_halt:
print("Unexpected error (#{} in v2000reader)".format(i + 1))
c = molutil.null_molecule(assign_descriptors)
c.data = optional_data(opt)
yield c
continue
else:
print(traceback.format_exc())
raise Exception("Unsupported Error")
c.data = optional_data(opt)
yield c |
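A short usage sketch, assuming this generator lives in its v2000reader module alongside the molecule/molutil helpers; the SDF file name is hypothetical.
# Stream molecules out of an SDF file, tolerating records that fail to parse.
with open("compounds.sdf") as sdf:                      # file name is an assumption
    for mol in mol_supplier(sdf, no_halt=True, assign_descriptors=True):
        print(mol.data)                                 # optional data block of each record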
def V_horiz_ellipsoidal(D, L, a, h, headonly=False):
r'''Calculates volume of a tank with ellipsoidal ends, according to [1]_.
.. math::
V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right)
.. math::
Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
L : float
Length of the main cylindrical section, [m]
a : float
Distance the ellipsoidal head extends on one side, [m]
h : float
Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
Function returns only the volume of a single head side if True
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_horiz_ellipsoidal(D=108, L=156, a=42, h=36)/231.
2380.9565415578145
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF'''
R = 0.5*D
Af = R*R*acos((R-h)/R) - (R-h)*(2*R*h - h*h)**0.5
Vf = pi*a*h*h*(1 - h/(3.*R))
if headonly:
Vf = 0.5*Vf
else:
Vf += Af*L
return Vf | r'''Calculates volume of a tank with ellipsoidal ends, according to [1]_.
.. math::
V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right)
.. math::
Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
L : float
Length of the main cylindrical section, [m]
a : float
Distance the ellipsoidal head extends on one side, [m]
h : float
Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
Function returns only the volume of a single head side if True
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_horiz_ellipsoidal(D=108, L=156, a=42, h=36)/231.
2380.9565415578145
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
   http://www.webcalc.com.br/blog/Tank_Volume.PDF | Below is the instruction that describes the task:
### Input:
r'''Calculates volume of a tank with ellipsoidal ends, according to [1]_.
.. math::
V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right)
.. math::
Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
L : float
Length of the main cylindrical section, [m]
a : float
Distance the ellipsoidal head extends on one side, [m]
h : float
Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
Function returns only the volume of a single head side if True
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_horiz_ellipsoidal(D=108, L=156, a=42, h=36)/231.
2380.9565415578145
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF
### Response:
def V_horiz_ellipsoidal(D, L, a, h, headonly=False):
r'''Calculates volume of a tank with ellipsoidal ends, according to [1]_.
.. math::
V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right)
.. math::
Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
L : float
Length of the main cylindrical section, [m]
a : float
Distance the ellipsoidal head extends on one side, [m]
h : float
Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
Function returns only the volume of a single head side if True
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_horiz_ellipsoidal(D=108, L=156, a=42, h=36)/231.
2380.9565415578145
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF'''
R = 0.5*D
Af = R*R*acos((R-h)/R) - (R-h)*(2*R*h - h*h)**0.5
Vf = pi*a*h*h*(1 - h/(3.*R))
if headonly:
Vf = 0.5*Vf
else:
Vf += Af*L
return Vf |
def categorical_df_concat(df_list, inplace=False):
"""
Prepare list of pandas DataFrames to be used as input to pd.concat.
Ensure any columns of type 'category' have the same categories across each
dataframe.
Parameters
----------
df_list : list
List of dataframes with same columns.
inplace : bool
True if input list can be modified. Default is False.
Returns
-------
concatenated : df
Dataframe of concatenated list.
"""
if not inplace:
df_list = deepcopy(df_list)
# Assert each dataframe has the same columns/dtypes
df = df_list[0]
if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]):
raise ValueError("Input DataFrames must have the same columns/dtypes.")
categorical_columns = df.columns[df.dtypes == 'category']
for col in categorical_columns:
new_categories = sorted(
set().union(
*(frame[col].cat.categories for frame in df_list)
)
)
with ignore_pandas_nan_categorical_warning():
for df in df_list:
df[col].cat.set_categories(new_categories, inplace=True)
return pd.concat(df_list) | Prepare list of pandas DataFrames to be used as input to pd.concat.
Ensure any columns of type 'category' have the same categories across each
dataframe.
Parameters
----------
df_list : list
List of dataframes with same columns.
inplace : bool
True if input list can be modified. Default is False.
Returns
-------
concatenated : df
    Dataframe of concatenated list. | Below is the instruction that describes the task:
### Input:
Prepare list of pandas DataFrames to be used as input to pd.concat.
Ensure any columns of type 'category' have the same categories across each
dataframe.
Parameters
----------
df_list : list
List of dataframes with same columns.
inplace : bool
True if input list can be modified. Default is False.
Returns
-------
concatenated : df
Dataframe of concatenated list.
### Response:
def categorical_df_concat(df_list, inplace=False):
"""
Prepare list of pandas DataFrames to be used as input to pd.concat.
Ensure any columns of type 'category' have the same categories across each
dataframe.
Parameters
----------
df_list : list
List of dataframes with same columns.
inplace : bool
True if input list can be modified. Default is False.
Returns
-------
concatenated : df
Dataframe of concatenated list.
"""
if not inplace:
df_list = deepcopy(df_list)
# Assert each dataframe has the same columns/dtypes
df = df_list[0]
if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]):
raise ValueError("Input DataFrames must have the same columns/dtypes.")
categorical_columns = df.columns[df.dtypes == 'category']
for col in categorical_columns:
new_categories = sorted(
set().union(
*(frame[col].cat.categories for frame in df_list)
)
)
with ignore_pandas_nan_categorical_warning():
for df in df_list:
df[col].cat.set_categories(new_categories, inplace=True)
return pd.concat(df_list) |
def update(self, changed_state_model=None, with_expand=False):
"""Checks if all states are in tree and if tree has states which were deleted
        :param changed_state_model: Model whose row has to be updated
:param with_expand: The expand flag for the tree
"""
if not self.view_is_registered:
return
# define initial state-model for update
if changed_state_model is None:
# reset all
parent_row_iter = None
self.state_row_iter_dict_by_state_path.clear()
self.tree_store.clear()
if self._selected_sm_model:
changed_state_model = self._selected_sm_model.root_state
else:
return
else: # pick
if changed_state_model.state.is_root_state:
parent_row_iter = self.state_row_iter_dict_by_state_path[changed_state_model.state.get_path()]
else:
if changed_state_model.state.is_root_state_of_library:
# because either lib-state or lib-state-root is in tree the next higher hierarchy state is updated
changed_upper_state_m = changed_state_model.parent.parent
else:
changed_upper_state_m = changed_state_model.parent
# TODO check the work around of the next 2 lines while refactoring -> it is a check to be more robust
while changed_upper_state_m.state.get_path() not in self.state_row_iter_dict_by_state_path:
                    # show a warning because the states_update method was avoided
logger.warning("Take a parent state because this is not in.")
changed_upper_state_m = changed_upper_state_m.parent
parent_row_iter = self.state_row_iter_dict_by_state_path[changed_upper_state_m.state.get_path()]
# do recursive update
self.insert_and_update_recursively(parent_row_iter, changed_state_model, with_expand) | Checks if all states are in tree and if tree has states which were deleted
:param changed_state_model: Model whose row has to be updated
:param with_expand: The expand flag for the tree | Below is the instruction that describes the task:
### Input:
Checks if all states are in tree and if tree has states which were deleted
:param changed_state_model: Model whose row has to be updated
:param with_expand: The expand flag for the tree
### Response:
def update(self, changed_state_model=None, with_expand=False):
"""Checks if all states are in tree and if tree has states which were deleted
        :param changed_state_model: Model whose row has to be updated
:param with_expand: The expand flag for the tree
"""
if not self.view_is_registered:
return
# define initial state-model for update
if changed_state_model is None:
# reset all
parent_row_iter = None
self.state_row_iter_dict_by_state_path.clear()
self.tree_store.clear()
if self._selected_sm_model:
changed_state_model = self._selected_sm_model.root_state
else:
return
else: # pick
if changed_state_model.state.is_root_state:
parent_row_iter = self.state_row_iter_dict_by_state_path[changed_state_model.state.get_path()]
else:
if changed_state_model.state.is_root_state_of_library:
# because either lib-state or lib-state-root is in tree the next higher hierarchy state is updated
changed_upper_state_m = changed_state_model.parent.parent
else:
changed_upper_state_m = changed_state_model.parent
# TODO check the work around of the next 2 lines while refactoring -> it is a check to be more robust
while changed_upper_state_m.state.get_path() not in self.state_row_iter_dict_by_state_path:
                    # show a warning because the states_update method was avoided
logger.warning("Take a parent state because this is not in.")
changed_upper_state_m = changed_upper_state_m.parent
parent_row_iter = self.state_row_iter_dict_by_state_path[changed_upper_state_m.state.get_path()]
# do recursive update
self.insert_and_update_recursively(parent_row_iter, changed_state_model, with_expand) |
def _refresh_hierarchy_recursive(self, cached_hierarchy, file_hierarchy):
"""Recursively goes through given corresponding hierarchies from cache and filesystem
and adds/refreshes/removes added/changed/removed assistants.
Args:
cached_hierarchy: the respective hierarchy part from current cache
(for format see Cache class docstring)
file_hierarchy: the respective hierarchy part from filesystem
(for format see what refresh_role accepts)
Returns:
True if self.cache has been changed, False otherwise (doesn't write anything
to cache file)
"""
was_change = False
cached_ass = set(cached_hierarchy.keys())
new_ass = set(file_hierarchy.keys())
to_add = new_ass - cached_ass
to_remove = cached_ass - new_ass
to_check = cached_ass - to_remove
if to_add or to_remove:
was_change = True
for ass in to_add:
cached_hierarchy[ass] = self._new_ass_hierarchy(file_hierarchy[ass])
for ass in to_remove:
del cached_hierarchy[ass]
for ass in to_check:
needs_refresh = False
try:
needs_refresh = self._ass_needs_refresh(cached_hierarchy[ass], file_hierarchy[ass])
except:
needs_refresh = True
if needs_refresh:
self._ass_refresh_attrs(cached_hierarchy[ass], file_hierarchy[ass])
was_change = True
was_change |= self._refresh_hierarchy_recursive(
cached_hierarchy[ass]['subhierarchy'],
file_hierarchy[ass]['subhierarchy'])
return was_change | Recursively goes through given corresponding hierarchies from cache and filesystem
and adds/refreshes/removes added/changed/removed assistants.
Args:
cached_hierarchy: the respective hierarchy part from current cache
(for format see Cache class docstring)
file_hierarchy: the respective hierarchy part from filesystem
(for format see what refresh_role accepts)
Returns:
True if self.cache has been changed, False otherwise (doesn't write anything
        to cache file) | Below is the instruction that describes the task:
### Input:
Recursively goes through given corresponding hierarchies from cache and filesystem
and adds/refreshes/removes added/changed/removed assistants.
Args:
cached_hierarchy: the respective hierarchy part from current cache
(for format see Cache class docstring)
file_hierarchy: the respective hierarchy part from filesystem
(for format see what refresh_role accepts)
Returns:
True if self.cache has been changed, False otherwise (doesn't write anything
to cache file)
### Response:
def _refresh_hierarchy_recursive(self, cached_hierarchy, file_hierarchy):
"""Recursively goes through given corresponding hierarchies from cache and filesystem
and adds/refreshes/removes added/changed/removed assistants.
Args:
cached_hierarchy: the respective hierarchy part from current cache
(for format see Cache class docstring)
file_hierarchy: the respective hierarchy part from filesystem
(for format see what refresh_role accepts)
Returns:
True if self.cache has been changed, False otherwise (doesn't write anything
to cache file)
"""
was_change = False
cached_ass = set(cached_hierarchy.keys())
new_ass = set(file_hierarchy.keys())
to_add = new_ass - cached_ass
to_remove = cached_ass - new_ass
to_check = cached_ass - to_remove
if to_add or to_remove:
was_change = True
for ass in to_add:
cached_hierarchy[ass] = self._new_ass_hierarchy(file_hierarchy[ass])
for ass in to_remove:
del cached_hierarchy[ass]
for ass in to_check:
needs_refresh = False
try:
needs_refresh = self._ass_needs_refresh(cached_hierarchy[ass], file_hierarchy[ass])
except:
needs_refresh = True
if needs_refresh:
self._ass_refresh_attrs(cached_hierarchy[ass], file_hierarchy[ass])
was_change = True
was_change |= self._refresh_hierarchy_recursive(
cached_hierarchy[ass]['subhierarchy'],
file_hierarchy[ass]['subhierarchy'])
return was_change |
def _get_adv_trans_stats(self, cmd, return_tdo=False):
"""Utility function to fetch the transfer statistics for the last
advanced transfer. Checking the stats appears to sync the
controller. For details on the advanced transfer please refer
to the documentation at
http://diamondman.github.io/Adapt/cable_digilent_adept.html#bulk-requests
"""
t = time()
code, res = self.bulkCommand(b'\x03\x02%c\x00'%(0x80|cmd), 10)
if self._scanchain and self._scanchain._print_statistics:
print("GET STATS TIME", time()-t)#pragma: no cover
if len(res) == 4:
count = struct.unpack('<I', res)[0]
return count
elif len(res) == 8:
written, read = struct.unpack('<II', res)
return written, read
return res | Utility function to fetch the transfer statistics for the last
advanced transfer. Checking the stats appears to sync the
controller. For details on the advanced transfer please refer
to the documentation at
        http://diamondman.github.io/Adapt/cable_digilent_adept.html#bulk-requests | Below is the instruction that describes the task:
### Input:
Utility function to fetch the transfer statistics for the last
advanced transfer. Checking the stats appears to sync the
controller. For details on the advanced transfer please refer
to the documentation at
http://diamondman.github.io/Adapt/cable_digilent_adept.html#bulk-requests
### Response:
def _get_adv_trans_stats(self, cmd, return_tdo=False):
"""Utility function to fetch the transfer statistics for the last
advanced transfer. Checking the stats appears to sync the
controller. For details on the advanced transfer please refer
to the documentation at
http://diamondman.github.io/Adapt/cable_digilent_adept.html#bulk-requests
"""
t = time()
code, res = self.bulkCommand(b'\x03\x02%c\x00'%(0x80|cmd), 10)
if self._scanchain and self._scanchain._print_statistics:
print("GET STATS TIME", time()-t)#pragma: no cover
if len(res) == 4:
count = struct.unpack('<I', res)[0]
return count
elif len(res) == 8:
written, read = struct.unpack('<II', res)
return written, read
return res |
def pending_settings(self):
"""Property to provide reference to bios_pending_settings instance
        It is calculated once, the first time it is queried. On refresh,
this property gets reset.
"""
return BIOSPendingSettings(
self._conn, utils.get_subresource_path_by(
self, ["@Redfish.Settings", "SettingsObject"]),
redfish_version=self.redfish_version) | Property to provide reference to bios_pending_settings instance
It is calculated once, the first time it is queried. On refresh,
this property gets reset. | Below is the instruction that describes the task:
### Input:
Property to provide reference to bios_pending_settings instance
It is calculated once, the first time it is queried. On refresh,
this property gets reset.
### Response:
def pending_settings(self):
"""Property to provide reference to bios_pending_settings instance
        It is calculated once, the first time it is queried. On refresh,
this property gets reset.
"""
return BIOSPendingSettings(
self._conn, utils.get_subresource_path_by(
self, ["@Redfish.Settings", "SettingsObject"]),
redfish_version=self.redfish_version) |
def clear(self):
"Remove all items and reset internal structures"
dict.clear(self)
self._key = 0
if hasattr(self._list_view, "wx_obj"):
self._list_view.wx_obj.DeleteAllItems() | Remove all items and reset internal structures | Below is the the instruction that describes the task:
### Input:
Remove all items and reset internal structures
### Response:
def clear(self):
"Remove all items and reset internal structures"
dict.clear(self)
self._key = 0
if hasattr(self._list_view, "wx_obj"):
self._list_view.wx_obj.DeleteAllItems() |
def GetMemBalloonMaxMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetMemBalloonMaxMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value | Undocumented. | Below is the the instruction that describes the task:
### Input:
Undocumented.
### Response:
def GetMemBalloonMaxMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetMemBalloonMaxMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value |
def unique(list1, list2):
"""
Get unique items in list1 that are not in list2
:return: Unique items only in list 1
"""
set2 = set(list2)
list1_unique = [x for x in tqdm(list1, desc='Unique', total=len(list1)) if x not in set2]
return list1_unique | Get unique items in list1 that are not in list2
    :return: Unique items only in list 1 | Below is the instruction that describes the task:
### Input:
Get unique items in list1 that are not in list2
:return: Unique items only in list 1
### Response:
def unique(list1, list2):
"""
Get unique items in list1 that are not in list2
:return: Unique items only in list 1
"""
set2 = set(list2)
list1_unique = [x for x in tqdm(list1, desc='Unique', total=len(list1)) if x not in set2]
return list1_unique |
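A small, self-contained example of the helper above (it only needs tqdm, which the surrounding module already imports):
only_in_first = unique([1, 2, 3, 4], [2, 4, 6])
print(only_in_first)   # [1, 3] - the order of list1 is preserved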
def get_project_root():
""" Determine location of `tasks.py`."""
try:
tasks_py = sys.modules['tasks']
except KeyError:
return None
else:
return os.path.abspath(os.path.dirname(tasks_py.__file__)) | Determine location of `tasks.py`. | Below is the the instruction that describes the task:
### Input:
Determine location of `tasks.py`.
### Response:
def get_project_root():
""" Determine location of `tasks.py`."""
try:
tasks_py = sys.modules['tasks']
except KeyError:
return None
else:
return os.path.abspath(os.path.dirname(tasks_py.__file__)) |
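Illustrative behaviour, assuming the module's own sys/os imports: the helper returns the directory of an already-imported tasks module, or None when none was imported.
root = get_project_root()
print(root or "no tasks module imported yet")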
def getDevStats(self, dev, devtype = None):
"""Returns I/O stats for block device.
@param dev: Device name
@param devtype: Device type. (Ignored if None.)
@return: Dict of stats.
"""
if devtype is not None:
if self._devClassTree is None:
self._initDevClasses()
            if devtype != self._mapDevType.get(dev):
return None
return self._diskStats.get(dev) | Returns I/O stats for block device.
@param dev: Device name
@param devtype: Device type. (Ignored if None.)
        @return: Dict of stats. | Below is the instruction that describes the task:
### Input:
Returns I/O stats for block device.
@param dev: Device name
@param devtype: Device type. (Ignored if None.)
@return: Dict of stats.
### Response:
def getDevStats(self, dev, devtype = None):
"""Returns I/O stats for block device.
@param dev: Device name
@param devtype: Device type. (Ignored if None.)
@return: Dict of stats.
"""
if devtype is not None:
if self._devClassTree is None:
self._initDevClasses()
            if devtype != self._mapDevType.get(dev):
return None
return self._diskStats.get(dev) |
def stop(self):
"""
Stop the sensor server (soft stop - signal packet loop to stop)
Warning: Is non blocking (server might still do something after this!)
:rtype: None
"""
self.debug("()")
super(SensorServer, self).stop()
# No new clients
if self._multicast_socket is not None:
self._shutdown_multicast_socket()
# Signal packet loop to shutdown
self._is_stopped.set() | Stop the sensor server (soft stop - signal packet loop to stop)
Warning: Is non blocking (server might still do something after this!)
        :rtype: None | Below is the instruction that describes the task:
### Input:
Stop the sensor server (soft stop - signal packet loop to stop)
Warning: Is non blocking (server might still do something after this!)
:rtype: None
### Response:
def stop(self):
"""
Stop the sensor server (soft stop - signal packet loop to stop)
Warning: Is non blocking (server might still do something after this!)
:rtype: None
"""
self.debug("()")
super(SensorServer, self).stop()
# No new clients
if self._multicast_socket is not None:
self._shutdown_multicast_socket()
# Signal packet loop to shutdown
self._is_stopped.set() |
def requeue(self, message_id, timeout=0, backoff=True):
"""Re-queue a message (indicate failure to process)."""
self.send(nsq.requeue(message_id, timeout))
self.finish_inflight()
self.on_requeue.send(
self,
message_id=message_id,
timeout=timeout,
backoff=backoff
Re-queue a message (indicate failure to process). | Below is the instruction that describes the task:
### Input:
Re-queue a message (indicate failure to process).
### Response:
def requeue(self, message_id, timeout=0, backoff=True):
"""Re-queue a message (indicate failure to process)."""
self.send(nsq.requeue(message_id, timeout))
self.finish_inflight()
self.on_requeue.send(
self,
message_id=message_id,
timeout=timeout,
backoff=backoff
) |
def get_codec_info(cls):
"""
Returns information used by the codecs library to configure the
codec for use.
"""
codec = cls()
codec_info = {
'encode': codec.encode,
'decode': codec.decode,
}
# In Python 2, all codecs are made equal.
# In Python 3, some codecs are more equal than others.
if PY3:
codec_info['_is_text_encoding'] = False
return CodecInfo(**codec_info) | Returns information used by the codecs library to configure the
        codec for use. | Below is the instruction that describes the task:
### Input:
Returns information used by the codecs library to configure the
codec for use.
### Response:
def get_codec_info(cls):
"""
Returns information used by the codecs library to configure the
codec for use.
"""
codec = cls()
codec_info = {
'encode': codec.encode,
'decode': codec.decode,
}
# In Python 2, all codecs are made equal.
# In Python 3, some codecs are more equal than others.
if PY3:
codec_info['_is_text_encoding'] = False
return CodecInfo(**codec_info) |
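A hedged sketch of wiring the class above into Python's codecs machinery; MyCodec is a hypothetical concrete subclass that provides encode/decode.
import codecs

def _search(name):
    # Only answer for our hypothetical codec name.
    return MyCodec.get_codec_info() if name == "my-codec" else None

codecs.register(_search)
encoded, consumed = codecs.lookup("my-codec").encode(u"payload")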
def _expand_placeholder_value(value):
"""
Return the SQL string representation of the specified placeholder's
value.
@param value: the value of a placeholder such as a simple element, a
list, or a tuple of one string.
@note: by convention, a tuple of one string indicates that this string
               MUST not be quoted as it represents, for instance, a call to
a stored procedure, and not a textual content to modify into a
table.
@return: a SQL string representation.
"""
if isinstance(value, (list, set)) or (isinstance(value, tuple) and len(value) != 1):
sql_value = ','.join( [ RdbmsConnection._to_sql_value(
element if not isinstance(element, tuple) else element[0],
noquote=isinstance(element, tuple))
for element in value ])
elif isinstance(value, tuple):
assert len(value) == 1
value = value[0]
assert value is None or isinstance(value, basestring), 'basestring expected instead of %s' % type(value)
sql_value = RdbmsConnection._to_sql_value(value, True)
else:
sql_value = RdbmsConnection._to_sql_value(value)
return sql_value | Return the SQL string representation of the specified placeholder's
value.
@param value: the value of a placeholder such as a simple element, a
list, or a tuple of one string.
@note: by convention, a tuple of one string indicates that this string
           MUST not be quoted as it represents, for instance, a call to
a stored procedure, and not a textual content to modify into a
table.
        @return: a SQL string representation. | Below is the instruction that describes the task:
### Input:
Return the SQL string representation of the specified placeholder's
value.
@param value: the value of a placeholder such as a simple element, a
list, or a tuple of one string.
@note: by convention, a tuple of one string indicates that this string
           MUST not be quoted as it represents, for instance, a call to
a stored procedure, and not a textual content to modify into a
table.
@return: a SQL string representation.
### Response:
def _expand_placeholder_value(value):
"""
Return the SQL string representation of the specified placeholder's
value.
@param value: the value of a placeholder such as a simple element, a
list, or a tuple of one string.
@note: by convention, a tuple of one string indicates that this string
               MUST not be quoted as it represents, for instance, a call to
a stored procedure, and not a textual content to modify into a
table.
@return: a SQL string representation.
"""
if isinstance(value, (list, set)) or (isinstance(value, tuple) and len(value) != 1):
sql_value = ','.join( [ RdbmsConnection._to_sql_value(
element if not isinstance(element, tuple) else element[0],
noquote=isinstance(element, tuple))
for element in value ])
elif isinstance(value, tuple):
assert len(value) == 1
value = value[0]
assert value is None or isinstance(value, basestring), 'basestring expected instead of %s' % type(value)
sql_value = RdbmsConnection._to_sql_value(value, True)
else:
sql_value = RdbmsConnection._to_sql_value(value)
return sql_value |
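An illustrative sketch of the quoting convention described above, assuming this is a static helper on the module's RdbmsConnection class; the exact strings produced depend on _to_sql_value.
# A list expands to a comma-separated sequence of quoted SQL values.
in_clause = RdbmsConnection._expand_placeholder_value(['alice', 'bob'])

# A one-element tuple is passed through unquoted, e.g. for a stored procedure call.
raw_expr = RdbmsConnection._expand_placeholder_value(('now()',))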
def subCell2DCoords(*args, **kwargs):
'''Same as subCell2DSlices but returning coordinates
Example:
g = subCell2DCoords(arr, shape)
for x, y in g:
plt.plot(x, y)
'''
for _, _, s0, s1 in subCell2DSlices(*args, **kwargs):
yield ((s1.start, s1.start, s1.stop),
(s0.start, s0.stop, s0.stop)) | Same as subCell2DSlices but returning coordinates
Example:
g = subCell2DCoords(arr, shape)
for x, y in g:
        plt.plot(x, y) | Below is the instruction that describes the task:
### Input:
Same as subCell2DSlices but returning coordinates
Example:
g = subCell2DCoords(arr, shape)
for x, y in g:
plt.plot(x, y)
### Response:
def subCell2DCoords(*args, **kwargs):
'''Same as subCell2DSlices but returning coordinates
Example:
g = subCell2DCoords(arr, shape)
for x, y in g:
plt.plot(x, y)
'''
for _, _, s0, s1 in subCell2DSlices(*args, **kwargs):
yield ((s1.start, s1.start, s1.stop),
(s0.start, s0.stop, s0.stop)) |
def add_blacklisted_filepaths(self, filepaths, remove_from_stored=True):
"""
Add `filepaths` to blacklisted filepaths.
If `remove_from_stored` is `True`, any `filepaths` in
`plugin_filepaths` will be automatically removed.
Recommend passing in absolute filepaths but method will attempt
to convert to absolute filepaths based on current working directory.
"""
filepaths = util.to_absolute_paths(filepaths)
self.blacklisted_filepaths.update(filepaths)
if remove_from_stored:
self.plugin_filepaths = util.remove_from_set(self.plugin_filepaths,
filepaths) | Add `filepaths` to blacklisted filepaths.
If `remove_from_stored` is `True`, any `filepaths` in
`plugin_filepaths` will be automatically removed.
Recommend passing in absolute filepaths but method will attempt
        to convert to absolute filepaths based on current working directory. | Below is the instruction that describes the task:
### Input:
Add `filepaths` to blacklisted filepaths.
If `remove_from_stored` is `True`, any `filepaths` in
`plugin_filepaths` will be automatically removed.
Recommend passing in absolute filepaths but method will attempt
to convert to absolute filepaths based on current working directory.
### Response:
def add_blacklisted_filepaths(self, filepaths, remove_from_stored=True):
"""
Add `filepaths` to blacklisted filepaths.
If `remove_from_stored` is `True`, any `filepaths` in
`plugin_filepaths` will be automatically removed.
Recommend passing in absolute filepaths but method will attempt
to convert to absolute filepaths based on current working directory.
"""
filepaths = util.to_absolute_paths(filepaths)
self.blacklisted_filepaths.update(filepaths)
if remove_from_stored:
self.plugin_filepaths = util.remove_from_set(self.plugin_filepaths,
filepaths) |
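A brief usage sketch; `interface` stands for a hypothetical instance of the plugin-manager class this method belongs to, and the path is made up.
interface.add_blacklisted_filepaths(['plugins/broken_plugin.py'],
                                    remove_from_stored=True)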
def _find_next_ready_node(self):
"""
Finds the next node that is ready to be built.
This is *the* main guts of the DAG walk. We loop through the
list of candidates, looking for something that has no un-built
children (i.e., that is a leaf Node or has dependencies that are
all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
(both the target Node itself and its sources, which are always
scanned in the context of a given target) to discover implicit
dependencies. A Node that must wait for some children to be
built will be put back on the candidates list after the children
have finished building. A Node that has been put back on the
candidates list in this way may have itself (or its sources)
re-scanned, in order to handle generated header files (e.g.) and
the implicit dependencies therein.
Note that this method does not do any signature calculation or
up-to-date check itself. All of that is handled by the Task
class. This is purely concerned with the dependency graph walk.
"""
self.ready_exc = None
T = self.trace
if T: T.write(SCons.Util.UnicodeType('\n') + self.trace_message('Looking for a node to evaluate'))
while True:
node = self.next_candidate()
if node is None:
if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
return None
node = node.disambiguate()
state = node.get_state()
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
if CollectStats:
if not hasattr(node.attributes, 'stats'):
node.attributes.stats = Stats()
StatsNodes.append(node)
S = node.attributes.stats
S.considered = S.considered + 1
else:
S = None
if T: T.write(self.trace_message(u' Considering node %s and its children:' % self.trace_node(node)))
if state == NODE_NO_STATE:
# Mark this node as being on the execution stack:
node.set_state(NODE_PENDING)
elif state > NODE_PENDING:
# Skip this node if it has already been evaluated:
if S: S.already_handled = S.already_handled + 1
if T: T.write(self.trace_message(u' already handled (executed)'))
continue
executor = node.get_executor()
try:
children = executor.get_all_children()
except SystemExit:
exc_value = sys.exc_info()[1]
e = SCons.Errors.ExplicitExit(node, exc_value.code)
self.ready_exc = (SCons.Errors.ExplicitExit, e)
if T: T.write(self.trace_message(' SystemExit'))
return node
except Exception as e:
# We had a problem just trying to figure out the
# children (like a child couldn't be linked in to a
# VariantDir, or a Scanner threw something). Arrange to
# raise the exception when the Task is "executed."
self.ready_exc = sys.exc_info()
if S: S.problem = S.problem + 1
if T: T.write(self.trace_message(' exception %s while scanning children.\n' % e))
return node
children_not_visited = []
children_pending = set()
children_not_ready = []
children_failed = False
for child in chain(executor.get_all_prerequisites(), children):
childstate = child.get_state()
if T: T.write(self.trace_message(u' ' + self.trace_node(child)))
if childstate == NODE_NO_STATE:
children_not_visited.append(child)
elif childstate == NODE_PENDING:
children_pending.add(child)
elif childstate == NODE_FAILED:
children_failed = True
if childstate <= NODE_EXECUTING:
children_not_ready.append(child)
# These nodes have not even been visited yet. Add
# them to the list so that on some next pass we can
# take a stab at evaluating them (or their children).
children_not_visited.reverse()
self.candidates.extend(self.order(children_not_visited))
# if T and children_not_visited:
# T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited)))
# T.write(self.trace_message(' candidates now: %s\n' % map(str, self.candidates)))
# Skip this node if any of its children have failed.
#
# This catches the case where we're descending a top-level
# target and one of our children failed while trying to be
# built by a *previous* descent of an earlier top-level
# target.
#
# It can also occur if a node is reused in multiple
# targets. One first descends though the one of the
# target, the next time occurs through the other target.
#
# Note that we can only have failed_children if the
# --keep-going flag was used, because without it the build
# will stop before diving in the other branch.
#
# Note that even if one of the children fails, we still
# added the other children to the list of candidate nodes
# to keep on building (--keep-going).
if children_failed:
for n in executor.get_action_targets():
n.set_state(NODE_FAILED)
if S: S.child_failed = S.child_failed + 1
if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
continue
if children_not_ready:
for child in children_not_ready:
# We're waiting on one or more derived targets
# that have not yet finished building.
if S: S.not_built = S.not_built + 1
# Add this node to the waiting parents lists of
# anything we're waiting on, with a reference
# count so we can be put back on the list for
# re-evaluation when they've all finished.
node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
if T: T.write(self.trace_message(u' adjusted ref count: %s, child %s' %
(self.trace_node(node), repr(str(child)))))
if T:
for pc in children_pending:
T.write(self.trace_message(' adding %s to the pending children set\n' %
self.trace_node(pc)))
self.pending_children = self.pending_children | children_pending
continue
# Skip this node if it has side-effects that are
# currently being built:
wait_side_effects = False
for se in executor.get_action_side_effects():
if se.get_state() == NODE_EXECUTING:
se.add_to_waiting_s_e(node)
wait_side_effects = True
if wait_side_effects:
if S: S.side_effects = S.side_effects + 1
continue
# The default when we've gotten through all of the checks above:
# this node is ready to be built.
if S: S.build = S.build + 1
if T: T.write(self.trace_message(u'Evaluating %s\n' %
self.trace_node(node)))
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
return node
return None | Finds the next node that is ready to be built.
This is *the* main guts of the DAG walk. We loop through the
list of candidates, looking for something that has no un-built
children (i.e., that is a leaf Node or has dependencies that are
all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
(both the target Node itself and its sources, which are always
scanned in the context of a given target) to discover implicit
dependencies. A Node that must wait for some children to be
built will be put back on the candidates list after the children
have finished building. A Node that has been put back on the
candidates list in this way may have itself (or its sources)
re-scanned, in order to handle generated header files (e.g.) and
the implicit dependencies therein.
Note that this method does not do any signature calculation or
up-to-date check itself. All of that is handled by the Task
        class. This is purely concerned with the dependency graph walk. | Below is the instruction that describes the task:
### Input:
Finds the next node that is ready to be built.
This is *the* main guts of the DAG walk. We loop through the
list of candidates, looking for something that has no un-built
children (i.e., that is a leaf Node or has dependencies that are
all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
(both the target Node itself and its sources, which are always
scanned in the context of a given target) to discover implicit
dependencies. A Node that must wait for some children to be
built will be put back on the candidates list after the children
have finished building. A Node that has been put back on the
candidates list in this way may have itself (or its sources)
re-scanned, in order to handle generated header files (e.g.) and
the implicit dependencies therein.
Note that this method does not do any signature calculation or
up-to-date check itself. All of that is handled by the Task
class. This is purely concerned with the dependency graph walk.
### Response:
def _find_next_ready_node(self):
"""
Finds the next node that is ready to be built.
This is *the* main guts of the DAG walk. We loop through the
list of candidates, looking for something that has no un-built
children (i.e., that is a leaf Node or has dependencies that are
all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
(both the target Node itself and its sources, which are always
scanned in the context of a given target) to discover implicit
dependencies. A Node that must wait for some children to be
built will be put back on the candidates list after the children
have finished building. A Node that has been put back on the
candidates list in this way may have itself (or its sources)
re-scanned, in order to handle generated header files (e.g.) and
the implicit dependencies therein.
Note that this method does not do any signature calculation or
up-to-date check itself. All of that is handled by the Task
class. This is purely concerned with the dependency graph walk.
"""
self.ready_exc = None
T = self.trace
if T: T.write(SCons.Util.UnicodeType('\n') + self.trace_message('Looking for a node to evaluate'))
while True:
node = self.next_candidate()
if node is None:
if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
return None
node = node.disambiguate()
state = node.get_state()
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
if CollectStats:
if not hasattr(node.attributes, 'stats'):
node.attributes.stats = Stats()
StatsNodes.append(node)
S = node.attributes.stats
S.considered = S.considered + 1
else:
S = None
if T: T.write(self.trace_message(u' Considering node %s and its children:' % self.trace_node(node)))
if state == NODE_NO_STATE:
# Mark this node as being on the execution stack:
node.set_state(NODE_PENDING)
elif state > NODE_PENDING:
# Skip this node if it has already been evaluated:
if S: S.already_handled = S.already_handled + 1
if T: T.write(self.trace_message(u' already handled (executed)'))
continue
executor = node.get_executor()
try:
children = executor.get_all_children()
except SystemExit:
exc_value = sys.exc_info()[1]
e = SCons.Errors.ExplicitExit(node, exc_value.code)
self.ready_exc = (SCons.Errors.ExplicitExit, e)
if T: T.write(self.trace_message(' SystemExit'))
return node
except Exception as e:
# We had a problem just trying to figure out the
# children (like a child couldn't be linked in to a
# VariantDir, or a Scanner threw something). Arrange to
# raise the exception when the Task is "executed."
self.ready_exc = sys.exc_info()
if S: S.problem = S.problem + 1
if T: T.write(self.trace_message(' exception %s while scanning children.\n' % e))
return node
children_not_visited = []
children_pending = set()
children_not_ready = []
children_failed = False
for child in chain(executor.get_all_prerequisites(), children):
childstate = child.get_state()
if T: T.write(self.trace_message(u' ' + self.trace_node(child)))
if childstate == NODE_NO_STATE:
children_not_visited.append(child)
elif childstate == NODE_PENDING:
children_pending.add(child)
elif childstate == NODE_FAILED:
children_failed = True
if childstate <= NODE_EXECUTING:
children_not_ready.append(child)
# These nodes have not even been visited yet. Add
# them to the list so that on some next pass we can
# take a stab at evaluating them (or their children).
children_not_visited.reverse()
self.candidates.extend(self.order(children_not_visited))
# if T and children_not_visited:
# T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited)))
# T.write(self.trace_message(' candidates now: %s\n' % map(str, self.candidates)))
# Skip this node if any of its children have failed.
#
# This catches the case where we're descending a top-level
# target and one of our children failed while trying to be
# built by a *previous* descent of an earlier top-level
# target.
#
# It can also occur if a node is reused in multiple
# targets. One first descends though the one of the
# target, the next time occurs through the other target.
#
# Note that we can only have failed_children if the
# --keep-going flag was used, because without it the build
# will stop before diving in the other branch.
#
# Note that even if one of the children fails, we still
# added the other children to the list of candidate nodes
# to keep on building (--keep-going).
if children_failed:
for n in executor.get_action_targets():
n.set_state(NODE_FAILED)
if S: S.child_failed = S.child_failed + 1
if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
continue
if children_not_ready:
for child in children_not_ready:
# We're waiting on one or more derived targets
# that have not yet finished building.
if S: S.not_built = S.not_built + 1
# Add this node to the waiting parents lists of
# anything we're waiting on, with a reference
# count so we can be put back on the list for
# re-evaluation when they've all finished.
node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
if T: T.write(self.trace_message(u' adjusted ref count: %s, child %s' %
(self.trace_node(node), repr(str(child)))))
if T:
for pc in children_pending:
T.write(self.trace_message(' adding %s to the pending children set\n' %
self.trace_node(pc)))
self.pending_children = self.pending_children | children_pending
continue
# Skip this node if it has side-effects that are
# currently being built:
wait_side_effects = False
for se in executor.get_action_side_effects():
if se.get_state() == NODE_EXECUTING:
se.add_to_waiting_s_e(node)
wait_side_effects = True
if wait_side_effects:
if S: S.side_effects = S.side_effects + 1
continue
# The default when we've gotten through all of the checks above:
# this node is ready to be built.
if S: S.build = S.build + 1
if T: T.write(self.trace_message(u'Evaluating %s\n' %
self.trace_node(node)))
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
return node
return None |
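The ready-node selection described above boils down to "a node may build once every child is built". A deliberately tiny, self-contained sketch of that idea (not SCons code; no cycle detection and none of the Taskmaster's scanning or ref-counting):
def next_ready(candidates, children, built):
    while candidates:
        node = candidates.pop()
        if node in built:
            continue                       # already handled via another parent
        pending = [c for c in children.get(node, []) if c not in built]
        if not pending:
            return node                    # leaf, or every child already built
        candidates.append(node)            # revisit the parent later...
        candidates.extend(pending)         # ...but evaluate its children first
    return None

children = {'app': ['lib', 'main.o'], 'lib': ['a.o', 'b.o']}   # toy dependency graph
candidates, built, order = ['app'], set(), []
while True:
    node = next_ready(candidates, children, built)
    if node is None:
        break
    built.add(node)
    order.append(node)
print(order)   # children always precede their parents, e.g. ['main.o', 'b.o', 'a.o', 'lib', 'app']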
def datetime(value,
allow_empty = False,
minimum = None,
maximum = None,
coerce_value = True,
**kwargs):
"""Validate that ``value`` is a valid datetime.
.. caution::
If supplying a string, the string needs to be in an ISO 8601-format to pass
validation. If it is not in an ISO 8601-format, validation will fail.
:param value: The value to validate.
:type value: :class:`str <python:str>` / :class:`datetime <python:datetime.datetime>`
/ :class:`date <python:datetime.date>` / :obj:`None <python:None>`
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param minimum: If supplied, will make sure that ``value`` is on or after this value.
:type minimum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>` /
:obj:`None <python:None>`
:param maximum: If supplied, will make sure that ``value`` is on or before this
value.
:type maximum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>` /
:obj:`None <python:None>`
:param coerce_value: If ``True``, will coerce dates to
:class:`datetime <python:datetime.datetime>` objects with times of 00:00:00. If ``False``, will error
if ``value`` is not an unambiguous timestamp. Defaults to ``True``.
:type coerce_value: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`datetime <python:datetime.datetime>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``value`` cannot be coerced to a
:class:`datetime <python:datetime.datetime>` value and is not
:obj:`None <python:None>`
:raises MinimumValueError: if ``minimum`` is supplied but ``value`` occurs
before ``minimum``
:raises MaximumValueError: if ``maximum`` is supplied but ``value`` occurs
      after ``maximum``
"""
# pylint: disable=too-many-branches
if not value and not allow_empty:
raise errors.EmptyValueError('value (%s) was empty' % value)
elif not value:
return None
minimum = datetime(minimum, allow_empty = True, force_run = True) # pylint: disable=E1123
maximum = datetime(maximum, allow_empty = True, force_run = True) # pylint: disable=E1123
if not isinstance(value, datetime_types):
raise errors.CannotCoerceError(
'value (%s) must be a date object, datetime object, '
'ISO 8601-formatted string, '
'or POSIX timestamp, but was %s' % (value,
type(value))
)
elif isinstance(value, timestamp_types) and coerce_value:
try:
value = datetime_.datetime.fromtimestamp(value)
except ValueError:
raise errors.CannotCoerceError(
'value (%s) must be a date object, datetime object, '
'ISO 8601-formatted string, '
'or POSIX timestamp, but was %s' % (value,
type(value))
)
elif isinstance(value, str):
# pylint: disable=line-too-long
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f%z')
else:
value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S.%f%z')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y/%m/%dT%H:%M:%S%z')
else:
value = datetime_.datetime.strptime(value, '%Y/%m/%d %H:%M:%S%z')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S%z')
else:
value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S%z')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value,
'%Y/%m/%dT%H:%M:%S%z')
else:
value = datetime_.datetime.strptime(value,
'%Y/%m/%d %H:%M:%S%z')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
else:
value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y/%m/%dT%H:%M:%S')
else:
value = datetime_.datetime.strptime(value, '%Y/%m/%d %H:%M:%S')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')
else:
value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value,
'%Y/%m/%dT%H:%M:%S')
else:
value = datetime_.datetime.strptime(value,
'%Y/%m/%d %H:%M:%S')
except ValueError:
if coerce_value:
value = date(value)
else:
raise errors.CannotCoerceError(
'value (%s) must be a datetime object, '
'ISO 8601-formatted string, '
'or POSIX timestamp' % value
)
# pylint: enable=line-too-long
elif isinstance(value, numeric_types) and not coerce_value:
raise errors.CannotCoerceError(
'value (%s) must be a datetime object, '
'ISO 8601-formatted string, '
'or POSIX timestamp' % value
)
if isinstance(value, datetime_.date) and not isinstance(value, datetime_.datetime):
if coerce_value:
value = datetime_.datetime(value.year, # pylint: disable=R0204
value.month,
value.day,
0,
0,
0,
0)
else:
raise errors.CannotCoerceError(
'value (%s) must be a datetime object, '
'ISO 8601-formatted string, '
'or POSIX timestamp' % value
)
if minimum and value and value < minimum:
raise errors.MinimumValueError(
'value (%s) is before the minimum given (%s)' % (value.isoformat(),
minimum.isoformat())
)
if maximum and value and value > maximum:
raise errors.MaximumValueError(
'value (%s) is after the maximum given (%s)' % (value.isoformat(),
maximum.isoformat())
)
return value | Validate that ``value`` is a valid datetime.
.. caution::
If supplying a string, the string needs to be in an ISO 8601-format to pass
validation. If it is not in an ISO 8601-format, validation will fail.
:param value: The value to validate.
:type value: :class:`str <python:str>` / :class:`datetime <python:datetime.datetime>`
/ :class:`date <python:datetime.date>` / :obj:`None <python:None>`
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param minimum: If supplied, will make sure that ``value`` is on or after this value.
:type minimum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>` /
:obj:`None <python:None>`
:param maximum: If supplied, will make sure that ``value`` is on or before this
value.
:type maximum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>` /
:obj:`None <python:None>`
:param coerce_value: If ``True``, will coerce dates to
:class:`datetime <python:datetime.datetime>` objects with times of 00:00:00. If ``False``, will error
if ``value`` is not an unambiguous timestamp. Defaults to ``True``.
:type coerce_value: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`datetime <python:datetime.datetime>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``value`` cannot be coerced to a
:class:`datetime <python:datetime.datetime>` value and is not
:obj:`None <python:None>`
:raises MinimumValueError: if ``minimum`` is supplied but ``value`` occurs
before ``minimum``
:raises MaximumValueError: if ``maximum`` is supplied but ``value`` occurs
    after ``maximum`` | Below is the instruction that describes the task:
### Input:
Validate that ``value`` is a valid datetime.
.. caution::
If supplying a string, the string needs to be in an ISO 8601-format to pass
validation. If it is not in an ISO 8601-format, validation will fail.
:param value: The value to validate.
:type value: :class:`str <python:str>` / :class:`datetime <python:datetime.datetime>`
/ :class:`date <python:datetime.date>` / :obj:`None <python:None>`
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param minimum: If supplied, will make sure that ``value`` is on or after this value.
:type minimum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>` /
:obj:`None <python:None>`
:param maximum: If supplied, will make sure that ``value`` is on or before this
value.
:type maximum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>` /
:obj:`None <python:None>`
:param coerce_value: If ``True``, will coerce dates to
:class:`datetime <python:datetime.datetime>` objects with times of 00:00:00. If ``False``, will error
if ``value`` is not an unambiguous timestamp. Defaults to ``True``.
:type coerce_value: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`datetime <python:datetime.datetime>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``value`` cannot be coerced to a
:class:`datetime <python:datetime.datetime>` value and is not
:obj:`None <python:None>`
:raises MinimumValueError: if ``minimum`` is supplied but ``value`` occurs
before ``minimum``
:raises MaximumValueError: if ``maximum`` is supplied but ``value`` occurs
after ``minimum``
### Response:
def datetime(value,
allow_empty = False,
minimum = None,
maximum = None,
coerce_value = True,
**kwargs):
"""Validate that ``value`` is a valid datetime.
.. caution::
If supplying a string, the string needs to be in an ISO 8601-format to pass
validation. If it is not in an ISO 8601-format, validation will fail.
:param value: The value to validate.
:type value: :class:`str <python:str>` / :class:`datetime <python:datetime.datetime>`
/ :class:`date <python:datetime.date>` / :obj:`None <python:None>`
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param minimum: If supplied, will make sure that ``value`` is on or after this value.
:type minimum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>` /
:obj:`None <python:None>`
:param maximum: If supplied, will make sure that ``value`` is on or before this
value.
:type maximum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>` /
:obj:`None <python:None>`
:param coerce_value: If ``True``, will coerce dates to
:class:`datetime <python:datetime.datetime>` objects with times of 00:00:00. If ``False``, will error
if ``value`` is not an unambiguous timestamp. Defaults to ``True``.
:type coerce_value: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`datetime <python:datetime.datetime>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``value`` cannot be coerced to a
:class:`datetime <python:datetime.datetime>` value and is not
:obj:`None <python:None>`
:raises MinimumValueError: if ``minimum`` is supplied but ``value`` occurs
before ``minimum``
:raises MaximumValueError: if ``maximum`` is supplied but ``value`` occurs
after ``minimum``
"""
# pylint: disable=too-many-branches
if not value and not allow_empty:
raise errors.EmptyValueError('value (%s) was empty' % value)
elif not value:
return None
minimum = datetime(minimum, allow_empty = True, force_run = True) # pylint: disable=E1123
maximum = datetime(maximum, allow_empty = True, force_run = True) # pylint: disable=E1123
if not isinstance(value, datetime_types):
raise errors.CannotCoerceError(
'value (%s) must be a date object, datetime object, '
'ISO 8601-formatted string, '
'or POSIX timestamp, but was %s' % (value,
type(value))
)
elif isinstance(value, timestamp_types) and coerce_value:
try:
value = datetime_.datetime.fromtimestamp(value)
except ValueError:
raise errors.CannotCoerceError(
'value (%s) must be a date object, datetime object, '
'ISO 8601-formatted string, '
'or POSIX timestamp, but was %s' % (value,
type(value))
)
elif isinstance(value, str):
# pylint: disable=line-too-long
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f%z')
else:
value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S.%f%z')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y/%m/%dT%H:%M:%S%z')
else:
value = datetime_.datetime.strptime(value, '%Y/%m/%d %H:%M:%S%z')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S%z')
else:
value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S%z')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value,
'%Y/%m/%dT%H:%M:%S%z')
else:
value = datetime_.datetime.strptime(value,
'%Y/%m/%d %H:%M:%S%z')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
else:
value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y/%m/%dT%H:%M:%S')
else:
value = datetime_.datetime.strptime(value, '%Y/%m/%d %H:%M:%S')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')
else:
value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
except ValueError:
try:
if 'T' in value:
value = datetime_.datetime.strptime(value,
'%Y/%m/%dT%H:%M:%S')
else:
value = datetime_.datetime.strptime(value,
'%Y/%m/%d %H:%M:%S')
except ValueError:
if coerce_value:
value = date(value)
else:
raise errors.CannotCoerceError(
'value (%s) must be a datetime object, '
'ISO 8601-formatted string, '
'or POSIX timestamp' % value
)
# pylint: enable=line-too-long
elif isinstance(value, numeric_types) and not coerce_value:
raise errors.CannotCoerceError(
'value (%s) must be a datetime object, '
'ISO 8601-formatted string, '
'or POSIX timestamp' % value
)
if isinstance(value, datetime_.date) and not isinstance(value, datetime_.datetime):
if coerce_value:
value = datetime_.datetime(value.year, # pylint: disable=R0204
value.month,
value.day,
0,
0,
0,
0)
else:
raise errors.CannotCoerceError(
'value (%s) must be a datetime object, '
'ISO 8601-formatted string, '
'or POSIX timestamp' % value
)
if minimum and value and value < minimum:
raise errors.MinimumValueError(
'value (%s) is before the minimum given (%s)' % (value.isoformat(),
minimum.isoformat())
)
if maximum and value and value > maximum:
raise errors.MaximumValueError(
'value (%s) is after the maximum given (%s)' % (value.isoformat(),
maximum.isoformat())
)
return value |
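A minimal usage sketch for the validator above, assuming it is importable as ``validators.datetime`` from ``validator_collection`` (the import path is inferred from the docstring links, not stated in this row):

from validator_collection import validators, errors

value = validators.datetime('2018-01-01T12:00:00')    # ISO 8601 string -> datetime object
coerced = validators.datetime('2018-01-01')            # date-only input coerced to 00:00:00
empty = validators.datetime(None, allow_empty=True)    # returns None instead of raising

try:
    validators.datetime('2018-01-01', minimum='2019-01-01')
except errors.MinimumValueError:
    pass  # raised because the value falls before the supplied minimum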
def get_random_subreddit(self, nsfw=False):
"""Return a random Subreddit object.
:param nsfw: When true, return a random NSFW Subreddit object. Calling
in this manner will set the 'over18' cookie for the duration of the
PRAW session.
"""
path = 'random'
if nsfw:
self.http.cookies.set('over18', '1')
path = 'randnsfw'
url = self.config['subreddit'].format(subreddit=path)
response = self._request(url, params={'unique': self._unique_count},
raw_response=True)
self._unique_count += 1
return self.get_subreddit(response.url.rsplit('/', 2)[-2]) | Return a random Subreddit object.
:param nsfw: When true, return a random NSFW Subreddit object. Calling
in this manner will set the 'over18' cookie for the duration of the
PRAW session. | Below is the the instruction that describes the task:
### Input:
Return a random Subreddit object.
:param nsfw: When true, return a random NSFW Subreddit object. Calling
in this manner will set the 'over18' cookie for the duration of the
PRAW session.
### Response:
def get_random_subreddit(self, nsfw=False):
"""Return a random Subreddit object.
:param nsfw: When true, return a random NSFW Subreddit object. Calling
in this manner will set the 'over18' cookie for the duration of the
PRAW session.
"""
path = 'random'
if nsfw:
self.http.cookies.set('over18', '1')
path = 'randnsfw'
url = self.config['subreddit'].format(subreddit=path)
response = self._request(url, params={'unique': self._unique_count},
raw_response=True)
self._unique_count += 1
return self.get_subreddit(response.url.rsplit('/', 2)[-2]) |
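A short hypothetical call sequence for the method above; the PRAW 3-style client construction and the ``user_agent`` string are assumptions for illustration:

import praw

r = praw.Reddit(user_agent='random-subreddit-example/0.1')  # hypothetical session
sfw = r.get_random_subreddit()             # random safe-for-work subreddit
nsfw = r.get_random_subreddit(nsfw=True)   # also sets the 'over18' cookie for this session
print(sfw.display_name, nsfw.display_name)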
def _ParseTokenType(self, file_object, file_offset):
"""Parses a token type.
Args:
file_object (dfvfs.FileIO): file-like object.
file_offset (int): offset of the token relative to the start of
the file-like object.
Returns:
int: token type
"""
token_type_map = self._GetDataTypeMap('uint8')
token_type, _ = self._ReadStructureFromFileObject(
file_object, file_offset, token_type_map)
return token_type | Parses a token type.
Args:
file_object (dfvfs.FileIO): file-like object.
file_offset (int): offset of the token relative to the start of
the file-like object.
Returns:
int: token type | Below is the the instruction that describes the task:
### Input:
Parses a token type.
Args:
file_object (dfvfs.FileIO): file-like object.
file_offset (int): offset of the token relative to the start of
the file-like object.
Returns:
int: token type
### Response:
def _ParseTokenType(self, file_object, file_offset):
"""Parses a token type.
Args:
file_object (dfvfs.FileIO): file-like object.
file_offset (int): offset of the token relative to the start of
the file-like object.
Returns:
int: token type
"""
token_type_map = self._GetDataTypeMap('uint8')
token_type, _ = self._ReadStructureFromFileObject(
file_object, file_offset, token_type_map)
return token_type |
def create_for_wrong_result_type(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T],
obj: PersistedObject, result: T, options: Dict[str, Dict[str, Any]]):
"""
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param parser:
:param desired_type:
:param obj:
:param result:
:param options:
:return:
"""
msg = "Error while parsing {obj} as a {typ} with parser {p} using options=({opts}) - parser returned an object " \
"of wrong type {tret}: {ret}".format(obj=obj, typ=get_pretty_type_str(desired_type), p=parser,
opts=options, tret=type(result), ret=result)
return WrongTypeCreatedError(msg) | Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param parser:
:param desired_type:
:param obj:
:param result:
:param options:
:return: | Below is the the instruction that describes the task:
### Input:
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param parser:
:param desired_type:
:param obj:
:param result:
:param options:
:return:
### Response:
def create_for_wrong_result_type(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T],
obj: PersistedObject, result: T, options: Dict[str, Dict[str, Any]]):
"""
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param parser:
:param desired_type:
:param obj:
:param result:
:param options:
:return:
"""
msg = "Error while parsing {obj} as a {typ} with parser {p} using options=({opts}) - parser returned an object " \
"of wrong type {tret}: {ret}".format(obj=obj, typ=get_pretty_type_str(desired_type), p=parser,
opts=options, tret=type(result), ret=result)
return WrongTypeCreatedError(msg) |
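The row shows a factory helper that builds the exception message outside ``__init__`` (the workaround for the Nose issue linked in the docstring). A generic sketch of the same pattern; the helper name and message are invented, only the exception class name is taken from the row:

class WrongTypeCreatedError(Exception):
    pass

def create_wrong_type_error(obj, desired_type, result):
    # Build the message in a helper and return a ready-to-raise exception.
    msg = ("parser returned an object of wrong type %r for %r (expected %s)"
           % (type(result), obj, desired_type.__name__))
    return WrongTypeCreatedError(msg)

print(create_wrong_type_error({'a': 1}, int, 'not-an-int'))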
def segment(self, eps, min_time):
"""In-place segmentation of segments
Spatio-temporal segmentation of each segment
        The number of segments may increase after this step
Returns:
This track
"""
new_segments = []
for segment in self.segments:
segmented = segment.segment(eps, min_time)
for seg in segmented:
new_segments.append(Segment(seg))
self.segments = new_segments
return self | In-place segmentation of segments
Spatio-temporal segmentation of each segment
    The number of segments may increase after this step
Returns:
This track | Below is the the instruction that describes the task:
### Input:
In-place segmentation of segments
Spatio-temporal segmentation of each segment
The number of segments may increase after this step
Returns:
This track
### Response:
def segment(self, eps, min_time):
"""In-place segmentation of segments
Spatio-temporal segmentation of each segment
        The number of segments may increase after this step
Returns:
This track
"""
new_segments = []
for segment in self.segments:
segmented = segment.segment(eps, min_time)
for seg in segmented:
new_segments.append(Segment(seg))
self.segments = new_segments
return self |
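Stripped of the domain classes, the method is a flatten-and-rewrap loop: each existing segment may split into several pieces, and the pieces replace the old list. A generic sketch with plain lists standing in for ``Segment`` objects (the splitting rule is invented for the example):

def resegment(segments, split):
    new_segments = []
    for segment in segments:
        for piece in split(segment):      # mirrors segment.segment(eps, min_time)
            new_segments.append(piece)
    return new_segments

def split_on_gaps(seg):
    # Split a sorted list wherever consecutive values jump by more than 2.
    out, current = [], [seg[0]]
    for prev, cur in zip(seg, seg[1:]):
        if cur - prev > 2:
            out.append(current)
            current = []
        current.append(cur)
    out.append(current)
    return out

print(resegment([[1, 2, 6, 7], [10, 11]], split_on_gaps))
# [[1, 2], [6, 7], [10, 11]]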
def datasets_create_new(self, dataset_new_request, **kwargs): # noqa: E501
"""Create a new dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datasets_create_new(dataset_new_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DatasetNewRequest dataset_new_request: Information for creating a new dataset (required)
:return: Result
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.datasets_create_new_with_http_info(dataset_new_request, **kwargs) # noqa: E501
else:
(data) = self.datasets_create_new_with_http_info(dataset_new_request, **kwargs) # noqa: E501
return data | Create a new dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datasets_create_new(dataset_new_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DatasetNewRequest dataset_new_request: Information for creating a new dataset (required)
:return: Result
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Create a new dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datasets_create_new(dataset_new_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DatasetNewRequest dataset_new_request: Information for creating a new dataset (required)
:return: Result
If the method is called asynchronously,
returns the request thread.
### Response:
def datasets_create_new(self, dataset_new_request, **kwargs): # noqa: E501
"""Create a new dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datasets_create_new(dataset_new_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DatasetNewRequest dataset_new_request: Information for creating a new dataset (required)
:return: Result
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.datasets_create_new_with_http_info(dataset_new_request, **kwargs) # noqa: E501
else:
(data) = self.datasets_create_new_with_http_info(dataset_new_request, **kwargs) # noqa: E501
return data |
def decompress_messages(self, partitions_offmsgs):
""" Decompress pre-defined compressed fields for each message. """
for pomsg in partitions_offmsgs:
if pomsg['message']:
pomsg['message'] = self.decompress_fun(pomsg['message'])
yield pomsg | Decompress pre-defined compressed fields for each message. | Below is the the instruction that describes the task:
### Input:
Decompress pre-defined compressed fields for each message.
### Response:
def decompress_messages(self, partitions_offmsgs):
""" Decompress pre-defined compressed fields for each message. """
for pomsg in partitions_offmsgs:
if pomsg['message']:
pomsg['message'] = self.decompress_fun(pomsg['message'])
yield pomsg |
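A self-contained sketch of the generator above with ``zlib.decompress`` standing in for ``self.decompress_fun`` (the wrapper class and record layout are assumptions for the example):

import zlib

class _Demo:
    decompress_fun = staticmethod(zlib.decompress)

    def decompress_messages(self, partitions_offmsgs):   # same body as above
        for pomsg in partitions_offmsgs:
            if pomsg['message']:
                pomsg['message'] = self.decompress_fun(pomsg['message'])
            yield pomsg

records = [
    {'partition': 0, 'offset': 1, 'message': zlib.compress(b'hello')},
    {'partition': 0, 'offset': 2, 'message': b''},   # falsy payloads pass through untouched
]
for rec in _Demo().decompress_messages(records):
    print(rec['offset'], rec['message'])   # 1 b'hello', then 2 b''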
def reverse(self):
"""
Reverses the items of this collection "in place" (only two values are
retrieved from Redis at a time).
"""
def reverse_trans(pipe):
if self.writeback:
self._sync_helper(pipe)
n = self.__len__(pipe)
for i in range(n // 2):
left = pipe.lindex(self.key, i)
right = pipe.lindex(self.key, n - i - 1)
pipe.lset(self.key, i, right)
pipe.lset(self.key, n - i - 1, left)
self._transaction(reverse_trans) | Reverses the items of this collection "in place" (only two values are
retrieved from Redis at a time). | Below is the the instruction that describes the task:
### Input:
Reverses the items of this collection "in place" (only two values are
retrieved from Redis at a time).
### Response:
def reverse(self):
"""
Reverses the items of this collection "in place" (only two values are
retrieved from Redis at a time).
"""
def reverse_trans(pipe):
if self.writeback:
self._sync_helper(pipe)
n = self.__len__(pipe)
for i in range(n // 2):
left = pipe.lindex(self.key, i)
right = pipe.lindex(self.key, n - i - 1)
pipe.lset(self.key, i, right)
pipe.lset(self.key, n - i - 1, left)
self._transaction(reverse_trans) |
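The Redis-independent core is the pairwise index swap, which is why only two values are held in memory at a time; a sketch against a plain Python list standing in for ``lindex``/``lset``:

def reverse_in_place(items):
    n = len(items)
    for i in range(n // 2):
        left = items[i]                # lindex(key, i)
        right = items[n - i - 1]       # lindex(key, n - i - 1)
        items[i] = right               # lset(key, i, right)
        items[n - i - 1] = left        # lset(key, n - i - 1, left)

data = [1, 2, 3, 4, 5]
reverse_in_place(data)
print(data)   # [5, 4, 3, 2, 1]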
def _build_processor(cls, session: AppSession):
'''Create the Processor
Returns:
Processor: An instance of :class:`.processor.BaseProcessor`.
'''
web_processor = cls._build_web_processor(session)
ftp_processor = cls._build_ftp_processor(session)
delegate_processor = session.factory.new('Processor')
delegate_processor.register('http', web_processor)
delegate_processor.register('https', web_processor)
delegate_processor.register('ftp', ftp_processor) | Create the Processor
Returns:
Processor: An instance of :class:`.processor.BaseProcessor`. | Below is the the instruction that describes the task:
### Input:
Create the Processor
Returns:
Processor: An instance of :class:`.processor.BaseProcessor`.
### Response:
def _build_processor(cls, session: AppSession):
'''Create the Processor
Returns:
Processor: An instance of :class:`.processor.BaseProcessor`.
'''
web_processor = cls._build_web_processor(session)
ftp_processor = cls._build_ftp_processor(session)
delegate_processor = session.factory.new('Processor')
delegate_processor.register('http', web_processor)
delegate_processor.register('https', web_processor)
delegate_processor.register('ftp', ftp_processor) |
def create_seq(self, project):
"""Create and return a new sequence
:param project: the project for the sequence
:type deps: :class:`jukeboxcore.djadapter.models.Project`
:returns: The created sequence or None
:rtype: None | :class:`jukeboxcore.djadapter.models.Sequence`
:raises: None
"""
dialog = SequenceCreatorDialog(project=project, parent=self)
dialog.exec_()
seq = dialog.sequence
return seq | Create and return a new sequence
:param project: the project for the sequence
:type deps: :class:`jukeboxcore.djadapter.models.Project`
:returns: The created sequence or None
:rtype: None | :class:`jukeboxcore.djadapter.models.Sequence`
:raises: None | Below is the the instruction that describes the task:
### Input:
Create and return a new sequence
:param project: the project for the sequence
:type deps: :class:`jukeboxcore.djadapter.models.Project`
:returns: The created sequence or None
:rtype: None | :class:`jukeboxcore.djadapter.models.Sequence`
:raises: None
### Response:
def create_seq(self, project):
"""Create and return a new sequence
:param project: the project for the sequence
:type deps: :class:`jukeboxcore.djadapter.models.Project`
:returns: The created sequence or None
:rtype: None | :class:`jukeboxcore.djadapter.models.Sequence`
:raises: None
"""
dialog = SequenceCreatorDialog(project=project, parent=self)
dialog.exec_()
seq = dialog.sequence
return seq |
def UnregisterFlowProcessingHandler(self, timeout=None):
"""Unregisters any registered flow processing handler."""
if self.flow_processing_request_handler_thread:
self.flow_processing_request_handler_stop = True
self.flow_processing_request_handler_thread.join(timeout)
if self.flow_processing_request_handler_thread.isAlive():
raise RuntimeError("Flow processing handler did not join in time.")
self.flow_processing_request_handler_thread = None | Unregisters any registered flow processing handler. | Below is the the instruction that describes the task:
### Input:
Unregisters any registered flow processing handler.
### Response:
def UnregisterFlowProcessingHandler(self, timeout=None):
"""Unregisters any registered flow processing handler."""
if self.flow_processing_request_handler_thread:
self.flow_processing_request_handler_stop = True
self.flow_processing_request_handler_thread.join(timeout)
if self.flow_processing_request_handler_thread.isAlive():
raise RuntimeError("Flow processing handler did not join in time.")
self.flow_processing_request_handler_thread = None |
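Behind the GRR-specific attribute names is a common stop-flag-plus-join shutdown pattern; a self-contained sketch with invented names (``is_alive`` is the current spelling of the deprecated ``isAlive`` used in the row):

import threading
import time

stop_requested = False

def worker():
    while not stop_requested:   # poll the stop flag set by the main thread
        time.sleep(0.05)

t = threading.Thread(target=worker)
t.start()

stop_requested = True
t.join(timeout=2)
if t.is_alive():
    raise RuntimeError('worker did not join in time')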
def scatter(self, *args, **kwargs):
"""Add a scatter plot."""
cls = _make_class(ScatterVisual,
_default_marker=kwargs.pop('marker', None),
)
return self._add_item(cls, *args, **kwargs) | Add a scatter plot. | Below is the the instruction that describes the task:
### Input:
Add a scatter plot.
### Response:
def scatter(self, *args, **kwargs):
"""Add a scatter plot."""
cls = _make_class(ScatterVisual,
_default_marker=kwargs.pop('marker', None),
)
return self._add_item(cls, *args, **kwargs) |
def _root(path, root):
'''
Relocate an absolute path to a new root directory.
'''
if root:
return os.path.join(root, os.path.relpath(path, os.path.sep))
else:
return path | Relocate an absolute path to a new root directory. | Below is the the instruction that describes the task:
### Input:
Relocate an absolute path to a new root directory.
### Response:
def _root(path, root):
'''
Relocate an absolute path to a new root directory.
'''
if root:
return os.path.join(root, os.path.relpath(path, os.path.sep))
else:
return path |
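A quick check of what ``_root`` produces on a POSIX system (the paths are hypothetical):

import os

def _root(path, root):          # same helper as above
    if root:
        return os.path.join(root, os.path.relpath(path, os.path.sep))
    return path

print(_root('/etc/hosts', '/tmp/chroot'))   # /tmp/chroot/etc/hosts
print(_root('/etc/hosts', None))            # /etc/hosts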
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name] | Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name` | Below is the the instruction that describes the task:
### Input:
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
### Response:
def get_subparser(self, name):
"""
Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name`
"""
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name] |
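The lookup relies on argparse storing every sub-parser in the ``choices`` mapping of the subparsers action; a plain-argparse sketch of the same idea (program and command names are made up):

import argparse

parser = argparse.ArgumentParser(prog='tool')
subparsers_action = parser.add_subparsers(dest='command')
run_parser = subparsers_action.add_parser('run')

# get_subparser('run') above resolves to exactly this lookup.
assert subparsers_action.choices['run'] is run_parser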
def setup(console=False, port=None):
"""Setup integration
Register plug-ins and integrate into the host
Arguments:
console (bool): DEPRECATED
port (int, optional): DEPRECATED
"""
if self._has_been_setup:
teardown()
register_plugins()
register_host()
self._has_been_setup = True
print("pyblish: Pyblish loaded successfully.") | Setup integration
Register plug-ins and integrate into the host
Arguments:
console (bool): DEPRECATED
port (int, optional): DEPRECATED | Below is the the instruction that describes the task:
### Input:
Setup integration
Register plug-ins and integrate into the host
Arguments:
console (bool): DEPRECATED
port (int, optional): DEPRECATED
### Response:
def setup(console=False, port=None):
"""Setup integration
Register plug-ins and integrate into the host
Arguments:
console (bool): DEPRECATED
port (int, optional): DEPRECATED
"""
if self._has_been_setup:
teardown()
register_plugins()
register_host()
self._has_been_setup = True
print("pyblish: Pyblish loaded successfully.") |
def getWorkflowDir(workflowID, configWorkDir=None):
"""
Returns a path to the directory where worker directories and the cache will be located
for this workflow.
:param str workflowID: Unique identifier for the workflow
:param str configWorkDir: Value passed to the program using the --workDir flag
:return: Path to the workflow directory
:rtype: str
"""
workDir = configWorkDir or os.getenv('TOIL_WORKDIR') or tempfile.gettempdir()
if not os.path.exists(workDir):
raise RuntimeError("The directory specified by --workDir or TOIL_WORKDIR (%s) does not "
"exist." % workDir)
# Create the workflow dir, make it unique to each host in case workDir is on a shared FS.
# This prevents workers on different nodes from erasing each other's directories.
workflowDir = os.path.join(workDir, 'toil-%s-%s' % (workflowID, getNodeID()))
try:
# Directory creation is atomic
os.mkdir(workflowDir)
except OSError as err:
if err.errno != 17:
# The directory exists if a previous worker set it up.
raise
else:
logger.debug('Created the workflow directory at %s' % workflowDir)
return workflowDir | Returns a path to the directory where worker directories and the cache will be located
for this workflow.
:param str workflowID: Unique identifier for the workflow
:param str configWorkDir: Value passed to the program using the --workDir flag
:return: Path to the workflow directory
:rtype: str | Below is the the instruction that describes the task:
### Input:
Returns a path to the directory where worker directories and the cache will be located
for this workflow.
:param str workflowID: Unique identifier for the workflow
:param str configWorkDir: Value passed to the program using the --workDir flag
:return: Path to the workflow directory
:rtype: str
### Response:
def getWorkflowDir(workflowID, configWorkDir=None):
"""
Returns a path to the directory where worker directories and the cache will be located
for this workflow.
:param str workflowID: Unique identifier for the workflow
:param str configWorkDir: Value passed to the program using the --workDir flag
:return: Path to the workflow directory
:rtype: str
"""
workDir = configWorkDir or os.getenv('TOIL_WORKDIR') or tempfile.gettempdir()
if not os.path.exists(workDir):
raise RuntimeError("The directory specified by --workDir or TOIL_WORKDIR (%s) does not "
"exist." % workDir)
# Create the workflow dir, make it unique to each host in case workDir is on a shared FS.
# This prevents workers on different nodes from erasing each other's directories.
workflowDir = os.path.join(workDir, 'toil-%s-%s' % (workflowID, getNodeID()))
try:
# Directory creation is atomic
os.mkdir(workflowDir)
except OSError as err:
if err.errno != 17:
# The directory exists if a previous worker set it up.
raise
else:
logger.debug('Created the workflow directory at %s' % workflowDir)
return workflowDir |
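The race-safe part is treating ``os.mkdir`` as atomic and interpreting ``EEXIST`` (errno 17) as "another worker already created the directory"; a minimal stand-alone sketch of that pattern with a made-up directory name:

import errno
import os
import tempfile

workflow_dir = os.path.join(tempfile.gettempdir(), 'toil-example-workflow')

try:
    os.mkdir(workflow_dir)                  # atomic: exactly one caller succeeds
    print('created', workflow_dir)
except OSError as err:
    if err.errno != errno.EEXIST:           # errno.EEXIST == 17, matching the bare constant above
        raise
    print('already set up by another worker:', workflow_dir)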
def get_table_rate_rule_by_id(cls, table_rate_rule_id, **kwargs):
"""Find TableRateRule
Return single instance of TableRateRule by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_table_rate_rule_by_id(table_rate_rule_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_rule_id: ID of tableRateRule to return (required)
:return: TableRateRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_table_rate_rule_by_id_with_http_info(table_rate_rule_id, **kwargs)
else:
(data) = cls._get_table_rate_rule_by_id_with_http_info(table_rate_rule_id, **kwargs)
return data | Find TableRateRule
Return single instance of TableRateRule by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_table_rate_rule_by_id(table_rate_rule_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_rule_id: ID of tableRateRule to return (required)
:return: TableRateRule
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Find TableRateRule
Return single instance of TableRateRule by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_table_rate_rule_by_id(table_rate_rule_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_rule_id: ID of tableRateRule to return (required)
:return: TableRateRule
If the method is called asynchronously,
returns the request thread.
### Response:
def get_table_rate_rule_by_id(cls, table_rate_rule_id, **kwargs):
"""Find TableRateRule
Return single instance of TableRateRule by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_table_rate_rule_by_id(table_rate_rule_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_rule_id: ID of tableRateRule to return (required)
:return: TableRateRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_table_rate_rule_by_id_with_http_info(table_rate_rule_id, **kwargs)
else:
(data) = cls._get_table_rate_rule_by_id_with_http_info(table_rate_rule_id, **kwargs)
return data |
def get_media_detail_output_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_media_detail = ET.Element("get_media_detail")
config = get_media_detail
output = ET.SubElement(get_media_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name = ET.SubElement(interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_media_detail_output_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_media_detail = ET.Element("get_media_detail")
config = get_media_detail
output = ET.SubElement(get_media_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name = ET.SubElement(interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |