code | docstring | text
---|---|---|
def parse_component_reference(self, node):
"""
Parses <ComponentReference>
@param node: Node containing the <ComponentTypeRef> element
@type node: xml.etree.Element
"""
if 'name' in node.lattrib:
name = node.lattrib['name']
else:
self.raise_error('<ComponentReference> must specify a name for the ' +
'reference.')
if 'type' in node.lattrib:
type_ = node.lattrib['type']
else:
self.raise_error('<ComponentReference> must specify a type for the ' +
'reference.')
if 'local' in node.lattrib:
local = node.lattrib['local']
else:
local = None
self.current_component_type.add_component_reference(ComponentReference(name, type_, local)) | Parses <ComponentReference>
@param node: Node containing the <ComponentTypeRef> element
@type node: xml.etree.Element | Below is the instruction that describes the task:
### Input:
Parses <ComponentReference>
@param node: Node containing the <ComponentTypeRef> element
@type node: xml.etree.Element
### Response:
def parse_component_reference(self, node):
"""
Parses <ComponentReference>
@param node: Node containing the <ComponentTypeRef> element
@type node: xml.etree.Element
"""
if 'name' in node.lattrib:
name = node.lattrib['name']
else:
self.raise_error('<ComponentReference> must specify a name for the ' +
'reference.')
if 'type' in node.lattrib:
type_ = node.lattrib['type']
else:
self.raise_error('<ComponentReference> must specify a type for the ' +
'reference.')
if 'local' in node.lattrib:
local = node.lattrib['local']
else:
local = None
self.current_component_type.add_component_reference(ComponentReference(name, type_, local)) |
def parse_bool(s, default=False):
"""
Return the boolean value of an English string or default if it can't be
determined.
"""
if s is None:
return default
return TRUTH.get(s.lower(), default) | Return the boolean value of an English string or default if it can't be
determined. | Below is the instruction that describes the task:
### Input:
Return the boolean value of an English string or default if it can't be
determined.
### Response:
def parse_bool(s, default=False):
"""
Return the boolean value of an English string or default if it can't be
determined.
"""
if s is None:
return default
return TRUTH.get(s.lower(), default) |
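`TRUTH` is a module-level lookup table that this row does not show; a minimal usage sketch, assuming a typical truth-word mapping and that `parse_bool` above is in scope:

```python
# Hypothetical TRUTH mapping; the real table is defined elsewhere in the source module.
TRUTH = {'yes': True, 'y': True, 'true': True, '1': True,
         'no': False, 'n': False, 'false': False, '0': False}

print(parse_bool('Yes'))                # True
print(parse_bool('nope'))               # not in TRUTH -> default (False)
print(parse_bool(None, default=True))   # None -> default (True)
```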
def add_training_sample(self, text=u'', lang=''):
""" Initial step for adding new sample to training data.
You need to call `save_training_samples()` afterwards.
:param text: Sample text to be added.
:param lang: Language label for the input text.
"""
self.trainer.add(text=text, lang=lang) | Initial step for adding new sample to training data.
You need to call `save_training_samples()` afterwards.
:param text: Sample text to be added.
:param lang: Language label for the input text. | Below is the instruction that describes the task:
### Input:
Initial step for adding new sample to training data.
You need to call `save_training_samples()` afterwards.
:param text: Sample text to be added.
:param lang: Language label for the input text.
### Response:
def add_training_sample(self, text=u'', lang=''):
""" Initial step for adding new sample to training data.
You need to call `save_training_samples()` afterwards.
:param text: Sample text to be added.
:param lang: Language label for the input text.
"""
self.trainer.add(text=text, lang=lang) |
def expand_ssh_proxy_command(command, user, addr, port=22):
"""
Expand special digraphs ``%h``, ``%p``, and ``%r``.
Return a copy of `command` with the following string
substitutions applied:
* ``%h`` is replaced by *addr*
* ``%p`` is replaced by *port*
* ``%r`` is replaced by *user*
* ``%%`` is replaced by ``%``.
See also: man page ``ssh_config``, section "TOKENS".
"""
translated = []
subst = {
'h': list(str(addr)),
'p': list(str(port)),
'r': list(str(user)),
'%': ['%'],
}
escaped = False
for char in command:
if char == '%':
escaped = True
continue
if escaped:
try:
translated.extend(subst[char])
escaped = False
continue
except KeyError:
raise ValueError(
"Unknown digraph `%{0}`"
" in proxy command string `{1}`"
.format(char, command))
else:
translated.append(char)
continue
return ''.join(translated) | Expand special digraphs ``%h``, ``%p``, and ``%r``.
Return a copy of `command` with the following string
substitutions applied:
* ``%h`` is replaced by *addr*
* ``%p`` is replaced by *port*
* ``%r`` is replaced by *user*
* ``%%`` is replaced by ``%``.
See also: man page ``ssh_config``, section "TOKENS". | Below is the instruction that describes the task:
### Input:
Expand special digraphs ``%h``, ``%p``, and ``%r``.
Return a copy of `command` with the following string
substitutions applied:
* ``%h`` is replaced by *addr*
* ``%p`` is replaced by *port*
* ``%r`` is replaced by *user*
* ``%%`` is replaced by ``%``.
See also: man page ``ssh_config``, section "TOKENS".
### Response:
def expand_ssh_proxy_command(command, user, addr, port=22):
"""
Expand special digraphs ``%h``, ``%p``, and ``%r``.
Return a copy of `command` with the following string
substitutions applied:
* ``%h`` is replaced by *addr*
* ``%p`` is replaced by *port*
* ``%r`` is replaced by *user*
* ``%%`` is replaced by ``%``.
See also: man page ``ssh_config``, section "TOKENS".
"""
translated = []
subst = {
'h': list(str(addr)),
'p': list(str(port)),
'r': list(str(user)),
'%': ['%'],
}
escaped = False
for char in command:
if char == '%':
escaped = True
continue
if escaped:
try:
translated.extend(subst[char])
escaped = False
continue
except KeyError:
raise ValueError(
"Unknown digraph `%{0}`"
" in proxy command string `{1}`"
.format(char, command))
else:
translated.append(char)
continue
return ''.join(translated) |
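A brief usage sketch for the token expansion above; the ProxyCommand template is illustrative, not taken from the source:

```python
# Typical ssh_config-style ProxyCommand template with %h/%p/%r tokens.
command = "ssh -W %h:%p %r@bastion.example.org"
print(expand_ssh_proxy_command(command, user="alice", addr="10.0.0.7", port=2222))
# -> ssh -W 10.0.0.7:2222 alice@bastion.example.org
```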
def visit_importfrom(self, node):
"""visit astroid.ImportFrom and catch modules for package diagram
"""
if self.pkgdiagram:
self.pkgdiagram.add_from_depend(node, node.modname) | visit astroid.ImportFrom and catch modules for package diagram | Below is the instruction that describes the task:
### Input:
visit astroid.ImportFrom and catch modules for package diagram
### Response:
def visit_importfrom(self, node):
"""visit astroid.ImportFrom and catch modules for package diagram
"""
if self.pkgdiagram:
self.pkgdiagram.add_from_depend(node, node.modname) |
def pythonize(self, val):
"""Convert value into a list::
* split value (or each element if value is a list) on comma char
* strip split values
:param val: value to convert
:type val: str
:return: list corresponding to value
:rtype: list
"""
if isinstance(val, list):
return [s.strip() if hasattr(s, "strip") else s
for s in list_split(val, self.split_on_comma)
if hasattr(s, "strip") and s.strip() != '' or self.keep_empty]
return [s.strip() if hasattr(s, "strip") else s
for s in to_split(val, self.split_on_comma)
if hasattr(s, "strip") and s.strip() != '' or self.keep_empty] | Convert value into a list::
* split value (or each element if value is a list) on comma char
* strip split values
:param val: value to convert
:type val: str
:return: list corresponding to value
:rtype: list | Below is the instruction that describes the task:
### Input:
Convert value into a list::
* split value (or each element if value is a list) on comma char
* strip split values
:param val: value to convert
:type val: str
:return: list corresponding to value
:rtype: list
### Response:
def pythonize(self, val):
"""Convert value into a list::
* split value (or each element if value is a list) on comma char
* strip split values
:param val: value to convert
:type val: str
:return: list corresponding to value
:rtype: list
"""
if isinstance(val, list):
return [s.strip() if hasattr(s, "strip") else s
for s in list_split(val, self.split_on_comma)
if hasattr(s, "strip") and s.strip() != '' or self.keep_empty]
return [s.strip() if hasattr(s, "strip") else s
for s in to_split(val, self.split_on_comma)
if hasattr(s, "strip") and s.strip() != '' or self.keep_empty] |
def p_systemcall(self, p):
'systemcall : DOLLER ID LPAREN sysargs RPAREN'
p[0] = SystemCall(p[2], p[4], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | systemcall : DOLLER ID LPAREN sysargs RPAREN | Below is the instruction that describes the task:
### Input:
systemcall : DOLLER ID LPAREN sysargs RPAREN
### Response:
def p_systemcall(self, p):
'systemcall : DOLLER ID LPAREN sysargs RPAREN'
p[0] = SystemCall(p[2], p[4], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def _get_instance(self):
"""Retrieve instance matching instance_id."""
resource = self._connect()
try:
instance = resource.Instance(self.running_instance_id)
except Exception:
raise EC2CloudException(
'Instance with ID: {instance_id} not found.'.format(
instance_id=self.running_instance_id
)
)
return instance | Retrieve instance matching instance_id. | Below is the instruction that describes the task:
### Input:
Retrieve instance matching instance_id.
### Response:
def _get_instance(self):
"""Retrieve instance matching instance_id."""
resource = self._connect()
try:
instance = resource.Instance(self.running_instance_id)
except Exception:
raise EC2CloudException(
'Instance with ID: {instance_id} not found.'.format(
instance_id=self.running_instance_id
)
)
return instance |
def getSpec(cls):
"""
Return the Spec for ApicalTMPairRegion
"""
spec = {
"description": ApicalTMPairRegion.__doc__,
"singleNodeOnly": True,
"inputs": {
"activeColumns": {
"description": ("An array of 0's and 1's representing the active "
"minicolumns, i.e. the input to the TemporalMemory"),
"dataType": "Real32",
"count": 0,
"required": True,
"regionLevel": True,
"isDefaultInput": True,
"requireSplitterMap": False
},
"resetIn": {
"description": ("A boolean flag that indicates whether"
" or not the input vector received in this compute cycle"
" represents the first presentation in a"
" new temporal sequence."),
"dataType": "Real32",
"count": 1,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"basalInput": {
"description": "An array of 0's and 1's representing basal input",
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"basalGrowthCandidates": {
"description": ("An array of 0's and 1's representing basal input " +
"that can be learned on new synapses on basal " +
"segments. If this input is a length-0 array, the " +
"whole basalInput is used."),
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"apicalInput": {
"description": "An array of 0's and 1's representing top down input."
" The input will be provided to apical dendrites.",
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"apicalGrowthCandidates": {
"description": ("An array of 0's and 1's representing apical input " +
"that can be learned on new synapses on apical " +
"segments. If this input is a length-0 array, the " +
"whole apicalInput is used."),
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False},
},
"outputs": {
"predictedCells": {
"description": ("A binary output containing a 1 for every "
"cell that was predicted for this timestep."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
"predictedActiveCells": {
"description": ("A binary output containing a 1 for every "
"cell that transitioned from predicted to active."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
"activeCells": {
"description": ("A binary output containing a 1 for every "
"cell that is currently active."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": True
},
"winnerCells": {
"description": ("A binary output containing a 1 for every "
"'winner' cell in the TM."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
},
"parameters": {
# Input sizes (the network API doesn't provide these during initialize)
"columnCount": {
"description": ("The size of the 'activeColumns' input "
"(i.e. the number of columns)"),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"basalInputWidth": {
"description": "The size of the 'basalInput' input",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"apicalInputWidth": {
"description": "The size of the 'apicalInput' input",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"learn": {
"description": "True if the TM should learn.",
"accessMode": "ReadWrite",
"dataType": "Bool",
"count": 1,
"defaultValue": "true"
},
"cellsPerColumn": {
"description": "Number of cells per column",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"activationThreshold": {
"description": ("If the number of active connected synapses on a "
"segment is at least this threshold, the segment "
"is said to be active."),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"reducedBasalThreshold": {
"description": ("Activation threshold of basal segments for cells "
"with active apical segments (with apicalTiebreak "
"implementation). "),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"initialPermanence": {
"description": "Initial permanence of a new synapse.",
"accessMode": "Read",
"dataType": "Real32",
"count": 1,
"constraints": ""
},
"connectedPermanence": {
"description": ("If the permanence value for a synapse is greater "
"than this value, it is said to be connected."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1,
"constraints": ""
},
"minThreshold": {
"description": ("If the number of synapses active on a segment is at "
"least this threshold, it is selected as the best "
"matching cell in a bursting column."),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"sampleSize": {
"description": ("The desired number of active synapses for an "
"active cell"),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"learnOnOneCell": {
"description": ("If True, the winner cell for each column will be"
" fixed between resets."),
"accessMode": "Read",
"dataType": "Bool",
"count": 1,
"defaultValue": "false"
},
"maxSynapsesPerSegment": {
"description": "The maximum number of synapses per segment. Use -1 "
"for unlimited.",
"accessMode": "Read",
"dataType": "Int32",
"count": 1
},
"maxSegmentsPerCell": {
"description": "The maximum number of segments per cell",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"permanenceIncrement": {
"description": ("Amount by which permanences of synapses are "
"incremented during learning."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"permanenceDecrement": {
"description": ("Amount by which permanences of synapses are "
"decremented during learning."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"basalPredictedSegmentDecrement": {
"description": ("Amount by which active permanences of synapses of "
"previously predicted but inactive segments are "
"decremented."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"apicalPredictedSegmentDecrement": {
"description": ("Amount by which active permanences of synapses of "
"previously predicted but inactive segments are "
"decremented."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"seed": {
"description": "Seed for the random number generator.",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"implementation": {
"description": "Apical implementation",
"accessMode": "Read",
"dataType": "Byte",
"count": 0,
"constraints": ("enum: ApicalTiebreak, ApicalTiebreakCPP, ApicalDependent"),
"defaultValue": "ApicalTiebreakCPP"
},
},
}
return spec | Return the Spec for ApicalTMPairRegion | Below is the instruction that describes the task:
### Input:
Return the Spec for ApicalTMPairRegion
### Response:
def getSpec(cls):
"""
Return the Spec for ApicalTMPairRegion
"""
spec = {
"description": ApicalTMPairRegion.__doc__,
"singleNodeOnly": True,
"inputs": {
"activeColumns": {
"description": ("An array of 0's and 1's representing the active "
"minicolumns, i.e. the input to the TemporalMemory"),
"dataType": "Real32",
"count": 0,
"required": True,
"regionLevel": True,
"isDefaultInput": True,
"requireSplitterMap": False
},
"resetIn": {
"description": ("A boolean flag that indicates whether"
" or not the input vector received in this compute cycle"
" represents the first presentation in a"
" new temporal sequence."),
"dataType": "Real32",
"count": 1,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"basalInput": {
"description": "An array of 0's and 1's representing basal input",
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"basalGrowthCandidates": {
"description": ("An array of 0's and 1's representing basal input " +
"that can be learned on new synapses on basal " +
"segments. If this input is a length-0 array, the " +
"whole basalInput is used."),
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"apicalInput": {
"description": "An array of 0's and 1's representing top down input."
" The input will be provided to apical dendrites.",
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"apicalGrowthCandidates": {
"description": ("An array of 0's and 1's representing apical input " +
"that can be learned on new synapses on apical " +
"segments. If this input is a length-0 array, the " +
"whole apicalInput is used."),
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False},
},
"outputs": {
"predictedCells": {
"description": ("A binary output containing a 1 for every "
"cell that was predicted for this timestep."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
"predictedActiveCells": {
"description": ("A binary output containing a 1 for every "
"cell that transitioned from predicted to active."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
"activeCells": {
"description": ("A binary output containing a 1 for every "
"cell that is currently active."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": True
},
"winnerCells": {
"description": ("A binary output containing a 1 for every "
"'winner' cell in the TM."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
},
"parameters": {
# Input sizes (the network API doesn't provide these during initialize)
"columnCount": {
"description": ("The size of the 'activeColumns' input "
"(i.e. the number of columns)"),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"basalInputWidth": {
"description": "The size of the 'basalInput' input",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"apicalInputWidth": {
"description": "The size of the 'apicalInput' input",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"learn": {
"description": "True if the TM should learn.",
"accessMode": "ReadWrite",
"dataType": "Bool",
"count": 1,
"defaultValue": "true"
},
"cellsPerColumn": {
"description": "Number of cells per column",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"activationThreshold": {
"description": ("If the number of active connected synapses on a "
"segment is at least this threshold, the segment "
"is said to be active."),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"reducedBasalThreshold": {
"description": ("Activation threshold of basal segments for cells "
"with active apical segments (with apicalTiebreak "
"implementation). "),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"initialPermanence": {
"description": "Initial permanence of a new synapse.",
"accessMode": "Read",
"dataType": "Real32",
"count": 1,
"constraints": ""
},
"connectedPermanence": {
"description": ("If the permanence value for a synapse is greater "
"than this value, it is said to be connected."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1,
"constraints": ""
},
"minThreshold": {
"description": ("If the number of synapses active on a segment is at "
"least this threshold, it is selected as the best "
"matching cell in a bursting column."),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"sampleSize": {
"description": ("The desired number of active synapses for an "
"active cell"),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"learnOnOneCell": {
"description": ("If True, the winner cell for each column will be"
" fixed between resets."),
"accessMode": "Read",
"dataType": "Bool",
"count": 1,
"defaultValue": "false"
},
"maxSynapsesPerSegment": {
"description": "The maximum number of synapses per segment. Use -1 "
"for unlimited.",
"accessMode": "Read",
"dataType": "Int32",
"count": 1
},
"maxSegmentsPerCell": {
"description": "The maximum number of segments per cell",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"permanenceIncrement": {
"description": ("Amount by which permanences of synapses are "
"incremented during learning."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"permanenceDecrement": {
"description": ("Amount by which permanences of synapses are "
"decremented during learning."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"basalPredictedSegmentDecrement": {
"description": ("Amount by which active permanences of synapses of "
"previously predicted but inactive segments are "
"decremented."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"apicalPredictedSegmentDecrement": {
"description": ("Amount by which active permanences of synapses of "
"previously predicted but inactive segments are "
"decremented."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"seed": {
"description": "Seed for the random number generator.",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"implementation": {
"description": "Apical implementation",
"accessMode": "Read",
"dataType": "Byte",
"count": 0,
"constraints": ("enum: ApicalTiebreak, ApicalTiebreakCPP, ApicalDependent"),
"defaultValue": "ApicalTiebreakCPP"
},
},
}
return spec |
def cpu_count(logical=True):
"""Return system CPU count
"""
if logical:
from multiprocessing import cpu_count
ncpu=cpu_count()
else:
import psutil
ncpu=psutil.cpu_count(logical=False)
return ncpu | Return system CPU count | Below is the instruction that describes the task:
### Input:
Return system CPU count
### Response:
def cpu_count(logical=True):
"""Return system CPU count
"""
if logical:
from multiprocessing import cpu_count
ncpu=cpu_count()
else:
import psutil
ncpu=psutil.cpu_count(logical=False)
return ncpu |
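A short usage sketch; the physical-core branch needs `psutil` installed, which the function imports lazily:

```python
print(cpu_count())               # logical CPUs via multiprocessing.cpu_count()
print(cpu_count(logical=False))  # physical cores via psutil.cpu_count(logical=False)
```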
def get_value(self, row, column):
"""Return the value of the DataFrame."""
# To increase the performance iat is used but that requires error
# handling, so fallback uses iloc
try:
value = self.df.iat[row, column]
except OutOfBoundsDatetime:
value = self.df.iloc[:, column].astype(str).iat[row]
except:
value = self.df.iloc[row, column]
return value | Return the value of the DataFrame. | Below is the instruction that describes the task:
### Input:
Return the value of the DataFrame.
### Response:
def get_value(self, row, column):
"""Return the value of the DataFrame."""
# To increase the performance iat is used but that requires error
# handling, so fallback uses iloc
try:
value = self.df.iat[row, column]
except OutOfBoundsDatetime:
value = self.df.iloc[:, column].astype(str).iat[row]
except:
value = self.df.iloc[row, column]
return value |
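The `iat`-first pattern exists because `iat` is the fast scalar accessor but can raise for out-of-range datetimes; a standalone pandas sketch of the same fallback logic (independent of the class above, with toy data):

```python
import pandas as pd
from pandas.errors import OutOfBoundsDatetime

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

def get_value(df, row, column):
    # Fast scalar access first, then fall back to the slower but more tolerant iloc.
    try:
        return df.iat[row, column]
    except OutOfBoundsDatetime:
        return df.iloc[:, column].astype(str).iat[row]
    except Exception:
        return df.iloc[row, column]

print(get_value(df, 1, 1))  # 'y'
```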
def threshold(self, vmin=None, vmax=None, replaceWith=None):
"""
Binary or continuous volume thresholding.
"""
th = vtk.vtkImageThreshold()
th.SetInputData(self.image)
if vmin is not None and vmax is not None:
th.ThresholdBetween(vmin, vmax)
elif vmin is not None:
th.ThresholdByUpper(vmin)
elif vmax is not None:
th.ThresholdByLower(vmax)
if replaceWith:
th.ReplaceOutOn()
th.SetOutValue(replaceWith)
th.Update()
self.image = th.GetOutput()
self.mapper.SetInputData(self.image)
self.mapper.Modified()
return self | Binary or continuous volume thresholding. | Below is the instruction that describes the task:
### Input:
Binary or continuous volume thresholding.
### Response:
def threshold(self, vmin=None, vmax=None, replaceWith=None):
"""
Binary or continuous volume thresholding.
"""
th = vtk.vtkImageThreshold()
th.SetInputData(self.image)
if vmin is not None and vmax is not None:
th.ThresholdBetween(vmin, vmax)
elif vmin is not None:
th.ThresholdByUpper(vmin)
elif vmax is not None:
th.ThresholdByLower(vmax)
if replaceWith:
th.ReplaceOutOn()
th.SetOutValue(replaceWith)
th.Update()
self.image = th.GetOutput()
self.mapper.SetInputData(self.image)
self.mapper.Modified()
return self |
def attrs(class_):
""" Like attr.s with slots=True,
but with attributes extracted from __init__ method signature.
slots=True ensures that signature matches what really happens
(we can't define different attributes on self).
It is useful if we still want __init__ for proper type-checking and
do not want to repeat attribute definitions in the class body.
"""
attrs_kwargs = {}
for method, kw_name in [
('__repr__', 'repr'),
('__eq__', 'cmp'),
('__hash__', 'hash'),
]:
if method in class_.__dict__:
# Allow to redefine a special method (or else attr.s will do it)
attrs_kwargs[kw_name] = False
init_args = inspect.getargspec(class_.__init__)
defaults_shift = len(init_args.args) - len(init_args.defaults or []) - 1
these = {}
for idx, arg in enumerate(init_args.args[1:]):
attrib_kwargs = {}
if idx >= defaults_shift:
attrib_kwargs['default'] = init_args.defaults[idx - defaults_shift]
these[arg] = attr.ib(**attrib_kwargs)
return attr.s(class_, these=these, init=False, slots=True, **attrs_kwargs) | Like attr.s with slots=True,
but with attributes extracted from __init__ method signature.
slots=True ensures that signature matches what really happens
(we can't define different attributes on self).
It is useful if we still want __init__ for proper type-checking and
do not want to repeat attribute definitions in the class body. | Below is the instruction that describes the task:
### Input:
Like attr.s with slots=True,
but with attributes extracted from __init__ method signature.
slots=True ensures that signature matches what really happens
(we can't define different attributes on self).
It is useful if we still want __init__ for proper type-checking and
do not want to repeat attribute definitions in the class body.
### Response:
def attrs(class_):
""" Like attr.s with slots=True,
but with attributes extracted from __init__ method signature.
slots=True ensures that signature matches what really happens
(we can't define different attributes on self).
It is useful if we still want __init__ for proper type-checking and
do not want to repeat attribute definitions in the class body.
"""
attrs_kwargs = {}
for method, kw_name in [
('__repr__', 'repr'),
('__eq__', 'cmp'),
('__hash__', 'hash'),
]:
if method in class_.__dict__:
# Allow to redefine a special method (or else attr.s will do it)
attrs_kwargs[kw_name] = False
init_args = inspect.getargspec(class_.__init__)
defaults_shift = len(init_args.args) - len(init_args.defaults or []) - 1
these = {}
for idx, arg in enumerate(init_args.args[1:]):
attrib_kwargs = {}
if idx >= defaults_shift:
attrib_kwargs['default'] = init_args.defaults[idx - defaults_shift]
these[arg] = attr.ib(**attrib_kwargs)
return attr.s(class_, these=these, init=False, slots=True, **attrs_kwargs) |
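A usage sketch for the decorator above; the `Point` class is hypothetical, the `attr` package must be installed, and the decorator itself relies on `inspect.getargspec`, which only exists on Python versions before 3.11:

```python
import attr  # the attrs package used by the decorator above

@attrs
class Point:
    def __init__(self, x, y=0):
        self.x = x
        self.y = y

# attr.s derives __repr__/__eq__ from the __init__ signature; slots=True means
# assigning attributes not declared in __init__ raises AttributeError.
print(Point(1, 2))               # expected: Point(x=1, y=2)
print(Point(1) == Point(1, 0))   # expected: True
```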
def check_filepath(self, path, filename):
"""
Check and return the final filepath to settings
Args:
path (str): Directory path where to search for settings file.
filename (str): Filename to use to search for settings file.
Raises:
boussole.exceptions.SettingsBackendError: If determined filepath
does not exist or is a directory.
Returns:
string: Settings file path, joining given path and filename.
"""
settings_path = os.path.join(path, filename)
if not os.path.exists(settings_path) or \
not os.path.isfile(settings_path):
msg = "Unable to find settings file: {}"
raise SettingsBackendError(msg.format(settings_path))
return settings_path | Check and return the final filepath to settings
Args:
path (str): Directory path where to search for settings file.
filename (str): Filename to use to search for settings file.
Raises:
boussole.exceptions.SettingsBackendError: If determined filepath
does not exist or is a directory.
Returns:
string: Settings file path, joining given path and filename. | Below is the instruction that describes the task:
### Input:
Check and return the final filepath to settings
Args:
path (str): Directory path where to search for settings file.
filename (str): Filename to use to search for settings file.
Raises:
boussole.exceptions.SettingsBackendError: If determined filepath
does not exist or is a directory.
Returns:
string: Settings file path, joining given path and filename.
### Response:
def check_filepath(self, path, filename):
"""
Check and return the final filepath to settings
Args:
path (str): Directory path where to search for settings file.
filename (str): Filename to use to search for settings file.
Raises:
boussole.exceptions.SettingsBackendError: If determined filepath
does not exist or is a directory.
Returns:
string: Settings file path, joining given path and filename.
"""
settings_path = os.path.join(path, filename)
if not os.path.exists(settings_path) or \
not os.path.isfile(settings_path):
msg = "Unable to find settings file: {}"
raise SettingsBackendError(msg.format(settings_path))
return settings_path |
def get_since_until(time_range: Optional[str] = None,
since: Optional[str] = None,
until: Optional[str] = None,
time_shift: Optional[str] = None,
relative_end: Optional[str] = None) -> Tuple[datetime, datetime]:
"""Return `since` and `until` date time tuple from string representations of
time_range, since, until and time_shift.
This function supports both reading the keys separately (from `since` and
`until`), as well as the new `time_range` key. Valid formats are:
- ISO 8601
- X days/years/hours/day/year/weeks
- X days/years/hours/day/year/weeks ago
- X days/years/hours/day/year/weeks from now
- freeform
Additionally, for `time_range` (these specify both `since` and `until`):
- Last day
- Last week
- Last month
- Last quarter
- Last year
- No filter
- Last X seconds/minutes/hours/days/weeks/months/years
- Next X seconds/minutes/hours/days/weeks/months/years
"""
separator = ' : '
relative_end = parse_human_datetime(relative_end if relative_end else 'today')
common_time_frames = {
'Last day': (relative_end - relativedelta(days=1), relative_end), # noqa: T400
'Last week': (relative_end - relativedelta(weeks=1), relative_end), # noqa: T400
'Last month': (relative_end - relativedelta(months=1), relative_end), # noqa: E501, T400
'Last quarter': (relative_end - relativedelta(months=3), relative_end), # noqa: E501, T400
'Last year': (relative_end - relativedelta(years=1), relative_end), # noqa: T400
}
if time_range:
if separator in time_range:
since, until = time_range.split(separator, 1)
if since and since not in common_time_frames:
since = add_ago_to_since(since)
since = parse_human_datetime(since)
until = parse_human_datetime(until)
elif time_range in common_time_frames:
since, until = common_time_frames[time_range]
elif time_range == 'No filter':
since = until = None
else:
rel, num, grain = time_range.split()
if rel == 'Last':
since = relative_end - relativedelta(**{grain: int(num)}) # noqa: T400
until = relative_end
else: # rel == 'Next'
since = relative_end
until = relative_end + relativedelta(**{grain: int(num)}) # noqa: T400
else:
since = since or ''
if since:
since = add_ago_to_since(since)
since = parse_human_datetime(since)
until = parse_human_datetime(until) if until else relative_end
if time_shift:
time_shift = parse_human_timedelta(time_shift)
since = since if since is None else (since - time_shift) # noqa: T400
until = until if until is None else (until - time_shift) # noqa: T400
if since and until and since > until:
raise ValueError(_('From date cannot be larger than to date'))
return since, until | Return `since` and `until` date time tuple from string representations of
time_range, since, until and time_shift.
This function supports both reading the keys separately (from `since` and
`until`), as well as the new `time_range` key. Valid formats are:
- ISO 8601
- X days/years/hours/day/year/weeks
- X days/years/hours/day/year/weeks ago
- X days/years/hours/day/year/weeks from now
- freeform
Additionally, for `time_range` (these specify both `since` and `until`):
- Last day
- Last week
- Last month
- Last quarter
- Last year
- No filter
- Last X seconds/minutes/hours/days/weeks/months/years
- Next X seconds/minutes/hours/days/weeks/months/years | Below is the instruction that describes the task:
### Input:
Return `since` and `until` date time tuple from string representations of
time_range, since, until and time_shift.
This function supports both reading the keys separately (from `since` and
`until`), as well as the new `time_range` key. Valid formats are:
- ISO 8601
- X days/years/hours/day/year/weeks
- X days/years/hours/day/year/weeks ago
- X days/years/hours/day/year/weeks from now
- freeform
Additionally, for `time_range` (these specify both `since` and `until`):
- Last day
- Last week
- Last month
- Last quarter
- Last year
- No filter
- Last X seconds/minutes/hours/days/weeks/months/years
- Next X seconds/minutes/hours/days/weeks/months/years
### Response:
def get_since_until(time_range: Optional[str] = None,
since: Optional[str] = None,
until: Optional[str] = None,
time_shift: Optional[str] = None,
relative_end: Optional[str] = None) -> Tuple[datetime, datetime]:
"""Return `since` and `until` date time tuple from string representations of
time_range, since, until and time_shift.
This function supports both reading the keys separately (from `since` and
`until`), as well as the new `time_range` key. Valid formats are:
- ISO 8601
- X days/years/hours/day/year/weeks
- X days/years/hours/day/year/weeks ago
- X days/years/hours/day/year/weeks from now
- freeform
Additionally, for `time_range` (these specify both `since` and `until`):
- Last day
- Last week
- Last month
- Last quarter
- Last year
- No filter
- Last X seconds/minutes/hours/days/weeks/months/years
- Next X seconds/minutes/hours/days/weeks/months/years
"""
separator = ' : '
relative_end = parse_human_datetime(relative_end if relative_end else 'today')
common_time_frames = {
'Last day': (relative_end - relativedelta(days=1), relative_end), # noqa: T400
'Last week': (relative_end - relativedelta(weeks=1), relative_end), # noqa: T400
'Last month': (relative_end - relativedelta(months=1), relative_end), # noqa: E501, T400
'Last quarter': (relative_end - relativedelta(months=3), relative_end), # noqa: E501, T400
'Last year': (relative_end - relativedelta(years=1), relative_end), # noqa: T400
}
if time_range:
if separator in time_range:
since, until = time_range.split(separator, 1)
if since and since not in common_time_frames:
since = add_ago_to_since(since)
since = parse_human_datetime(since)
until = parse_human_datetime(until)
elif time_range in common_time_frames:
since, until = common_time_frames[time_range]
elif time_range == 'No filter':
since = until = None
else:
rel, num, grain = time_range.split()
if rel == 'Last':
since = relative_end - relativedelta(**{grain: int(num)}) # noqa: T400
until = relative_end
else: # rel == 'Next'
since = relative_end
until = relative_end + relativedelta(**{grain: int(num)}) # noqa: T400
else:
since = since or ''
if since:
since = add_ago_to_since(since)
since = parse_human_datetime(since)
until = parse_human_datetime(until) if until else relative_end
if time_shift:
time_shift = parse_human_timedelta(time_shift)
since = since if since is None else (since - time_shift) # noqa: T400
until = until if until is None else (until - time_shift) # noqa: T400
if since and until and since > until:
raise ValueError(_('From date cannot be larger than to date'))
return since, until |
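The common time frames rely on `dateutil`'s `relativedelta`; a small standalone sketch of that arithmetic, independent of the `parse_human_datetime` helpers used above:

```python
from datetime import datetime
from dateutil.relativedelta import relativedelta

relative_end = datetime(2019, 4, 15)
common_time_frames = {
    'Last day': (relative_end - relativedelta(days=1), relative_end),
    'Last week': (relative_end - relativedelta(weeks=1), relative_end),
    'Last quarter': (relative_end - relativedelta(months=3), relative_end),
}
since, until = common_time_frames['Last week']
print(since, until)  # 2019-04-08 00:00:00 2019-04-15 00:00:00
```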
def _fromiter(it, dtype, count, progress, log):
"""Utility function to load an array from an iterator."""
if progress > 0:
it = _iter_withprogress(it, progress, log)
if count is not None:
a = np.fromiter(it, dtype=dtype, count=count)
else:
a = np.fromiter(it, dtype=dtype)
return a | Utility function to load an array from an iterator. | Below is the instruction that describes the task:
### Input:
Utility function to load an array from an iterator.
### Response:
def _fromiter(it, dtype, count, progress, log):
"""Utility function to load an array from an iterator."""
if progress > 0:
it = _iter_withprogress(it, progress, log)
if count is not None:
a = np.fromiter(it, dtype=dtype, count=count)
else:
a = np.fromiter(it, dtype=dtype)
return a |
def ssml_emphasis(self, words, level=None, **kwargs):
"""
Create an <Emphasis> element
:param words: Words to emphasize
:param level: Specify the degree of emphasis
:param kwargs: additional attributes
:returns: <Emphasis> element
"""
return self.nest(SsmlEmphasis(words, level=level, **kwargs)) | Create an <Emphasis> element
:param words: Words to emphasize
:param level: Specify the degree of emphasis
:param kwargs: additional attributes
:returns: <Emphasis> element | Below is the instruction that describes the task:
### Input:
Create an <Emphasis> element
:param words: Words to emphasize
:param level: Specify the degree of emphasis
:param kwargs: additional attributes
:returns: <Emphasis> element
### Response:
def ssml_emphasis(self, words, level=None, **kwargs):
"""
Create an <Emphasis> element
:param words: Words to emphasize
:param level: Specify the degree of emphasis
:param kwargs: additional attributes
:returns: <Emphasis> element
"""
return self.nest(SsmlEmphasis(words, level=level, **kwargs)) |
def _to_bits(nqbits, ncbits=0, func=None):
"""Convert gate arguments to [qu|cl]bits from integers, slices, ranges, etc.
For example circuit.h(0) -> circuit.h(QuantumRegister(2)[0]) """
if func is None:
return functools.partial(_to_bits, nqbits, ncbits)
@functools.wraps(func)
def wrapper(self, *args):
qbits = self.qubits()
cbits = self.clbits()
nparams = len(args) - nqbits - ncbits
params = args[:nparams]
qb_args = args[nparams:nparams + nqbits]
cl_args = args[nparams + nqbits:]
args = list(params) + _convert_to_bits(qb_args, qbits) + _convert_to_bits(cl_args, cbits)
return func(self, *args)
return wrapper | Convert gate arguments to [qu|cl]bits from integers, slices, ranges, etc.
For example circuit.h(0) -> circuit.h(QuantumRegister(2)[0]) | Below is the instruction that describes the task:
### Input:
Convert gate arguments to [qu|cl]bits from integers, slices, ranges, etc.
For example circuit.h(0) -> circuit.h(QuantumRegister(2)[0])
### Response:
def _to_bits(nqbits, ncbits=0, func=None):
"""Convert gate arguments to [qu|cl]bits from integers, slices, ranges, etc.
For example circuit.h(0) -> circuit.h(QuantumRegister(2)[0]) """
if func is None:
return functools.partial(_to_bits, nqbits, ncbits)
@functools.wraps(func)
def wrapper(self, *args):
qbits = self.qubits()
cbits = self.clbits()
nparams = len(args) - nqbits - ncbits
params = args[:nparams]
qb_args = args[nparams:nparams + nqbits]
cl_args = args[nparams + nqbits:]
args = list(params) + _convert_to_bits(qb_args, qbits) + _convert_to_bits(cl_args, cbits)
return func(self, *args)
return wrapper |
async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# parse HTML and get results
parser = lxml.etree.HTMLParser()
html = lxml.etree.XML(api_data.decode("latin-1"), parser)
for rank, result in enumerate(__class__.RESULTS_SELECTOR(html), 1):
# extract url
metadata_div = result.find("div")
metadata_json = lxml.etree.tostring(metadata_div, encoding="unicode", method="text")
metadata_json = json.loads(metadata_json)
google_url = result.find("a").get("href")
if google_url is not None:
query = urllib.parse.urlsplit(google_url).query
else:
query = None
if not query:
img_url = metadata_json["ou"]
else:
query = urllib.parse.parse_qs(query)
img_url = query["imgurl"][0]
# extract format
check_metadata = CoverImageMetadata.NONE
format = metadata_json["ity"].lower()
try:
format = SUPPORTED_IMG_FORMATS[format]
except KeyError:
# format could not be identified or is unknown
format = None
check_metadata = CoverImageMetadata.FORMAT
# extract size
if not query:
size = metadata_json["ow"], metadata_json["oh"]
else:
size = tuple(map(int, (query["w"][0], query["h"][0])))
# extract thumbnail url
thumbnail_url = metadata_json["tu"]
# result
results.append(GoogleImagesCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
rank=rank,
check_metadata=check_metadata))
return results | See CoverSource.parseResults. | Below is the instruction that describes the task:
### Input:
See CoverSource.parseResults.
### Response:
async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# parse HTML and get results
parser = lxml.etree.HTMLParser()
html = lxml.etree.XML(api_data.decode("latin-1"), parser)
for rank, result in enumerate(__class__.RESULTS_SELECTOR(html), 1):
# extract url
metadata_div = result.find("div")
metadata_json = lxml.etree.tostring(metadata_div, encoding="unicode", method="text")
metadata_json = json.loads(metadata_json)
google_url = result.find("a").get("href")
if google_url is not None:
query = urllib.parse.urlsplit(google_url).query
else:
query = None
if not query:
img_url = metadata_json["ou"]
else:
query = urllib.parse.parse_qs(query)
img_url = query["imgurl"][0]
# extract format
check_metadata = CoverImageMetadata.NONE
format = metadata_json["ity"].lower()
try:
format = SUPPORTED_IMG_FORMATS[format]
except KeyError:
# format could not be identified or is unknown
format = None
check_metadata = CoverImageMetadata.FORMAT
# extract size
if not query:
size = metadata_json["ow"], metadata_json["oh"]
else:
size = tuple(map(int, (query["w"][0], query["h"][0])))
# extract thumbnail url
thumbnail_url = metadata_json["tu"]
# result
results.append(GoogleImagesCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
rank=rank,
check_metadata=check_metadata))
return results |
def doorient(self):
"""
NOTE: we need to retrieve values in case no modifications are done.
(since we'd get a closed h5py handle)
"""
assert self.cal1Dfn.is_file(
), 'please specify filename for each camera under [cam]/cal1Dname: in .ini file {}'.format(self.cal1Dfn)
with h5py.File(self.cal1Dfn, 'r') as f:
az = f['az'][()]
el = f['el'][()]
ra = f['ra'][()]
dec = f['dec'][()]
assert az.ndim == el.ndim == 2
assert az.shape == el.shape
if self.transpose:
logging.debug(
'transposing cam #{} az/el/ra/dec data. '.format(self.name))
az = az.T
el = el.T
ra = ra.T
dec = dec.T
if self.fliplr:
logging.debug(
'flipping horizontally cam #{} az/el/ra/dec data.'.format(self.name))
az = np.fliplr(az)
el = np.fliplr(el)
ra = np.fliplr(ra)
dec = np.fliplr(dec)
if self.flipud:
logging.debug(
'flipping vertically cam #{} az/el/ra/dec data.'.format(self.name))
az = np.flipud(az)
el = np.flipud(el)
ra = np.flipud(ra)
dec = np.flipud(dec)
if self.rotccw != 0:
logging.debug(
'rotating cam #{} az/el/ra/dec data.'.format(self.name))
az = np.rot90(az, self.rotccw)
el = np.rot90(el, self.rotccw)
ra = np.rot90(ra, self.rotccw)
dec = np.rot90(dec, self.rotccw)
self.az = az
self.el = el
self.ra = ra
self.dec = dec | NOTE: we need to retrieve values in case no modifications are done.
(since we'd get a closed h5py handle) | Below is the instruction that describes the task:
### Input:
NOTE: we need to retrieve values in case no modifications are done.
(since we'd get a closed h5py handle)
### Response:
def doorient(self):
"""
NOTE: we need to retrieve values in case no modifications are done.
(since we'd get a closed h5py handle)
"""
assert self.cal1Dfn.is_file(
), 'please specify filename for each camera under [cam]/cal1Dname: in .ini file {}'.format(self.cal1Dfn)
with h5py.File(self.cal1Dfn, 'r') as f:
az = f['az'][()]
el = f['el'][()]
ra = f['ra'][()]
dec = f['dec'][()]
assert az.ndim == el.ndim == 2
assert az.shape == el.shape
if self.transpose:
logging.debug(
'transposing cam #{} az/el/ra/dec data. '.format(self.name))
az = az.T
el = el.T
ra = ra.T
dec = dec.T
if self.fliplr:
logging.debug(
'flipping horizontally cam #{} az/el/ra/dec data.'.format(self.name))
az = np.fliplr(az)
el = np.fliplr(el)
ra = np.fliplr(ra)
dec = np.fliplr(dec)
if self.flipud:
logging.debug(
'flipping vertically cam #{} az/el/ra/dec data.'.format(self.name))
az = np.flipud(az)
el = np.flipud(el)
ra = np.flipud(ra)
dec = np.flipud(dec)
if self.rotccw != 0:
logging.debug(
'rotating cam #{} az/el/ra/dec data.'.format(self.name))
az = np.rot90(az, self.rotccw)
el = np.rot90(el, self.rotccw)
ra = np.rot90(ra, self.rotccw)
dec = np.rot90(dec, self.rotccw)
self.az = az
self.el = el
self.ra = ra
self.dec = dec |
def _ParseBinaryDataAsString(self, parser_mediator, binary_data_value):
"""Parses a binary data value as string
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
binary_data_value (bytes): binary data value
(CSSM_DB_ATTRIBUTE_FORMAT_BLOB)
Returns:
str: binary data value formatted as a string or None if no string could
be extracted or binary data value is None (NULL).
"""
if not binary_data_value:
return None
try:
return binary_data_value.decode('utf-8')
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'invalid binary data string value: {0:s}'.format(
repr(binary_data_value)))
return None | Parses a binary data value as string
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
binary_data_value (bytes): binary data value
(CSSM_DB_ATTRIBUTE_FORMAT_BLOB)
Returns:
str: binary data value formatted as a string or None if no string could
be extracted or binary data value is None (NULL). | Below is the instruction that describes the task:
### Input:
Parses a binary data value as string
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
binary_data_value (bytes): binary data value
(CSSM_DB_ATTRIBUTE_FORMAT_BLOB)
Returns:
str: binary data value formatted as a string or None if no string could
be extracted or binary data value is None (NULL).
### Response:
def _ParseBinaryDataAsString(self, parser_mediator, binary_data_value):
"""Parses a binary data value as string
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
binary_data_value (bytes): binary data value
(CSSM_DB_ATTRIBUTE_FORMAT_BLOB)
Returns:
str: binary data value formatted as a string or None if no string could
be extracted or binary data value is None (NULL).
"""
if not binary_data_value:
return None
try:
return binary_data_value.decode('utf-8')
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'invalid binary data string value: {0:s}'.format(
repr(binary_data_value)))
return None |
def union(self, key, *others):
"""Return a new set with elements from the set and all others."""
if not isinstance(key, str):
raise ValueError("String expected.")
self.db.sunionstore(key, [self.key] + [o.key for o in others])
return Set(key) | Return a new set with elements from the set and all others. | Below is the instruction that describes the task:
### Input:
Return a new set with elements from the set and all others.
### Response:
def union(self, key, *others):
"""Return a new set with elements from the set and all others."""
if not isinstance(key, str):
raise ValueError("String expected.")
self.db.sunionstore(key, [self.key] + [o.key for o in others])
return Set(key) |
def _parse_nodes_section(f, current_section, nodes):
"""Parse TSPLIB NODE_COORD_SECTION or DEMAND_SECTION from file descript f
Returns a dict containing the node as key
"""
section = {}
dimensions = None
if current_section == 'NODE_COORD_SECTION':
dimensions = 3 # i: (i, j)
elif current_section == 'DEMAND_SECTION':
dimensions = 2 # i: q
else:
raise ParseException('Invalid section {}'.format(current_section))
n = 0
for line in f:
line = strip(line)
# Check dimensions
definitions = re.split(r'\s*', line)
if len(definitions) != dimensions:
raise ParseException('Invalid dimensions from section {}. Expected: {}'.format(current_section, dimensions))
node = int(definitions[0])
values = [int(v) for v in definitions[1:]]
if len(values) == 1:
values = values[0]
section[node] = values
n = n + 1
if n == nodes:
break
# Assert all nodes were read
if n != nodes:
raise ParseException('Missing {} nodes definition from section {}'.format(nodes - n, current_section))
return section | Parse TSPLIB NODE_COORD_SECTION or DEMAND_SECTION from file descriptor f
Returns a dict containing the node as key | Below is the instruction that describes the task:
### Input:
Parse TSPLIB NODE_COORD_SECTION or DEMAND_SECTION from file descriptor f
Returns a dict containing the node as key
### Response:
def _parse_nodes_section(f, current_section, nodes):
"""Parse TSPLIB NODE_COORD_SECTION or DEMAND_SECTION from file descript f
Returns a dict containing the node as key
"""
section = {}
dimensions = None
if current_section == 'NODE_COORD_SECTION':
dimensions = 3 # i: (i, j)
elif current_section == 'DEMAND_SECTION':
dimensions = 2 # i: q
else:
raise ParseException('Invalid section {}'.format(current_section))
n = 0
for line in f:
line = strip(line)
# Check dimensions
definitions = re.split(r'\s*', line)
if len(definitions) != dimensions:
raise ParseException('Invalid dimensions from section {}. Expected: {}'.format(current_section, dimensions))
node = int(definitions[0])
values = [int(v) for v in definitions[1:]]
if len(values) == 1:
values = values[0]
section[node] = values
n = n + 1
if n == nodes:
break
# Assert all nodes were read
if n != nodes:
raise ParseException('Missing {} nodes definition from section {}'.format(nodes - n, current_section))
return section |
def simple_merge(kls, skeletons):
"""
Simple concatenation of skeletons into one object
without adding edges between them.
"""
if len(skeletons) == 0:
return PrecomputedSkeleton()
if type(skeletons[0]) is np.ndarray:
skeletons = [ skeletons ]
ct = 0
edges = []
for skel in skeletons:
edge = skel.edges + ct
edges.append(edge)
ct += skel.vertices.shape[0]
return PrecomputedSkeleton(
vertices=np.concatenate([ skel.vertices for skel in skeletons ], axis=0),
edges=np.concatenate(edges, axis=0),
radii=np.concatenate([ skel.radii for skel in skeletons ], axis=0),
vertex_types=np.concatenate([ skel.vertex_types for skel in skeletons ], axis=0),
segid=skeletons[0].id,
) | Simple concatenation of skeletons into one object
without adding edges between them. | Below is the instruction that describes the task:
### Input:
Simple concatenation of skeletons into one object
without adding edges between them.
### Response:
def simple_merge(kls, skeletons):
"""
Simple concatenation of skeletons into one object
without adding edges between them.
"""
if len(skeletons) == 0:
return PrecomputedSkeleton()
if type(skeletons[0]) is np.ndarray:
skeletons = [ skeletons ]
ct = 0
edges = []
for skel in skeletons:
edge = skel.edges + ct
edges.append(edge)
ct += skel.vertices.shape[0]
return PrecomputedSkeleton(
vertices=np.concatenate([ skel.vertices for skel in skeletons ], axis=0),
edges=np.concatenate(edges, axis=0),
radii=np.concatenate([ skel.radii for skel in skeletons ], axis=0),
vertex_types=np.concatenate([ skel.vertex_types for skel in skeletons ], axis=0),
segid=skeletons[0].id,
) |
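The essential bookkeeping is offsetting each skeleton's edge indices by the running vertex count; a standalone numpy sketch with toy arrays (not real skeleton data):

```python
import numpy as np

vertices_a = np.zeros((3, 3))
edges_a = np.array([[0, 1], [1, 2]])
vertices_b = np.ones((2, 3))
edges_b = np.array([[0, 1]])

ct = 0
all_edges = []
for vertices, edges in [(vertices_a, edges_a), (vertices_b, edges_b)]:
    all_edges.append(edges + ct)  # shift indices past previously merged vertices
    ct += vertices.shape[0]

merged_vertices = np.concatenate([vertices_a, vertices_b], axis=0)
merged_edges = np.concatenate(all_edges, axis=0)
print(merged_edges)  # [[0 1] [1 2] [3 4]]
```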
def _get_dataframe(self):
"""
Load dataframe based on specified connection
:return:
"""
if self.source_conn is None: # Use local file
return ge.read_csv(self.dataset_name, **self.dataset_params)
if isinstance(self.source_conn, S3Hook):
hook = ExpectationS3CsvHook(aws_conn_id=self.source_conn_id)
return hook.get_ge_df(self.dataset_name, self.source_bucket_name, **self.dataset_params)
if isinstance(self.source_conn, DbApiHook):
hook = ExpectationMySQLHook(mysql_conn_id=self.source_conn_id)
return hook.get_ge_df(self.dataset_name, **self.dataset_params) | Load dataframe based on specified connection
:return: | Below is the instruction that describes the task:
### Input:
Load dataframe based on specified connection
:return:
### Response:
def _get_dataframe(self):
"""
Load dataframe based on specified connection
:return:
"""
if self.source_conn is None: # Use local file
return ge.read_csv(self.dataset_name, **self.dataset_params)
if isinstance(self.source_conn, S3Hook):
hook = ExpectationS3CsvHook(aws_conn_id=self.source_conn_id)
return hook.get_ge_df(self.dataset_name, self.source_bucket_name, **self.dataset_params)
if isinstance(self.source_conn, DbApiHook):
hook = ExpectationMySQLHook(mysql_conn_id=self.source_conn_id)
return hook.get_ge_df(self.dataset_name, **self.dataset_params) |
def get_nodesitemtypeinsertion(cls, itemgroup, indent) -> str:
"""Return a string defining the required types for the given
combination of an exchange item group and |Node| objects.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_nodesitemtypeinsertion(
... 'setitems', 1)) # doctest: +ELLIPSIS
<complexType name="nodes_setitemsType">
<sequence>
<element ref="hpcb:selections"
minOccurs="0"/>
<element ref="hpcb:devices"
minOccurs="0"/>
<element name="sim"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="obs"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="sim.series"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="obs.series"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
</sequence>
</complexType>
<BLANKLINE>
"""
blanks = ' ' * (indent * 4)
subs = [
f'{blanks}<complexType name="nodes_{itemgroup}Type">',
f'{blanks} <sequence>',
f'{blanks} <element ref="hpcb:selections"',
f'{blanks} minOccurs="0"/>',
f'{blanks} <element ref="hpcb:devices"',
f'{blanks} minOccurs="0"/>']
type_ = 'getitemType' if itemgroup == 'getitems' else 'setitemType'
for name in ('sim', 'obs', 'sim.series', 'obs.series'):
subs.extend([
f'{blanks} <element name="{name}"',
f'{blanks} type="hpcb:{type_}"',
f'{blanks} minOccurs="0"',
f'{blanks} maxOccurs="unbounded"/>'])
subs.extend([
f'{blanks} </sequence>',
f'{blanks}</complexType>',
f''])
return '\n'.join(subs) | Return a string defining the required types for the given
combination of an exchange item group and |Node| objects.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_nodesitemtypeinsertion(
... 'setitems', 1)) # doctest: +ELLIPSIS
<complexType name="nodes_setitemsType">
<sequence>
<element ref="hpcb:selections"
minOccurs="0"/>
<element ref="hpcb:devices"
minOccurs="0"/>
<element name="sim"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="obs"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="sim.series"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="obs.series"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
</sequence>
</complexType>
<BLANKLINE> | Below is the instruction that describes the task:
### Input:
Return a string defining the required types for the given
combination of an exchange item group and |Node| objects.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_nodesitemtypeinsertion(
... 'setitems', 1)) # doctest: +ELLIPSIS
<complexType name="nodes_setitemsType">
<sequence>
<element ref="hpcb:selections"
minOccurs="0"/>
<element ref="hpcb:devices"
minOccurs="0"/>
<element name="sim"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="obs"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="sim.series"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="obs.series"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
</sequence>
</complexType>
<BLANKLINE>
### Response:
def get_nodesitemtypeinsertion(cls, itemgroup, indent) -> str:
"""Return a string defining the required types for the given
combination of an exchange item group and |Node| objects.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_nodesitemtypeinsertion(
... 'setitems', 1)) # doctest: +ELLIPSIS
<complexType name="nodes_setitemsType">
<sequence>
<element ref="hpcb:selections"
minOccurs="0"/>
<element ref="hpcb:devices"
minOccurs="0"/>
<element name="sim"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="obs"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="sim.series"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
<element name="obs.series"
type="hpcb:setitemType"
minOccurs="0"
maxOccurs="unbounded"/>
</sequence>
</complexType>
<BLANKLINE>
"""
blanks = ' ' * (indent * 4)
subs = [
f'{blanks}<complexType name="nodes_{itemgroup}Type">',
f'{blanks} <sequence>',
f'{blanks} <element ref="hpcb:selections"',
f'{blanks} minOccurs="0"/>',
f'{blanks} <element ref="hpcb:devices"',
f'{blanks} minOccurs="0"/>']
type_ = 'getitemType' if itemgroup == 'getitems' else 'setitemType'
for name in ('sim', 'obs', 'sim.series', 'obs.series'):
subs.extend([
f'{blanks} <element name="{name}"',
f'{blanks} type="hpcb:{type_}"',
f'{blanks} minOccurs="0"',
f'{blanks} maxOccurs="unbounded"/>'])
subs.extend([
f'{blanks} </sequence>',
f'{blanks}</complexType>',
f''])
return '\n'.join(subs) |
def fcmp_ordered(self, cmpop, lhs, rhs, name='', flags=[]):
"""
Floating-point ordered comparison:
name = lhs <cmpop> rhs
where cmpop can be '==', '!=', '<', '<=', '>', '>=', 'ord', 'uno'
"""
if cmpop in _CMP_MAP:
op = 'o' + _CMP_MAP[cmpop]
else:
op = cmpop
instr = instructions.FCMPInstr(self.block, op, lhs, rhs, name=name, flags=flags)
self._insert(instr)
return instr | Floating-point ordered comparison:
name = lhs <cmpop> rhs
where cmpop can be '==', '!=', '<', '<=', '>', '>=', 'ord', 'uno' | Below is the the instruction that describes the task:
### Input:
Floating-point ordered comparison:
name = lhs <cmpop> rhs
where cmpop can be '==', '!=', '<', '<=', '>', '>=', 'ord', 'uno'
### Response:
def fcmp_ordered(self, cmpop, lhs, rhs, name='', flags=[]):
"""
Floating-point ordered comparison:
name = lhs <cmpop> rhs
where cmpop can be '==', '!=', '<', '<=', '>', '>=', 'ord', 'uno'
"""
if cmpop in _CMP_MAP:
op = 'o' + _CMP_MAP[cmpop]
else:
op = cmpop
instr = instructions.FCMPInstr(self.block, op, lhs, rhs, name=name, flags=flags)
self._insert(instr)
return instr |
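A minimal usage sketch for the builder call above, assuming the surrounding class is llvmlite's ir.IRBuilder (module, function, and value names are illustrative):
from llvmlite import ir

module = ir.Module(name="demo")
fnty = ir.FunctionType(ir.IntType(1), [ir.DoubleType(), ir.DoubleType()])
func = ir.Function(module, fnty, name="is_less")
builder = ir.IRBuilder(func.append_basic_block(name="entry"))
a, b = func.args
# ordered comparison: yields False whenever either operand is NaN
cmp_result = builder.fcmp_ordered('<', a, b, name="cmp")
builder.ret(cmp_result)
print(module)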
def _reorder_lines(lines):
"""
Reorder lines so that the distance from the end of one to the beginning of
the next is minimised.
"""
x = 0
y = 0
new_lines = []
# treat the list of lines as a stack, off which we keep popping the best
# one to add next.
while lines:
# looping over all the lines like this isn't terribly efficient, but
# in local tests seems to handle a few thousand lines without a
# problem.
min_dist = None
min_i = None
for i, line in enumerate(lines):
moveto, _, _ = line
dist = abs(moveto.x - x) + abs(moveto.y - y)
if min_dist is None or dist < min_dist:
min_dist = dist
min_i = i
assert min_i is not None
line = lines.pop(min_i)
_, endsat, _ = line
x = endsat.x
y = endsat.y
new_lines.append(line)
return new_lines | Reorder lines so that the distance from the end of one to the beginning of
the next is minimised. | Below is the the instruction that describes the task:
### Input:
Reorder lines so that the distance from the end of one to the beginning of
the next is minimised.
### Response:
def _reorder_lines(lines):
"""
Reorder lines so that the distance from the end of one to the beginning of
the next is minimised.
"""
x = 0
y = 0
new_lines = []
# treat the list of lines as a stack, off which we keep popping the best
# one to add next.
while lines:
# looping over all the lines like this isn't terribly efficient, but
# in local tests seems to handle a few thousand lines without a
# problem.
min_dist = None
min_i = None
for i, line in enumerate(lines):
moveto, _, _ = line
dist = abs(moveto.x - x) + abs(moveto.y - y)
if min_dist is None or dist < min_dist:
min_dist = dist
min_i = i
assert min_i is not None
line = lines.pop(min_i)
_, endsat, _ = line
x = endsat.x
y = endsat.y
new_lines.append(line)
return new_lines |
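A small sketch of calling the greedy reordering above; the point type is hypothetical, since the function only relies on .x/.y attributes and the (moveto, endsat, payload) tuple shape:
from collections import namedtuple

Point = namedtuple('Point', 'x y')
segments = [
    (Point(10, 10), Point(20, 10), 'far segment'),
    (Point(0, 0), Point(5, 0), 'near segment'),
]
ordered = _reorder_lines(segments)   # note: consumes the input list
# the segment whose start is closest to (0, 0) comes out first
assert ordered[0][2] == 'near segment'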
def _generate_data_key(self, algorithm: AlgorithmSuite, encryption_context: Dict[Text, Text]) -> DataKey:
"""Perform the provider-specific data key generation task.
:param algorithm: Algorithm on which to base data key
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param dict encryption_context: Encryption context to use in encryption
:returns: Generated data key
:rtype: aws_encryption_sdk.structures.DataKey
"""
data_key = b"".join([chr(i).encode("utf-8") for i in range(1, algorithm.data_key_len + 1)])
return DataKey(key_provider=self.key_provider, data_key=data_key, encrypted_data_key=self._encrypted_data_key) | Perform the provider-specific data key generation task.
:param algorithm: Algorithm on which to base data key
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param dict encryption_context: Encryption context to use in encryption
:returns: Generated data key
:rtype: aws_encryption_sdk.structures.DataKey | Below is the the instruction that describes the task:
### Input:
Perform the provider-specific data key generation task.
:param algorithm: Algorithm on which to base data key
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param dict encryption_context: Encryption context to use in encryption
:returns: Generated data key
:rtype: aws_encryption_sdk.structures.DataKey
### Response:
def _generate_data_key(self, algorithm: AlgorithmSuite, encryption_context: Dict[Text, Text]) -> DataKey:
"""Perform the provider-specific data key generation task.
:param algorithm: Algorithm on which to base data key
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param dict encryption_context: Encryption context to use in encryption
:returns: Generated data key
:rtype: aws_encryption_sdk.structures.DataKey
"""
data_key = b"".join([chr(i).encode("utf-8") for i in range(1, algorithm.data_key_len + 1)])
return DataKey(key_provider=self.key_provider, data_key=data_key, encrypted_data_key=self._encrypted_data_key) |
def assertFileEncodingNotEqual(self, filename, encoding, msg=None):
'''Fail if ``filename`` is encoded with the given ``encoding``
as determined by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
encoding : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fencoding = self._get_file_encoding(filename)
fname = self._get_file_name(filename)
standardMsg = '%s is %s encoded' % (fname, encoding)
self.assertNotEqual(fencoding.lower(),
encoding.lower(),
self._formatMessage(msg, standardMsg)) | Fail if ``filename`` is encoded with the given ``encoding``
as determined by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
encoding : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like. | Below is the the instruction that describes the task:
### Input:
Fail if ``filename`` is encoded with the given ``encoding``
as determined by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
encoding : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
### Response:
def assertFileEncodingNotEqual(self, filename, encoding, msg=None):
'''Fail if ``filename`` is encoded with the given ``encoding``
as determined by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
encoding : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fencoding = self._get_file_encoding(filename)
fname = self._get_file_name(filename)
standardMsg = '%s is %s encoded' % (fname, encoding)
self.assertNotEqual(fencoding.lower(),
encoding.lower(),
self._formatMessage(msg, standardMsg)) |
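A hedged sketch of using this assertion inside a test case; FileEncodingMixins is an assumed name for the mixin class that provides it, and report.csv is an illustrative path:
import unittest

class EncodingTest(FileEncodingMixins, unittest.TestCase):  # mixin name is an assumption
    def test_report_is_not_latin1(self):
        self.assertFileEncodingNotEqual('report.csv', 'ISO-8859-1',
                                        msg='expected anything but Latin-1')

if __name__ == '__main__':
    unittest.main()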
def amount_object_to_dict(self, amount):
"""Return the dictionary representation of an Amount object.
Amount object must have amount and currency properties and as_tuple method which will return (currency, amount)
and as_quantized method to quantize amount property.
:param amount: instance of Amount object
:return: dict with amount and currency keys.
"""
currency, amount = (
amount.as_quantized(digits=2).as_tuple()
if not isinstance(amount, dict)
else (amount["currency"], amount["amount"])
)
if currency not in self.currencies:
raise ValueError(self.err_unknown_currency.format(currency=currency))
return {
"amount": str(amount),
"currency": str(currency),
} | Return the dictionary representation of an Amount object.
Amount object must have amount and currency properties and as_tuple method which will return (currency, amount)
and as_quantized method to quantize amount property.
:param amount: instance of Amount object
:return: dict with amount and currency keys. | Below is the the instruction that describes the task:
### Input:
Return the dictionary representation of an Amount object.
Amount object must have amount and currency properties and as_tuple method which will return (currency, amount)
and as_quantized method to quantize amount property.
:param amount: instance of Amount object
:return: dict with amount and currency keys.
### Response:
def amount_object_to_dict(self, amount):
"""Return the dictionary representation of an Amount object.
Amount object must have amount and currency properties and as_tuple method which will return (currency, amount)
and as_quantized method to quantize amount property.
:param amount: instance of Amount object
:return: dict with amount and currency keys.
"""
currency, amount = (
amount.as_quantized(digits=2).as_tuple()
if not isinstance(amount, dict)
else (amount["currency"], amount["amount"])
)
if currency not in self.currencies:
raise ValueError(self.err_unknown_currency.format(currency=currency))
return {
"amount": str(amount),
"currency": str(currency),
} |
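A small sketch of the dict input path; the owner class below is hypothetical and only carries the two attributes the method reads:
class Converter:  # hypothetical owner; the real class comes from the payment library
    currencies = {'USD', 'EUR'}
    err_unknown_currency = 'Unknown currency: {currency}'
    amount_object_to_dict = amount_object_to_dict

payload = Converter().amount_object_to_dict({'currency': 'USD', 'amount': '19.99'})
print(payload)  # {'amount': '19.99', 'currency': 'USD'}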
def get_parser(parser):
"""
Grabs the parser.
args:
parser: The parser
"""
parser.description = textwrap.dedent("""
Segment the .po files in LOCALE(s) based on the segmenting rules in
config.yaml.
Note that segmenting is *not* idempotent: it modifies the input file, so
be careful that you don't run it twice on the same file.
""".strip())
parser.add_argument("locale", nargs="+", help="a locale to segment") | Grabs the parser.
args:
parser: The parser | Below is the the instruction that describes the task:
### Input:
Grabs the parser.
args:
parser: The parser
### Response:
def get_parser(parser):
"""
Grabs the parser.
args:
parser: The parser
"""
parser.description = textwrap.dedent("""
Segment the .po files in LOCALE(s) based on the segmenting rules in
config.yaml.
Note that segmenting is *not* idempotent: it modifies the input file, so
be careful that you don't run it twice on the same file.
""".strip())
parser.add_argument("locale", nargs="+", help="a locale to segment") |
def process_subprotocol(
headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]]
) -> Optional[Subprotocol]:
"""
Handle the Sec-WebSocket-Protocol HTTP response header.
Check that it contains exactly one supported subprotocol.
Return the selected subprotocol.
"""
subprotocol: Optional[Subprotocol] = None
header_values = headers.get_all("Sec-WebSocket-Protocol")
if header_values:
if available_subprotocols is None:
raise InvalidHandshake("No subprotocols supported")
parsed_header_values: Sequence[Subprotocol] = sum(
[parse_subprotocol(header_value) for header_value in header_values], []
)
if len(parsed_header_values) > 1:
subprotocols = ", ".join(parsed_header_values)
raise InvalidHandshake(f"Multiple subprotocols: {subprotocols}")
subprotocol = parsed_header_values[0]
if subprotocol not in available_subprotocols:
raise NegotiationError(f"Unsupported subprotocol: {subprotocol}")
return subprotocol | Handle the Sec-WebSocket-Protocol HTTP response header.
Check that it contains exactly one supported subprotocol.
Return the selected subprotocol. | Below is the the instruction that describes the task:
### Input:
Handle the Sec-WebSocket-Protocol HTTP response header.
Check that it contains exactly one supported subprotocol.
Return the selected subprotocol.
### Response:
def process_subprotocol(
headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]]
) -> Optional[Subprotocol]:
"""
Handle the Sec-WebSocket-Protocol HTTP response header.
Check that it contains exactly one supported subprotocol.
Return the selected subprotocol.
"""
subprotocol: Optional[Subprotocol] = None
header_values = headers.get_all("Sec-WebSocket-Protocol")
if header_values:
if available_subprotocols is None:
raise InvalidHandshake("No subprotocols supported")
parsed_header_values: Sequence[Subprotocol] = sum(
[parse_subprotocol(header_value) for header_value in header_values], []
)
if len(parsed_header_values) > 1:
subprotocols = ", ".join(parsed_header_values)
raise InvalidHandshake(f"Multiple subprotocols: {subprotocols}")
subprotocol = parsed_header_values[0]
if subprotocol not in available_subprotocols:
raise NegotiationError(f"Unsupported subprotocol: {subprotocol}")
return subprotocol |
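A hedged walk-through of the happy path; the import location of Headers is an assumption (websockets.datastructures in recent releases, websockets.http in older ones):
from websockets.datastructures import Headers  # import path is an assumption

response_headers = Headers()
response_headers['Sec-WebSocket-Protocol'] = 'chat'
selected = process_subprotocol(response_headers, ['chat', 'superchat'])
print(selected)  # chat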
def _load_keyring_path(config):
"load the keyring-path option (if present)"
try:
path = config.get("backend", "keyring-path").strip()
sys.path.insert(0, path)
except (configparser.NoOptionError, configparser.NoSectionError):
pass | load the keyring-path option (if present) | Below is the the instruction that describes the task:
### Input:
load the keyring-path option (if present)
### Response:
def _load_keyring_path(config):
"load the keyring-path option (if present)"
try:
path = config.get("backend", "keyring-path").strip()
sys.path.insert(0, path)
except (configparser.NoOptionError, configparser.NoSectionError):
pass |
def get_processing_block(block_id):
"""Return the Processing Block Configuration for the specified ID"""
identifiers = block_id.split(':')
scheduling_block_id = identifiers[0]
scheduling_block_config = get_scheduling_block(scheduling_block_id)
for processing_block in scheduling_block_config['processing_blocks']:
if processing_block['id'] == block_id:
return processing_block
raise KeyError('Unknown Processing Block id: {} ({})'
.format(identifiers[-1], block_id)) | Return the Processing Block Configuration for the specified ID | Below is the the instruction that describes the task:
### Input:
Return the Processing Block Configuration for the specified ID
### Response:
def get_processing_block(block_id):
"""Return the Processing Block Configuration for the specified ID"""
identifiers = block_id.split(':')
scheduling_block_id = identifiers[0]
scheduling_block_config = get_scheduling_block(scheduling_block_id)
for processing_block in scheduling_block_config['processing_blocks']:
if processing_block['id'] == block_id:
return processing_block
raise KeyError('Unknown Processing Block id: {} ({})'
.format(identifiers[-1], block_id)) |
def create_temp_directory(self, **mkdtemp_kwargs) -> str:
"""
Creates a temp directory.
:param mkdtemp_kwargs: named arguments to be passed to `tempfile.mkdtemp`
:return: the location of the temp directory
"""
kwargs = {**self.default_mkdtemp_kwargs, **mkdtemp_kwargs}
location = tempfile.mkdtemp(**kwargs)
self._temp_directories.add(location)
return location | Creates a temp directory.
:param mkdtemp_kwargs: named arguments to be passed to `tempfile.mkdtemp`
:return: the location of the temp directory | Below is the the instruction that describes the task:
### Input:
Creates a temp directory.
:param mkdtemp_kwargs: named arguments to be passed to `tempfile.mkdtemp`
:return: the location of the temp directory
### Response:
def create_temp_directory(self, **mkdtemp_kwargs) -> str:
"""
Creates a temp directory.
:param mkdtemp_kwargs: named arguments to be passed to `tempfile.mkdtemp`
:return: the location of the temp directory
"""
kwargs = {**self.default_mkdtemp_kwargs, **mkdtemp_kwargs}
location = tempfile.mkdtemp(**kwargs)
self._temp_directories.add(location)
return location |
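A minimal sketch; the owner class below is hypothetical and only supplies the attributes the method touches:
import tempfile

class TempDirManager:  # hypothetical stand-in for the real manager class
    default_mkdtemp_kwargs = {'prefix': 'job-'}
    def __init__(self):
        self._temp_directories = set()
    create_temp_directory = create_temp_directory

manager = TempDirManager()
print(manager.create_temp_directory(suffix='-scratch'))  # e.g. /tmp/job-x1y2z3-scratch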
def disable_restricted(self, ):
"""Disable the restricted buttons
:returns: None
:rtype: None
:raises: None
"""
todisable = [(self.reftrack.duplicate, self.duplicate_tb),
(self.reftrack.delete, self.delete_tb),
(self.reftrack.reference, self.reference_tb),
(self.reftrack.replace, self.replace_tb),]
for action, btn in todisable:
res = self.reftrack.is_restricted(action)
btn.setDisabled(res) | Disable the restricted buttons
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
Disable the restricted buttons
:returns: None
:rtype: None
:raises: None
### Response:
def disable_restricted(self, ):
"""Disable the restricted buttons
:returns: None
:rtype: None
:raises: None
"""
todisable = [(self.reftrack.duplicate, self.duplicate_tb),
(self.reftrack.delete, self.delete_tb),
(self.reftrack.reference, self.reference_tb),
(self.reftrack.replace, self.replace_tb),]
for action, btn in todisable:
res = self.reftrack.is_restricted(action)
btn.setDisabled(res) |
def _expectation(p, mean1, none1, mean2, none2, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
- m1(.) :: Linear mean function
- m2(.) :: Identity mean function
:return: NxQxD
"""
with params_as_tensors_for(mean1):
N = tf.shape(p.mu)[0]
e_xxt = p.cov + (p.mu[:, :, None] * p.mu[:, None, :]) # NxDxD
e_A_xxt = tf.matmul(tf.tile(mean1.A[None, ...], (N, 1, 1)), e_xxt, transpose_a=True) # NxQxD
e_b_xt = mean1.b[None, :, None] * p.mu[:, None, :] # NxQxD
return e_A_xxt + e_b_xt | Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
- m1(.) :: Linear mean function
- m2(.) :: Identity mean function
:return: NxQxD | Below is the the instruction that describes the task:
### Input:
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
- m1(.) :: Linear mean function
- m2(.) :: Identity mean function
:return: NxQxD
### Response:
def _expectation(p, mean1, none1, mean2, none2, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
- m1(.) :: Linear mean function
- m2(.) :: Identity mean function
:return: NxQxD
"""
with params_as_tensors_for(mean1):
N = tf.shape(p.mu)[0]
e_xxt = p.cov + (p.mu[:, :, None] * p.mu[:, None, :]) # NxDxD
e_A_xxt = tf.matmul(tf.tile(mean1.A[None, ...], (N, 1, 1)), e_xxt, transpose_a=True) # NxQxD
e_b_xt = mean1.b[None, :, None] * p.mu[:, None, :] # NxQxD
return e_A_xxt + e_b_xt |
def checkattr(metacls, attr, value):
"""
Only allow class attributes that are instances of
rootpy.types.Column, ROOT.TObject, or ROOT.ObjectProxy
"""
if not isinstance(value, (
types.MethodType,
types.FunctionType,
classmethod,
staticmethod,
property)):
if attr in dir(type('dummy', (object,), {})) + \
['__metaclass__', '__qualname__']:
return
if attr.startswith('_'):
raise SyntaxError(
"TreeModel attribute `{0}` "
"must not start with `_`".format(attr))
if not inspect.isclass(value):
if not isinstance(value, Column):
raise TypeError(
"TreeModel attribute `{0}` "
"must be an instance of "
"`rootpy.tree.treetypes.Column`".format(attr))
return
if not issubclass(value, (ROOT.TObject, ROOT.ObjectProxy)):
raise TypeError(
"TreeModel attribute `{0}` must inherit "
"from `ROOT.TObject` or `ROOT.ObjectProxy`".format(
attr)) | Only allow class attributes that are instances of
rootpy.types.Column, ROOT.TObject, or ROOT.ObjectProxy | Below is the the instruction that describes the task:
### Input:
Only allow class attributes that are instances of
rootpy.types.Column, ROOT.TObject, or ROOT.ObjectProxy
### Response:
def checkattr(metacls, attr, value):
"""
Only allow class attributes that are instances of
rootpy.types.Column, ROOT.TObject, or ROOT.ObjectProxy
"""
if not isinstance(value, (
types.MethodType,
types.FunctionType,
classmethod,
staticmethod,
property)):
if attr in dir(type('dummy', (object,), {})) + \
['__metaclass__', '__qualname__']:
return
if attr.startswith('_'):
raise SyntaxError(
"TreeModel attribute `{0}` "
"must not start with `_`".format(attr))
if not inspect.isclass(value):
if not isinstance(value, Column):
raise TypeError(
"TreeModel attribute `{0}` "
"must be an instance of "
"`rootpy.tree.treetypes.Column`".format(attr))
return
if not issubclass(value, (ROOT.TObject, ROOT.ObjectProxy)):
raise TypeError(
"TreeModel attribute `{0}` must inherit "
"from `ROOT.TObject` or `ROOT.ObjectProxy`".format(
attr)) |
def verify(self, obj):
"""Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Raises:
ValidationError: If there is a problem verifying the dictionary, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
"""
if not isinstance(obj, int):
raise ValidationError("Object is not a int", reason='object is not a int', object=obj,
type=type(obj), int_type=int)
return obj | Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Raises:
ValidationError: If there is a problem verifying the dictionary, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation. | Below is the the instruction that describes the task:
### Input:
Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Raises:
ValidationError: If there is a problem verifying the dictionary, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
### Response:
def verify(self, obj):
"""Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Raises:
ValidationError: If there is a problem verifying the dictionary, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
"""
if not isinstance(obj, int):
raise ValidationError("Object is not a int", reason='object is not a int', object=obj,
type=type(obj), int_type=int)
return obj |
def _remove_previous_ned_queries(
self,
coordinateList):
"""iterate through the transient locations to see if we have recent local NED coverage of that area already
**Key Arguments:**
- ``coordinateList`` -- set of coordinate to check for previous queries
**Return:**
- ``updatedCoordinateList`` -- coordinate list with previous queries removed
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
            - regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_remove_previous_ned_queries`` method')
# 1 DEGREE QUERY RADIUS
radius = 60. * 60.
updatedCoordinateList = []
keepers = []
# CALCULATE THE OLDEST RESULTS LIMIT
now = datetime.now()
td = timedelta(
days=self.settings["ned stream refresh rate in days"])
refreshLimit = now - td
refreshLimit = refreshLimit.strftime("%Y-%m-%d %H:%M:%S")
raList = []
raList[:] = [c[0] for c in coordinateList]
decList = []
decList[:] = [c[1] for c in coordinateList]
# MATCH COORDINATES AGAINST PREVIOUS NED SEARCHES
cs = conesearch(
log=self.log,
dbConn=self.cataloguesDbConn,
tableName="tcs_helper_ned_query_history",
columns="*",
ra=raList,
dec=decList,
radiusArcsec=radius,
separations=True,
distinct=True,
sqlWhere="dateQueried > '%(refreshLimit)s'" % locals(),
closest=False
)
matchIndies, matches = cs.search()
# DETERMINE WHICH COORDINATES REQUIRE A NED QUERY
curatedMatchIndices = []
curatedMatches = []
for i, m in zip(matchIndies, matches.list):
match = False
row = m
row["separationArcsec"] = row["cmSepArcsec"]
raStream = row["raDeg"]
decStream = row["decDeg"]
radiusStream = row["arcsecRadius"]
dateStream = row["dateQueried"]
angularSeparation = row["separationArcsec"]
if angularSeparation + self.settings["first pass ned search radius arcec"] < radiusStream:
curatedMatchIndices.append(i)
curatedMatches.append(m)
# NON MATCHES
for i, v in enumerate(coordinateList):
if i not in curatedMatchIndices:
updatedCoordinateList.append(v)
self.log.debug('completed the ``_remove_previous_ned_queries`` method')
return updatedCoordinateList | iterate through the transient locations to see if we have recent local NED coverage of that area already
**Key Arguments:**
- ``coordinateList`` -- set of coordinate to check for previous queries
**Return:**
- ``updatedCoordinateList`` -- coordinate list with previous queries removed
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring | Below is the the instruction that describes the task:
### Input:
iterate through the transient locations to see if we have recent local NED coverage of that area already
**Key Arguments:**
- ``coordinateList`` -- set of coordinate to check for previous queries
**Return:**
- ``updatedCoordinateList`` -- coordinate list with previous queries removed
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
### Response:
def _remove_previous_ned_queries(
self,
coordinateList):
"""iterate through the transient locations to see if we have recent local NED coverage of that area already
**Key Arguments:**
- ``coordinateList`` -- set of coordinate to check for previous queries
**Return:**
- ``updatedCoordinateList`` -- coordinate list with previous queries removed
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
            - regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_remove_previous_ned_queries`` method')
# 1 DEGREE QUERY RADIUS
radius = 60. * 60.
updatedCoordinateList = []
keepers = []
# CALCULATE THE OLDEST RESULTS LIMIT
now = datetime.now()
td = timedelta(
days=self.settings["ned stream refresh rate in days"])
refreshLimit = now - td
refreshLimit = refreshLimit.strftime("%Y-%m-%d %H:%M:%S")
raList = []
raList[:] = [c[0] for c in coordinateList]
decList = []
decList[:] = [c[1] for c in coordinateList]
# MATCH COORDINATES AGAINST PREVIOUS NED SEARCHES
cs = conesearch(
log=self.log,
dbConn=self.cataloguesDbConn,
tableName="tcs_helper_ned_query_history",
columns="*",
ra=raList,
dec=decList,
radiusArcsec=radius,
separations=True,
distinct=True,
sqlWhere="dateQueried > '%(refreshLimit)s'" % locals(),
closest=False
)
matchIndies, matches = cs.search()
# DETERMINE WHICH COORDINATES REQUIRE A NED QUERY
curatedMatchIndices = []
curatedMatches = []
for i, m in zip(matchIndies, matches.list):
match = False
row = m
row["separationArcsec"] = row["cmSepArcsec"]
raStream = row["raDeg"]
decStream = row["decDeg"]
radiusStream = row["arcsecRadius"]
dateStream = row["dateQueried"]
angularSeparation = row["separationArcsec"]
if angularSeparation + self.settings["first pass ned search radius arcec"] < radiusStream:
curatedMatchIndices.append(i)
curatedMatches.append(m)
# NON MATCHES
for i, v in enumerate(coordinateList):
if i not in curatedMatchIndices:
updatedCoordinateList.append(v)
self.log.debug('completed the ``_remove_previous_ned_queries`` method')
return updatedCoordinateList |
def get_statements(self):
"""Process reader output to produce INDRA Statements."""
for k, v in self.reader_output.items():
for interaction in v['interactions']:
self._process_interaction(k, interaction, v['text'], self.pmid,
self.extra_annotations) | Process reader output to produce INDRA Statements. | Below is the the instruction that describes the task:
### Input:
Process reader output to produce INDRA Statements.
### Response:
def get_statements(self):
"""Process reader output to produce INDRA Statements."""
for k, v in self.reader_output.items():
for interaction in v['interactions']:
self._process_interaction(k, interaction, v['text'], self.pmid,
self.extra_annotations) |
def scatter_drag(
x_points: 'Array',
y_points: 'Array',
*,
fig=None,
show_eqn=True,
options={}
):
"""
Generates an interactive scatter plot with the best fit line plotted over
the points. The points can be dragged by the user and the line will
automatically update.
Args:
x_points (Array Number): x-values of points to plot
y_points (Array Number): y-values of points to plot
Kwargs:
show_eqn (bool): If True (default), displays the best fit line's
equation above the scatterplot.
{options}
Returns:
VBox with two children: the equation widget and the figure.
>>> xs = np.arange(10)
>>> ys = np.arange(10) + np.random.rand(10)
>>> scatter_drag(xs, ys)
VBox(...)
"""
params = {
'marks': [{
'x': x_points,
'y': y_points,
'enable_move': True,
}, {
'colors': [GOLDENROD],
}]
}
fig = options.get('_fig', False) or _create_fig(options=options)
[scat, lin] = _create_marks(
fig=fig, marks=[bq.Scatter, bq.Lines], options=options, params=params
)
_add_marks(fig, [scat, lin])
equation = widgets.Label()
# create line fit to data and display equation
def update_line(change=None):
x_sc = scat.scales['x']
lin.x = [
x_sc.min if x_sc.min is not None else np.min(scat.x),
x_sc.max if x_sc.max is not None else np.max(scat.x),
]
poly = np.polyfit(scat.x, scat.y, deg=1)
lin.y = np.polyval(poly, lin.x)
if show_eqn:
equation.value = 'y = {:.2f}x + {:.2f}'.format(poly[0], poly[1])
update_line()
scat.observe(update_line, names=['x', 'y'])
return widgets.VBox([equation, fig]) | Generates an interactive scatter plot with the best fit line plotted over
the points. The points can be dragged by the user and the line will
automatically update.
Args:
x_points (Array Number): x-values of points to plot
y_points (Array Number): y-values of points to plot
Kwargs:
show_eqn (bool): If True (default), displays the best fit line's
equation above the scatterplot.
{options}
Returns:
VBox with two children: the equation widget and the figure.
>>> xs = np.arange(10)
>>> ys = np.arange(10) + np.random.rand(10)
>>> scatter_drag(xs, ys)
VBox(...) | Below is the the instruction that describes the task:
### Input:
Generates an interactive scatter plot with the best fit line plotted over
the points. The points can be dragged by the user and the line will
automatically update.
Args:
x_points (Array Number): x-values of points to plot
y_points (Array Number): y-values of points to plot
Kwargs:
show_eqn (bool): If True (default), displays the best fit line's
equation above the scatterplot.
{options}
Returns:
VBox with two children: the equation widget and the figure.
>>> xs = np.arange(10)
>>> ys = np.arange(10) + np.random.rand(10)
>>> scatter_drag(xs, ys)
VBox(...)
### Response:
def scatter_drag(
x_points: 'Array',
y_points: 'Array',
*,
fig=None,
show_eqn=True,
options={}
):
"""
Generates an interactive scatter plot with the best fit line plotted over
the points. The points can be dragged by the user and the line will
automatically update.
Args:
x_points (Array Number): x-values of points to plot
y_points (Array Number): y-values of points to plot
Kwargs:
show_eqn (bool): If True (default), displays the best fit line's
equation above the scatterplot.
{options}
Returns:
VBox with two children: the equation widget and the figure.
>>> xs = np.arange(10)
>>> ys = np.arange(10) + np.random.rand(10)
>>> scatter_drag(xs, ys)
VBox(...)
"""
params = {
'marks': [{
'x': x_points,
'y': y_points,
'enable_move': True,
}, {
'colors': [GOLDENROD],
}]
}
fig = options.get('_fig', False) or _create_fig(options=options)
[scat, lin] = _create_marks(
fig=fig, marks=[bq.Scatter, bq.Lines], options=options, params=params
)
_add_marks(fig, [scat, lin])
equation = widgets.Label()
# create line fit to data and display equation
def update_line(change=None):
x_sc = scat.scales['x']
lin.x = [
x_sc.min if x_sc.min is not None else np.min(scat.x),
x_sc.max if x_sc.max is not None else np.max(scat.x),
]
poly = np.polyfit(scat.x, scat.y, deg=1)
lin.y = np.polyval(poly, lin.x)
if show_eqn:
equation.value = 'y = {:.2f}x + {:.2f}'.format(poly[0], poly[1])
update_line()
scat.observe(update_line, names=['x', 'y'])
return widgets.VBox([equation, fig]) |
def clean(self):
""" Validates the topic instance. """
super().clean()
if self.forum.is_category or self.forum.is_link:
raise ValidationError(
_('A topic can not be associated with a category or a link forum')
) | Validates the topic instance. | Below is the the instruction that describes the task:
### Input:
Validates the topic instance.
### Response:
def clean(self):
""" Validates the topic instance. """
super().clean()
if self.forum.is_category or self.forum.is_link:
raise ValidationError(
_('A topic can not be associated with a category or a link forum')
) |
def plot_gaussian_2D(mu, lmbda, color='b', centermarker=True,label='',alpha=1.,ax=None,artists=None):
'''
Plots mean and cov ellipsoid into current axes. Must be 2D. lmbda is a covariance matrix.
'''
assert len(mu) == 2
ax = ax if ax else plt.gca()
# TODO use artists!
t = np.hstack([np.arange(0,2*np.pi,0.01),0])
circle = np.vstack([np.sin(t),np.cos(t)])
ellipse = np.dot(np.linalg.cholesky(lmbda),circle)
if artists is None:
point = ax.scatter([mu[0]],[mu[1]],marker='D',color=color,s=4,alpha=alpha) \
if centermarker else None
line, = ax.plot(ellipse[0,:] + mu[0], ellipse[1,:] + mu[1],linestyle='-',
linewidth=2,color=color,label=label,alpha=alpha)
else:
line, point = artists
if centermarker:
point.set_offsets(np.atleast_2d(mu))
line.set_xdata(ellipse[0,:] + mu[0])
line.set_ydata(ellipse[1,:] + mu[1])
line.set_alpha(alpha)
line.set_color(color)
return line, point | Plots mean and cov ellipsoid into current axes. Must be 2D. lmbda is a covariance matrix. | Below is the the instruction that describes the task:
### Input:
Plots mean and cov ellipsoid into current axes. Must be 2D. lmbda is a covariance matrix.
### Response:
def plot_gaussian_2D(mu, lmbda, color='b', centermarker=True,label='',alpha=1.,ax=None,artists=None):
'''
Plots mean and cov ellipsoid into current axes. Must be 2D. lmbda is a covariance matrix.
'''
assert len(mu) == 2
ax = ax if ax else plt.gca()
# TODO use artists!
t = np.hstack([np.arange(0,2*np.pi,0.01),0])
circle = np.vstack([np.sin(t),np.cos(t)])
ellipse = np.dot(np.linalg.cholesky(lmbda),circle)
if artists is None:
point = ax.scatter([mu[0]],[mu[1]],marker='D',color=color,s=4,alpha=alpha) \
if centermarker else None
line, = ax.plot(ellipse[0,:] + mu[0], ellipse[1,:] + mu[1],linestyle='-',
linewidth=2,color=color,label=label,alpha=alpha)
else:
line, point = artists
if centermarker:
point.set_offsets(np.atleast_2d(mu))
line.set_xdata(ellipse[0,:] + mu[0])
line.set_ydata(ellipse[1,:] + mu[1])
line.set_alpha(alpha)
line.set_color(color)
return line, point |
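A quick check of the helper on a synthetic 2-D Gaussian (numpy and matplotlib are the only assumptions, and both are already used above):
import numpy as np
import matplotlib.pyplot as plt

mu = np.array([1.0, -0.5])
cov = np.array([[2.0, 0.3],
                [0.3, 0.5]])            # must be positive definite for the Cholesky step
line, point = plot_gaussian_2D(mu, cov, color='r', label='component 0')
plt.legend()
plt.show()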
def coalesce_events(self, coalesce=True):
"""
        Coalescing events. Events are usually processed in batches, whose size
        depends on various factors. Thus, before processing them, events received
        from inotify are aggregated in a fifo queue. If this coalescing
        option is enabled events are filtered based on their unicity, only
        unique events are enqueued, duplicates are discarded. An event is unique
        when the combination of its fields (wd, mask, cookie, name) is unique
        among events of a same batch. After a batch of events is processed, any
        event is accepted again. By default this option is disabled, you have
        to explicitly call this function to turn it on.
@param coalesce: Optional new coalescing value. True by default.
@type coalesce: Bool
"""
self._coalesce = coalesce
if not coalesce:
            self._eventset.clear() | Coalescing events. Events are usually processed in batches, whose size
        depends on various factors. Thus, before processing them, events received
        from inotify are aggregated in a fifo queue. If this coalescing
        option is enabled events are filtered based on their unicity, only
        unique events are enqueued, duplicates are discarded. An event is unique
        when the combination of its fields (wd, mask, cookie, name) is unique
        among events of a same batch. After a batch of events is processed, any
        event is accepted again. By default this option is disabled, you have
        to explicitly call this function to turn it on.
@param coalesce: Optional new coalescing value. True by default.
@type coalesce: Bool | Below is the the instruction that describes the task:
### Input:
        Coalescing events. Events are usually processed in batches, whose size
        depends on various factors. Thus, before processing them, events received
        from inotify are aggregated in a fifo queue. If this coalescing
        option is enabled events are filtered based on their unicity, only
        unique events are enqueued, duplicates are discarded. An event is unique
        when the combination of its fields (wd, mask, cookie, name) is unique
        among events of a same batch. After a batch of events is processed, any
        event is accepted again. By default this option is disabled, you have
        to explicitly call this function to turn it on.
@param coalesce: Optional new coalescing value. True by default.
@type coalesce: Bool
### Response:
def coalesce_events(self, coalesce=True):
"""
        Coalescing events. Events are usually processed in batches, whose size
        depends on various factors. Thus, before processing them, events received
        from inotify are aggregated in a fifo queue. If this coalescing
        option is enabled events are filtered based on their unicity, only
        unique events are enqueued, duplicates are discarded. An event is unique
        when the combination of its fields (wd, mask, cookie, name) is unique
        among events of a same batch. After a batch of events is processed, any
        event is accepted again. By default this option is disabled, you have
        to explicitly call this function to turn it on.
@param coalesce: Optional new coalescing value. True by default.
@type coalesce: Bool
"""
self._coalesce = coalesce
if not coalesce:
self._eventset.clear() |
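This method belongs to pyinotify's Notifier; the sketch below is a hedged, Linux-only example that simply switches coalescing on before entering the event loop:
import pyinotify

wm = pyinotify.WatchManager()
wm.add_watch('/tmp', pyinotify.IN_CREATE | pyinotify.IN_DELETE)
notifier = pyinotify.Notifier(wm)
notifier.coalesce_events()   # drop duplicate (wd, mask, cookie, name) events per batch
# notifier.loop()            # would block; left commented in this sketch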
def get_pdbs_for_gene(bigg_model, bigg_gene, cache_dir=tempfile.gettempdir(), force_rerun=False):
"""Attempt to get a rank-ordered list of available PDB structures for a BiGG Model and its gene.
Args:
bigg_model: BiGG Model ID
bigg_gene: BiGG Gene ID
Returns:
list: rank-ordered list of tuples of (pdb_id, chain_id)
"""
my_structures = []
# Download gene info
gene = ssbio.utils.request_json(link='http://bigg.ucsd.edu/api/v2/models/{}/genes/{}'.format(bigg_model, bigg_gene),
outfile='{}_{}.json'.format(bigg_model, bigg_gene),
outdir=cache_dir,
force_rerun_flag=force_rerun)
uniprots = []
if 'database_links' in gene:
if 'UniProt' in gene['database_links']:
uniprots = [x['id'] for x in gene['database_links']['UniProt']]
elif 'NCBI GI' in gene['database_links']:
uniprots = []
gis = [x['id'] for x in gene['database_links']['NCBI GI']]
gi_uniprots = bs_unip.mapping(fr='P_GI', to='ACC', query=gis).values()
uniprots.extend(gi_uniprots)
uniprots = ssbio.utils.flatlist_dropdup(uniprots)
uniprots = [x for x in uniprots if ssbio.databases.uniprot.is_valid_uniprot_id(x)]
if uniprots:
for u in uniprots:
get_best_structure = ssbio.databases.pdb.best_structures(uniprot_id=u, outdir=cache_dir)
if get_best_structure:
for best_structure in get_best_structure:
my_structures.append((best_structure['pdb_id'], best_structure['chain_id']))
return my_structures | Attempt to get a rank-ordered list of available PDB structures for a BiGG Model and its gene.
Args:
bigg_model: BiGG Model ID
bigg_gene: BiGG Gene ID
Returns:
list: rank-ordered list of tuples of (pdb_id, chain_id) | Below is the the instruction that describes the task:
### Input:
Attempt to get a rank-ordered list of available PDB structures for a BiGG Model and its gene.
Args:
bigg_model: BiGG Model ID
bigg_gene: BiGG Gene ID
Returns:
list: rank-ordered list of tuples of (pdb_id, chain_id)
### Response:
def get_pdbs_for_gene(bigg_model, bigg_gene, cache_dir=tempfile.gettempdir(), force_rerun=False):
"""Attempt to get a rank-ordered list of available PDB structures for a BiGG Model and its gene.
Args:
bigg_model: BiGG Model ID
bigg_gene: BiGG Gene ID
Returns:
list: rank-ordered list of tuples of (pdb_id, chain_id)
"""
my_structures = []
# Download gene info
gene = ssbio.utils.request_json(link='http://bigg.ucsd.edu/api/v2/models/{}/genes/{}'.format(bigg_model, bigg_gene),
outfile='{}_{}.json'.format(bigg_model, bigg_gene),
outdir=cache_dir,
force_rerun_flag=force_rerun)
uniprots = []
if 'database_links' in gene:
if 'UniProt' in gene['database_links']:
uniprots = [x['id'] for x in gene['database_links']['UniProt']]
elif 'NCBI GI' in gene['database_links']:
uniprots = []
gis = [x['id'] for x in gene['database_links']['NCBI GI']]
gi_uniprots = bs_unip.mapping(fr='P_GI', to='ACC', query=gis).values()
uniprots.extend(gi_uniprots)
uniprots = ssbio.utils.flatlist_dropdup(uniprots)
uniprots = [x for x in uniprots if ssbio.databases.uniprot.is_valid_uniprot_id(x)]
if uniprots:
for u in uniprots:
get_best_structure = ssbio.databases.pdb.best_structures(uniprot_id=u, outdir=cache_dir)
if get_best_structure:
for best_structure in get_best_structure:
my_structures.append((best_structure['pdb_id'], best_structure['chain_id']))
return my_structures |
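A hedged call, assuming network access and the ssbio stack used above; the BiGG model and gene identifiers are illustrative:
structures = get_pdbs_for_gene('iJO1366', 'b0002')   # E. coli model / gene, for illustration
for pdb_id, chain_id in structures[:3]:
    print(pdb_id, chain_id)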
def fill_hist(hist, array, weights=None, return_indices=False):
"""Fill a ROOT histogram with a NumPy array.
Parameters
----------
hist : ROOT TH1, TH2, or TH3
The ROOT histogram to fill.
array : numpy array of shape [n_samples, n_dimensions]
The values to fill the histogram with. The number of columns must match
the dimensionality of the histogram. Supply a flat numpy array when
filling a 1D histogram.
weights : numpy array
A flat numpy array of weights for each sample in ``array``.
return_indices : bool, optional (default=False)
If True then return an array of the bin indices filled for each element
in ``array``.
Returns
-------
indices : numpy array or None
If ``return_indices`` is True, then return an array of the bin indices
filled for each element in ``array`` otherwise return None.
"""
import ROOT
array = np.asarray(array, dtype=np.double)
if weights is not None:
weights = np.asarray(weights, dtype=np.double)
if weights.shape[0] != array.shape[0]:
raise ValueError("array and weights must have the same length")
if weights.ndim != 1:
raise ValueError("weight must be 1-dimensional")
if isinstance(hist, ROOT.TH3):
if array.ndim != 2:
raise ValueError("array must be 2-dimensional")
if array.shape[1] != 3:
raise ValueError(
"length of the second dimension must equal "
"the dimension of the histogram")
return _librootnumpy.fill_h3(
ROOT.AsCObject(hist), array, weights, return_indices)
elif isinstance(hist, ROOT.TH2):
if array.ndim != 2:
raise ValueError("array must be 2-dimensional")
if array.shape[1] != 2:
raise ValueError(
"length of the second dimension must equal "
"the dimension of the histogram")
return _librootnumpy.fill_h2(
ROOT.AsCObject(hist), array, weights, return_indices)
elif isinstance(hist, ROOT.TH1):
if array.ndim != 1:
raise ValueError("array must be 1-dimensional")
return _librootnumpy.fill_h1(
ROOT.AsCObject(hist), array, weights, return_indices)
raise TypeError(
"hist must be an instance of ROOT.TH1, ROOT.TH2, or ROOT.TH3") | Fill a ROOT histogram with a NumPy array.
Parameters
----------
hist : ROOT TH1, TH2, or TH3
The ROOT histogram to fill.
array : numpy array of shape [n_samples, n_dimensions]
The values to fill the histogram with. The number of columns must match
the dimensionality of the histogram. Supply a flat numpy array when
filling a 1D histogram.
weights : numpy array
A flat numpy array of weights for each sample in ``array``.
return_indices : bool, optional (default=False)
If True then return an array of the bin indices filled for each element
in ``array``.
Returns
-------
indices : numpy array or None
If ``return_indices`` is True, then return an array of the bin indices
filled for each element in ``array`` otherwise return None. | Below is the the instruction that describes the task:
### Input:
Fill a ROOT histogram with a NumPy array.
Parameters
----------
hist : ROOT TH1, TH2, or TH3
The ROOT histogram to fill.
array : numpy array of shape [n_samples, n_dimensions]
The values to fill the histogram with. The number of columns must match
the dimensionality of the histogram. Supply a flat numpy array when
filling a 1D histogram.
weights : numpy array
A flat numpy array of weights for each sample in ``array``.
return_indices : bool, optional (default=False)
If True then return an array of the bin indices filled for each element
in ``array``.
Returns
-------
indices : numpy array or None
If ``return_indices`` is True, then return an array of the bin indices
filled for each element in ``array`` otherwise return None.
### Response:
def fill_hist(hist, array, weights=None, return_indices=False):
"""Fill a ROOT histogram with a NumPy array.
Parameters
----------
hist : ROOT TH1, TH2, or TH3
The ROOT histogram to fill.
array : numpy array of shape [n_samples, n_dimensions]
The values to fill the histogram with. The number of columns must match
the dimensionality of the histogram. Supply a flat numpy array when
filling a 1D histogram.
weights : numpy array
A flat numpy array of weights for each sample in ``array``.
return_indices : bool, optional (default=False)
If True then return an array of the bin indices filled for each element
in ``array``.
Returns
-------
indices : numpy array or None
If ``return_indices`` is True, then return an array of the bin indices
filled for each element in ``array`` otherwise return None.
"""
import ROOT
array = np.asarray(array, dtype=np.double)
if weights is not None:
weights = np.asarray(weights, dtype=np.double)
if weights.shape[0] != array.shape[0]:
raise ValueError("array and weights must have the same length")
if weights.ndim != 1:
raise ValueError("weight must be 1-dimensional")
if isinstance(hist, ROOT.TH3):
if array.ndim != 2:
raise ValueError("array must be 2-dimensional")
if array.shape[1] != 3:
raise ValueError(
"length of the second dimension must equal "
"the dimension of the histogram")
return _librootnumpy.fill_h3(
ROOT.AsCObject(hist), array, weights, return_indices)
elif isinstance(hist, ROOT.TH2):
if array.ndim != 2:
raise ValueError("array must be 2-dimensional")
if array.shape[1] != 2:
raise ValueError(
"length of the second dimension must equal "
"the dimension of the histogram")
return _librootnumpy.fill_h2(
ROOT.AsCObject(hist), array, weights, return_indices)
elif isinstance(hist, ROOT.TH1):
if array.ndim != 1:
raise ValueError("array must be 1-dimensional")
return _librootnumpy.fill_h1(
ROOT.AsCObject(hist), array, weights, return_indices)
raise TypeError(
"hist must be an instance of ROOT.TH1, ROOT.TH2, or ROOT.TH3") |
def _get_version_properties(self):
"""Parses version and model information out of 'show version' output
and uses the output to populate class properties.
"""
# Parse out version info
output = self.enable('show version')
self._version = str(output[0]['result']['version'])
match = re.match('[\d.\d]+', output[0]['result']['version'])
if match:
self._version_number = str(match.group(0))
else:
self._version_number = str(output[0]['result']['version'])
# Parse out model number
match = re.search('\d\d\d\d', output[0]['result']['modelName'])
if match:
self._model = str(match.group(0))
else:
self._model = str(output[0]['result']['modelName']) | Parses version and model information out of 'show version' output
and uses the output to populate class properties. | Below is the the instruction that describes the task:
### Input:
Parses version and model information out of 'show version' output
and uses the output to populate class properties.
### Response:
def _get_version_properties(self):
"""Parses version and model information out of 'show version' output
and uses the output to populate class properties.
"""
# Parse out version info
output = self.enable('show version')
self._version = str(output[0]['result']['version'])
match = re.match('[\d.\d]+', output[0]['result']['version'])
if match:
self._version_number = str(match.group(0))
else:
self._version_number = str(output[0]['result']['version'])
# Parse out model number
match = re.search('\d\d\d\d', output[0]['result']['modelName'])
if match:
self._model = str(match.group(0))
else:
self._model = str(output[0]['result']['modelName']) |
def search(self, key, default=None):
"""Find the first key-value pair with key *key* and return its value.
If the key was not found, return *default*. If no default was provided,
return ``None``. This method never raises a ``KeyError``.
"""
self._find_lt(key)
node = self._path[0][2]
if node is self._tail or key < node[0]:
return default
return node[1] | Find the first key-value pair with key *key* and return its value.
If the key was not found, return *default*. If no default was provided,
return ``None``. This method never raises a ``KeyError``. | Below is the the instruction that describes the task:
### Input:
Find the first key-value pair with key *key* and return its value.
If the key was not found, return *default*. If no default was provided,
return ``None``. This method never raises a ``KeyError``.
### Response:
def search(self, key, default=None):
"""Find the first key-value pair with key *key* and return its value.
If the key was not found, return *default*. If no default was provided,
return ``None``. This method never raises a ``KeyError``.
"""
self._find_lt(key)
node = self._path[0][2]
if node is self._tail or key < node[0]:
return default
return node[1] |
def kill(self, detach=False):
"""This function must/will be called when a socket is to be completely
shut down, closed by connection timeout, connection error or explicit
disconnection from the client.
It will call all of the Namespace's
:meth:`~socketio.namespace.BaseNamespace.disconnect` methods
so that you can shut-down things properly.
"""
# Clear out the callbacks
self.ack_callbacks = {}
if self.connected:
self.state = self.STATE_DISCONNECTING
self.server_queue.put_nowait(None)
self.client_queue.put_nowait(None)
if len(self.active_ns) > 0:
log.debug("Calling disconnect() on %s" % self)
self.disconnect()
if detach:
self.detach()
gevent.killall(self.jobs) | This function must/will be called when a socket is to be completely
shut down, closed by connection timeout, connection error or explicit
disconnection from the client.
It will call all of the Namespace's
:meth:`~socketio.namespace.BaseNamespace.disconnect` methods
so that you can shut-down things properly. | Below is the the instruction that describes the task:
### Input:
This function must/will be called when a socket is to be completely
shut down, closed by connection timeout, connection error or explicit
disconnection from the client.
It will call all of the Namespace's
:meth:`~socketio.namespace.BaseNamespace.disconnect` methods
so that you can shut-down things properly.
### Response:
def kill(self, detach=False):
"""This function must/will be called when a socket is to be completely
shut down, closed by connection timeout, connection error or explicit
disconnection from the client.
It will call all of the Namespace's
:meth:`~socketio.namespace.BaseNamespace.disconnect` methods
so that you can shut-down things properly.
"""
# Clear out the callbacks
self.ack_callbacks = {}
if self.connected:
self.state = self.STATE_DISCONNECTING
self.server_queue.put_nowait(None)
self.client_queue.put_nowait(None)
if len(self.active_ns) > 0:
log.debug("Calling disconnect() on %s" % self)
self.disconnect()
if detach:
self.detach()
gevent.killall(self.jobs) |
def _match(names):
'''
Since pkg_delete requires the full "pkgname-version" string, this function
will attempt to match the package name with its version. Returns a list of
partial matches and package names that match the "pkgname-version" string
required by pkg_delete, and a list of errors encountered.
'''
pkgs = list_pkgs(versions_as_list=True)
errors = []
# Look for full matches
full_pkg_strings = []
out = __salt__['cmd.run_stdout'](['pkg_info'],
output_loglevel='trace',
python_shell=False)
for line in out.splitlines():
try:
full_pkg_strings.append(line.split()[0])
except IndexError:
continue
full_matches = [x for x in names if x in full_pkg_strings]
# Look for pkgname-only matches
matches = []
ambiguous = []
for name in set(names) - set(full_matches):
cver = pkgs.get(name)
if cver is not None:
if len(cver) == 1:
matches.append('{0}-{1}'.format(name, cver[0]))
else:
ambiguous.append(name)
errors.append(
'Ambiguous package \'{0}\'. Full name/version required. '
'Possible matches: {1}'.format(
name,
', '.join(['{0}-{1}'.format(name, x) for x in cver])
)
)
# Find packages that did not match anything
not_matched = \
set(names) - set(matches) - set(full_matches) - set(ambiguous)
for name in not_matched:
errors.append('Package \'{0}\' not found'.format(name))
return matches + full_matches, errors | Since pkg_delete requires the full "pkgname-version" string, this function
will attempt to match the package name with its version. Returns a list of
partial matches and package names that match the "pkgname-version" string
required by pkg_delete, and a list of errors encountered. | Below is the the instruction that describes the task:
### Input:
Since pkg_delete requires the full "pkgname-version" string, this function
will attempt to match the package name with its version. Returns a list of
partial matches and package names that match the "pkgname-version" string
required by pkg_delete, and a list of errors encountered.
### Response:
def _match(names):
'''
Since pkg_delete requires the full "pkgname-version" string, this function
will attempt to match the package name with its version. Returns a list of
partial matches and package names that match the "pkgname-version" string
required by pkg_delete, and a list of errors encountered.
'''
pkgs = list_pkgs(versions_as_list=True)
errors = []
# Look for full matches
full_pkg_strings = []
out = __salt__['cmd.run_stdout'](['pkg_info'],
output_loglevel='trace',
python_shell=False)
for line in out.splitlines():
try:
full_pkg_strings.append(line.split()[0])
except IndexError:
continue
full_matches = [x for x in names if x in full_pkg_strings]
# Look for pkgname-only matches
matches = []
ambiguous = []
for name in set(names) - set(full_matches):
cver = pkgs.get(name)
if cver is not None:
if len(cver) == 1:
matches.append('{0}-{1}'.format(name, cver[0]))
else:
ambiguous.append(name)
errors.append(
'Ambiguous package \'{0}\'. Full name/version required. '
'Possible matches: {1}'.format(
name,
', '.join(['{0}-{1}'.format(name, x) for x in cver])
)
)
# Find packages that did not match anything
not_matched = \
set(names) - set(matches) - set(full_matches) - set(ambiguous)
for name in not_matched:
errors.append('Package \'{0}\' not found'.format(name))
return matches + full_matches, errors |
def get_interface_detail_output_interface_configured_line_speed(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
output = ET.SubElement(get_interface_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
configured_line_speed = ET.SubElement(interface, "configured-line-speed")
configured_line_speed.text = kwargs.pop('configured_line_speed')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_interface_detail_output_interface_configured_line_speed(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
output = ET.SubElement(get_interface_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
configured_line_speed = ET.SubElement(interface, "configured-line-speed")
configured_line_speed.text = kwargs.pop('configured_line_speed')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_note(self, noteid):
"""Fetch a single note
        :param noteid: The UUID of the note
"""
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/{}'.format(noteid))
return response | Fetch a single note
:param noteid: The UUID of the note | Below is the the instruction that describes the task:
### Input:
Fetch a single note
:param noteid: The UUID of the note
### Response:
def get_note(self, noteid):
"""Fetch a single note
        :param noteid: The UUID of the note
"""
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/{}'.format(noteid))
return response |
def get_name_for(self, dynamic_part):
"""
Return the name for the current dynamic field, accepting a limpyd
instance for the dynamic part
"""
dynamic_part = self.from_python(dynamic_part)
return super(DynamicRelatedFieldMixin, self).get_name_for(dynamic_part) | Return the name for the current dynamic field, accepting a limpyd
instance for the dynamic part | Below is the the instruction that describes the task:
### Input:
Return the name for the current dynamic field, accepting a limpyd
instance for the dynamic part
### Response:
def get_name_for(self, dynamic_part):
"""
Return the name for the current dynamic field, accepting a limpyd
instance for the dynamic part
"""
dynamic_part = self.from_python(dynamic_part)
return super(DynamicRelatedFieldMixin, self).get_name_for(dynamic_part) |
def encode(self, secret, algorithm='HS256'):
"""
Encode the set of claims to the JWT (JSON Web Token) format
according to the OpenID Connect specification:
http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
claims (dict): A dictionary with the OpenID Connect claims.
secret (str): Secret used to encode the id_token.
algorithm (str): Algorithm used for encoding.
Defaults to HS256.
Returns encoded JWT token string.
"""
return jwt.encode(self.claims, secret, algorithm) | Encode the set of claims to the JWT (JSON Web Token) format
according to the OpenID Connect specification:
http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
claims (dict): A dictionary with the OpenID Connect claims.
secret (str): Secret used to encode the id_token.
algorithm (str): Algorithm used for encoding.
Defaults to HS256.
Returns encoded JWT token string. | Below is the the instruction that describes the task:
### Input:
Encode the set of claims to the JWT (JSON Web Token) format
according to the OpenID Connect specification:
http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
claims (dict): A dictionary with the OpenID Connect claims.
secret (str): Secret used to encode the id_token.
algorithm (str): Algorithm used for encoding.
Defaults to HS256.
Returns encoded JWT token string.
### Response:
def encode(self, secret, algorithm='HS256'):
"""
Encode the set of claims to the JWT (JSON Web Token) format
according to the OpenID Connect specification:
http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
claims (dict): A dictionary with the OpenID Connect claims.
secret (str): Secret used to encode the id_token.
algorithm (str): Algorithm used for encoding.
Defaults to HS256.
Returns encoded JWT token string.
"""
return jwt.encode(self.claims, secret, algorithm) |
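As a point of reference, the same call can be made directly against the PyJWT library; a minimal sketch, assuming PyJWT is installed (the claim values are invented):

import jwt  # PyJWT

claims = {"iss": "https://example.com", "sub": "1234567890", "aud": "my-client-id"}
token = jwt.encode(claims, "secret", algorithm="HS256")
print(token)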
def execute_setup(self): # type: () -> Dict[str,str]
"""
for really surprising things like a dict foo in setup(**foo)
consider python3 setup.py --version
:return:
"""
ver = execute_get_text("python setup.py --version")
if not ver:
return None
if "UserWarning" in ver:
logger.warning(
"python setup.py --version won't parse, got :" + unicode(ver)
)
            # UserWarning- The version specified ... is an invalid...
return {}
if ver:
string = unicode(ver).strip(" \n")
if "\n" in string:
string = string.split("\n")[0]
return {"setup.py --version": string.strip(" \n")}
else:
return {} | for really surprising things like a dict foo in setup(**foo)
consider python3 setup.py --version
:return: | Below is the the instruction that describes the task:
### Input:
for really surprising things like a dict foo in setup(**foo)
consider python3 setup.py --version
:return:
### Response:
def execute_setup(self): # type: () -> Dict[str,str]
"""
for really surprising things like a dict foo in setup(**foo)
consider python3 setup.py --version
:return:
"""
ver = execute_get_text("python setup.py --version")
if not ver:
return None
if "UserWarning" in ver:
logger.warning(
"python setup.py --version won't parse, got :" + unicode(ver)
)
            # UserWarning- The version specified ... is an invalid...
return {}
if ver:
string = unicode(ver).strip(" \n")
if "\n" in string:
string = string.split("\n")[0]
return {"setup.py --version": string.strip(" \n")}
else:
return {} |
def _convolve_buf(data_g, h_g, res_g=None):
"""
buffer variant
"""
assert_bufs_type(np.float32, data_g, h_g)
prog = OCLProgram(abspath("kernels/convolve.cl"))
if res_g is None:
res_g = OCLArray.empty(data_g.shape, dtype=np.float32)
Nhs = [np.int32(n) for n in h_g.shape]
kernel_name = "convolve%sd_buf" % (len(data_g.shape))
try:
prog.run_kernel(kernel_name, data_g.shape[::-1], None,
data_g.data, h_g.data, res_g.data,
*Nhs)
except cl.cffi_cl.LogicError as e:
        # this catches the logic error if the kernel is too big for constant memory
if e.code == -52:
kernel_name = "convolve%sd_buf_global" % (len(data_g.shape))
prog.run_kernel(kernel_name, data_g.shape[::-1], None,
data_g.data, h_g.data, res_g.data,
*Nhs)
else:
raise e
except cl.cffi_cl.RuntimeError as e:
        # this catches the runtime error if the kernel is too big for constant memory
if e.code == -5:
kernel_name = "convolve%sd_buf_global" % (len(data_g.shape))
prog.run_kernel(kernel_name, data_g.shape[::-1], None,
data_g.data, h_g.data, res_g.data,
*Nhs)
else:
raise e
return res_g | buffer variant | Below is the the instruction that describes the task:
### Input:
buffer variant
### Response:
def _convolve_buf(data_g, h_g, res_g=None):
"""
buffer variant
"""
assert_bufs_type(np.float32, data_g, h_g)
prog = OCLProgram(abspath("kernels/convolve.cl"))
if res_g is None:
res_g = OCLArray.empty(data_g.shape, dtype=np.float32)
Nhs = [np.int32(n) for n in h_g.shape]
kernel_name = "convolve%sd_buf" % (len(data_g.shape))
try:
prog.run_kernel(kernel_name, data_g.shape[::-1], None,
data_g.data, h_g.data, res_g.data,
*Nhs)
except cl.cffi_cl.LogicError as e:
        # this catches the logic error if the kernel is too big for constant memory
if e.code == -52:
kernel_name = "convolve%sd_buf_global" % (len(data_g.shape))
prog.run_kernel(kernel_name, data_g.shape[::-1], None,
data_g.data, h_g.data, res_g.data,
*Nhs)
else:
raise e
except cl.cffi_cl.RuntimeError as e:
        # this catches the runtime error if the kernel is too big for constant memory
if e.code == -5:
kernel_name = "convolve%sd_buf_global" % (len(data_g.shape))
prog.run_kernel(kernel_name, data_g.shape[::-1], None,
data_g.data, h_g.data, res_g.data,
*Nhs)
else:
raise e
return res_g |
def calculate_leapdays(init_date, final_date):
"""Currently unsupported, it only works for differences in years."""
leap_days = (final_date.year - 1) // 4 - (init_date.year - 1) // 4
leap_days -= (final_date.year - 1) // 100 - (init_date.year - 1) // 100
leap_days += (final_date.year - 1) // 400 - (init_date.year - 1) // 400
# TODO: Internal date correction (e.g. init_date is 1-March or later)
return datetime.timedelta(days=leap_days) | Currently unsupported, it only works for differences in years. | Below is the the instruction that describes the task:
### Input:
Currently unsupported, it only works for differences in years.
### Response:
def calculate_leapdays(init_date, final_date):
"""Currently unsupported, it only works for differences in years."""
leap_days = (final_date.year - 1) // 4 - (init_date.year - 1) // 4
leap_days -= (final_date.year - 1) // 100 - (init_date.year - 1) // 100
leap_days += (final_date.year - 1) // 400 - (init_date.year - 1) // 400
# TODO: Internal date correction (e.g. init_date is 1-March or later)
return datetime.timedelta(days=leap_days) |
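A standalone illustration of the same leap-day count using only the standard library (the dates are arbitrary):

import datetime

init_date = datetime.date(2000, 1, 1)
final_date = datetime.date(2010, 1, 1)

leap_days = (final_date.year - 1) // 4 - (init_date.year - 1) // 4
leap_days -= (final_date.year - 1) // 100 - (init_date.year - 1) // 100
leap_days += (final_date.year - 1) // 400 - (init_date.year - 1) // 400

print(datetime.timedelta(days=leap_days))  # 3 days, 0:00:00  (2000, 2004, 2008)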
def unregister(self, *model_list):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
for model in model_list:
if model not in self.registry:
raise NotRegistered('The model %s is not registered' %
model.__name__)
del self.registry[model] | Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered. | Below is the the instruction that describes the task:
### Input:
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
### Response:
def unregister(self, *model_list):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
for model in model_list:
if model not in self.registry:
raise NotRegistered('The model %s is not registered' %
model.__name__)
del self.registry[model] |
def refresh(self):
"""Reload the current page with the same request as originally done.
Any change (`select_form`, or any value filled-in in the form) made to
the current page before refresh is discarded.
:raise ValueError: Raised if no refreshable page is loaded, e.g., when
using the shallow ``Browser`` wrapper functions.
:return: Response of the request."""
old_request = self.__state.request
if old_request is None:
raise ValueError('The current page is not refreshable. Either no '
'page is opened or low-level browser methods '
'were used to do so')
resp = self.session.send(old_request)
Browser.add_soup(resp, self.soup_config)
self.__state = _BrowserState(page=resp.soup, url=resp.url,
request=resp.request)
return resp | Reload the current page with the same request as originally done.
Any change (`select_form`, or any value filled-in in the form) made to
the current page before refresh is discarded.
:raise ValueError: Raised if no refreshable page is loaded, e.g., when
using the shallow ``Browser`` wrapper functions.
:return: Response of the request. | Below is the the instruction that describes the task:
### Input:
Reload the current page with the same request as originally done.
Any change (`select_form`, or any value filled-in in the form) made to
the current page before refresh is discarded.
:raise ValueError: Raised if no refreshable page is loaded, e.g., when
using the shallow ``Browser`` wrapper functions.
:return: Response of the request.
### Response:
def refresh(self):
"""Reload the current page with the same request as originally done.
Any change (`select_form`, or any value filled-in in the form) made to
the current page before refresh is discarded.
:raise ValueError: Raised if no refreshable page is loaded, e.g., when
using the shallow ``Browser`` wrapper functions.
:return: Response of the request."""
old_request = self.__state.request
if old_request is None:
raise ValueError('The current page is not refreshable. Either no '
'page is opened or low-level browser methods '
'were used to do so')
resp = self.session.send(old_request)
Browser.add_soup(resp, self.soup_config)
self.__state = _BrowserState(page=resp.soup, url=resp.url,
request=resp.request)
return resp |
def slice_init(func):
"""
Decorator for adding partial application functionality to a basis object.
This will add an "apply_ind" argument to a basis object initialiser that
can be used to apply the basis function to only the dimensions specified in
apply_ind. E.g.,
>>> X = np.ones((100, 20))
>>> base = LinearBasis(onescol=False, apply_ind=slice(0, 10))
>>> base.transform(X).shape
(100, 10)
"""
@wraps(func)
def new_init(self, *args, **kwargs):
apply_ind = kwargs.pop('apply_ind', None)
if np.isscalar(apply_ind):
apply_ind = [apply_ind]
func(self, *args, **kwargs)
self.apply_ind = apply_ind
return new_init | Decorator for adding partial application functionality to a basis object.
This will add an "apply_ind" argument to a basis object initialiser that
can be used to apply the basis function to only the dimensions specified in
apply_ind. E.g.,
>>> X = np.ones((100, 20))
>>> base = LinearBasis(onescol=False, apply_ind=slice(0, 10))
>>> base.transform(X).shape
(100, 10) | Below is the the instruction that describes the task:
### Input:
Decorator for adding partial application functionality to a basis object.
This will add an "apply_ind" argument to a basis object initialiser that
can be used to apply the basis function to only the dimensions specified in
apply_ind. E.g.,
>>> X = np.ones((100, 20))
>>> base = LinearBasis(onescol=False, apply_ind=slice(0, 10))
>>> base.transform(X).shape
(100, 10)
### Response:
def slice_init(func):
"""
Decorator for adding partial application functionality to a basis object.
This will add an "apply_ind" argument to a basis object initialiser that
can be used to apply the basis function to only the dimensions specified in
apply_ind. E.g.,
>>> X = np.ones((100, 20))
>>> base = LinearBasis(onescol=False, apply_ind=slice(0, 10))
>>> base.transform(X).shape
(100, 10)
"""
@wraps(func)
def new_init(self, *args, **kwargs):
apply_ind = kwargs.pop('apply_ind', None)
if np.isscalar(apply_ind):
apply_ind = [apply_ind]
func(self, *args, **kwargs)
self.apply_ind = apply_ind
return new_init |
def pd_plot_data(self):
"""
Plot data for phase diagram.
2-comp - Full hull with energies
3/4-comp - Projection into 2D or 3D Gibbs triangle.
Returns:
(lines, stable_entries, unstable_entries):
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
lines = []
stable_entries = {}
for line in self.lines:
entry1 = entries[line[0]]
entry2 = entries[line[1]]
if self._dim < 3:
x = [data[line[0]][0], data[line[1]][0]]
y = [pd.get_form_energy_per_atom(entry1),
pd.get_form_energy_per_atom(entry2)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord(data[line, 0:2])
else:
coord = tet_coord(data[line, 0:3])
lines.append(coord)
labelcoord = list(zip(*coord))
stable_entries[labelcoord[0]] = entry1
stable_entries[labelcoord[1]] = entry2
all_entries = pd.all_entries
all_data = np.array(pd.all_entries_hulldata)
unstable_entries = dict()
stable = pd.stable_entries
for i in range(0, len(all_entries)):
entry = all_entries[i]
if entry not in stable:
if self._dim < 3:
x = [all_data[i][0], all_data[i][0]]
y = [pd.get_form_energy_per_atom(entry),
pd.get_form_energy_per_atom(entry)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord([all_data[i, 0:2],
all_data[i, 0:2]])
else:
coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3],
all_data[i, 0:3]])
labelcoord = list(zip(*coord))
unstable_entries[entry] = labelcoord[0]
return lines, stable_entries, unstable_entries | Plot data for phase diagram.
2-comp - Full hull with energies
3/4-comp - Projection into 2D or 3D Gibbs triangle.
Returns:
(lines, stable_entries, unstable_entries):
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram. | Below is the the instruction that describes the task:
### Input:
Plot data for phase diagram.
2-comp - Full hull with energies
3/4-comp - Projection into 2D or 3D Gibbs triangle.
Returns:
(lines, stable_entries, unstable_entries):
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
### Response:
def pd_plot_data(self):
"""
Plot data for phase diagram.
2-comp - Full hull with energies
3/4-comp - Projection into 2D or 3D Gibbs triangle.
Returns:
(lines, stable_entries, unstable_entries):
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
lines = []
stable_entries = {}
for line in self.lines:
entry1 = entries[line[0]]
entry2 = entries[line[1]]
if self._dim < 3:
x = [data[line[0]][0], data[line[1]][0]]
y = [pd.get_form_energy_per_atom(entry1),
pd.get_form_energy_per_atom(entry2)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord(data[line, 0:2])
else:
coord = tet_coord(data[line, 0:3])
lines.append(coord)
labelcoord = list(zip(*coord))
stable_entries[labelcoord[0]] = entry1
stable_entries[labelcoord[1]] = entry2
all_entries = pd.all_entries
all_data = np.array(pd.all_entries_hulldata)
unstable_entries = dict()
stable = pd.stable_entries
for i in range(0, len(all_entries)):
entry = all_entries[i]
if entry not in stable:
if self._dim < 3:
x = [all_data[i][0], all_data[i][0]]
y = [pd.get_form_energy_per_atom(entry),
pd.get_form_energy_per_atom(entry)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord([all_data[i, 0:2],
all_data[i, 0:2]])
else:
coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3],
all_data[i, 0:3]])
labelcoord = list(zip(*coord))
unstable_entries[entry] = labelcoord[0]
return lines, stable_entries, unstable_entries |
def model_to_dict(instance, fields=None, exclude=None):
"""
The same implementation as django model_to_dict but editable fields are allowed
"""
return {
field.name: field_to_dict(field, instance)
for field in chain(instance._meta.concrete_fields, instance._meta.many_to_many) # pylint: disable=W0212
if not should_exclude_field(field, fields, exclude)
} | The same implementation as django model_to_dict but editable fields are allowed | Below is the the instruction that describes the task:
### Input:
The same implementation as django model_to_dict but editable fields are allowed
### Response:
def model_to_dict(instance, fields=None, exclude=None):
"""
The same implementation as django model_to_dict but editable fields are allowed
"""
return {
field.name: field_to_dict(field, instance)
for field in chain(instance._meta.concrete_fields, instance._meta.many_to_many) # pylint: disable=W0212
if not should_exclude_field(field, fields, exclude)
} |
def parse_string(cls, content, basedir=None, resolve=True, unresolved_value=DEFAULT_SUBSTITUTION):
"""Parse URL
:param content: content to parse
:type content: basestring
:param resolve: If true, resolve substitutions
:param resolve: if true, resolve substitutions
:type resolve: boolean
        :param unresolved_value: assigned value to unresolved substitution.
        If overridden with a default value, it will replace all unresolved values with the default value.
        If it is set to pyhocon.STR_SUBSTITUTION then it will replace the value by its substitution expression (e.g., ${x})
:type unresolved_value: boolean
:return: Config object
:type return: Config
"""
return ConfigParser().parse(content, basedir, resolve, unresolved_value) | Parse URL
:param content: content to parse
:type content: basestring
:param resolve: If true, resolve substitutions
:param resolve: if true, resolve substitutions
:type resolve: boolean
:param unresolved_value: assigned value to unresolved substitution.
If overridden with a default value, it will replace all unresolved values with the default value.
If it is set to pyhocon.STR_SUBSTITUTION then it will replace the value by its substitution expression (e.g., ${x})
:type unresolved_value: boolean
:return: Config object
:type return: Config | Below is the the instruction that describes the task:
### Input:
Parse URL
:param content: content to parse
:type content: basestring
:param resolve: If true, resolve substitutions
:param resolve: if true, resolve substitutions
:type resolve: boolean
:param unresolved_value: assigned value to unresolved substitution.
If overridden with a default value, it will replace all unresolved values with the default value.
If it is set to pyhocon.STR_SUBSTITUTION then it will replace the value by its substitution expression (e.g., ${x})
:type unresolved_value: boolean
:return: Config object
:type return: Config
### Response:
def parse_string(cls, content, basedir=None, resolve=True, unresolved_value=DEFAULT_SUBSTITUTION):
"""Parse URL
:param content: content to parse
:type content: basestring
:param resolve: If true, resolve substitutions
:param resolve: if true, resolve substitutions
:type resolve: boolean
        :param unresolved_value: assigned value to unresolved substitution.
        If overridden with a default value, it will replace all unresolved values with the default value.
        If it is set to pyhocon.STR_SUBSTITUTION then it will replace the value by its substitution expression (e.g., ${x})
:type unresolved_value: boolean
:return: Config object
:type return: Config
"""
return ConfigParser().parse(content, basedir, resolve, unresolved_value) |
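This classmethod appears to back pyhocon's ConfigFactory.parse_string; a minimal usage sketch, assuming pyhocon is installed (the config text is invented):

from pyhocon import ConfigFactory

conf = ConfigFactory.parse_string("database { host = localhost, port = 5432 }")
print(conf.get("database.port"))  # 5432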
def get_alias(self, alias):
"""
Given a mnemonic, get the alias name(s) it falls under. If there aren't
any, you get an empty list.
"""
alias = alias or {}
return [k for k, v in alias.items() if self.mnemonic in v] | Given a mnemonic, get the alias name(s) it falls under. If there aren't
any, you get an empty list. | Below is the the instruction that describes the task:
### Input:
Given a mnemonic, get the alias name(s) it falls under. If there aren't
any, you get an empty list.
### Response:
def get_alias(self, alias):
"""
Given a mnemonic, get the alias name(s) it falls under. If there aren't
any, you get an empty list.
"""
alias = alias or {}
return [k for k, v in alias.items() if self.mnemonic in v] |
def set_screen_config(self, size_id, rotation, config_timestamp, rate=0, timestamp=X.CurrentTime):
"""Sets the screen to the specified size, rate, rotation and reflection.
rate can be 0 to have the server select an appropriate rate.
"""
return SetScreenConfig(
display=self.display,
opcode=self.display.get_extension_major(extname),
drawable=self,
timestamp=timestamp,
config_timestamp=config_timestamp,
size_id=size_id,
rotation=rotation,
rate=rate,
) | Sets the screen to the specified size, rate, rotation and reflection.
rate can be 0 to have the server select an appropriate rate. | Below is the the instruction that describes the task:
### Input:
Sets the screen to the specified size, rate, rotation and reflection.
rate can be 0 to have the server select an appropriate rate.
### Response:
def set_screen_config(self, size_id, rotation, config_timestamp, rate=0, timestamp=X.CurrentTime):
"""Sets the screen to the specified size, rate, rotation and reflection.
rate can be 0 to have the server select an appropriate rate.
"""
return SetScreenConfig(
display=self.display,
opcode=self.display.get_extension_major(extname),
drawable=self,
timestamp=timestamp,
config_timestamp=config_timestamp,
size_id=size_id,
rotation=rotation,
rate=rate,
) |
def compress(func):
"""
Compress route return data with gzip compression
"""
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if ('gzip' in bottle.request.headers.get('Accept-Encoding', '') and
isinstance(result, string_type) and
len(result) > 1024):
if isinstance(result, unicode):
result = result.encode('utf-8')
tmp_fo = BytesIO()
with gzip.GzipFile(mode='wb', fileobj=tmp_fo) as gzip_fo:
gzip_fo.write(result)
result = tmp_fo.getvalue()
bottle.response.add_header('Content-Encoding', 'gzip')
return result
return wrapper | Compress route return data with gzip compression | Below is the the instruction that describes the task:
### Input:
Compress route return data with gzip compression
### Response:
def compress(func):
"""
Compress route return data with gzip compression
"""
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if ('gzip' in bottle.request.headers.get('Accept-Encoding', '') and
isinstance(result, string_type) and
len(result) > 1024):
if isinstance(result, unicode):
result = result.encode('utf-8')
tmp_fo = BytesIO()
with gzip.GzipFile(mode='wb', fileobj=tmp_fo) as gzip_fo:
gzip_fo.write(result)
result = tmp_fo.getvalue()
bottle.response.add_header('Content-Encoding', 'gzip')
return result
return wrapper |
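A self-contained sketch of the gzip step on its own, independent of bottle (the payload is fabricated):

import gzip
from io import BytesIO

body = ("x" * 2048).encode("utf-8")        # pretend response body above the 1024-byte threshold
buf = BytesIO()
with gzip.GzipFile(mode="wb", fileobj=buf) as gz:
    gz.write(body)
compressed = buf.getvalue()
print(len(body), "->", len(compressed))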
def get_map(name, map_type, number, reverse=False):
"""
Return a `BrewerMap` representation of the specified color map.
Parameters
----------
name : str
Name of color map. Use `print_maps` to see available color maps.
map_type : {'Sequential', 'Diverging', 'Qualitative'}
Select color map type.
number : int
Number of defined colors in color map.
reverse : bool, optional
Set to True to get the reversed color map.
"""
number = str(number)
map_type = map_type.lower().capitalize()
# check for valid type
if map_type not in MAP_TYPES:
s = 'Invalid map type, must be one of {0}'.format(MAP_TYPES)
raise ValueError(s)
# make a dict of lower case map name to map name so this can be
# insensitive to case.
# this would be a perfect spot for a dict comprehension but going to
# wait on that to preserve 2.6 compatibility.
# map_names = {k.lower(): k for k in COLOR_MAPS[map_type].iterkeys()}
map_names = dict((k.lower(), k) for k in COLOR_MAPS[map_type].keys())
# check for valid name
if name.lower() not in map_names:
s = 'Invalid color map name {0!r} for type {1!r}.\n'
s = s.format(name, map_type)
valid_names = [str(k) for k in COLOR_MAPS[map_type].keys()]
valid_names.sort()
s += 'Valid names are: {0}'.format(valid_names)
raise ValueError(s)
name = map_names[name.lower()]
# check for valid number
if number not in COLOR_MAPS[map_type][name]:
s = 'Invalid number for map type {0!r} and name {1!r}.\n'
s = s.format(map_type, str(name))
valid_numbers = [int(k) for k in COLOR_MAPS[map_type][name].keys()]
valid_numbers.sort()
s += 'Valid numbers are : {0}'.format(valid_numbers)
raise ValueError(s)
colors = COLOR_MAPS[map_type][name][number]['Colors']
if reverse:
name += '_r'
colors = [x for x in reversed(colors)]
return BrewerMap(name, map_type, colors) | Return a `BrewerMap` representation of the specified color map.
Parameters
----------
name : str
Name of color map. Use `print_maps` to see available color maps.
map_type : {'Sequential', 'Diverging', 'Qualitative'}
Select color map type.
number : int
Number of defined colors in color map.
reverse : bool, optional
Set to True to get the reversed color map. | Below is the the instruction that describes the task:
### Input:
Return a `BrewerMap` representation of the specified color map.
Parameters
----------
name : str
Name of color map. Use `print_maps` to see available color maps.
map_type : {'Sequential', 'Diverging', 'Qualitative'}
Select color map type.
number : int
Number of defined colors in color map.
reverse : bool, optional
Set to True to get the reversed color map.
### Response:
def get_map(name, map_type, number, reverse=False):
"""
Return a `BrewerMap` representation of the specified color map.
Parameters
----------
name : str
Name of color map. Use `print_maps` to see available color maps.
map_type : {'Sequential', 'Diverging', 'Qualitative'}
Select color map type.
number : int
Number of defined colors in color map.
reverse : bool, optional
Set to True to get the reversed color map.
"""
number = str(number)
map_type = map_type.lower().capitalize()
# check for valid type
if map_type not in MAP_TYPES:
s = 'Invalid map type, must be one of {0}'.format(MAP_TYPES)
raise ValueError(s)
# make a dict of lower case map name to map name so this can be
# insensitive to case.
# this would be a perfect spot for a dict comprehension but going to
# wait on that to preserve 2.6 compatibility.
# map_names = {k.lower(): k for k in COLOR_MAPS[map_type].iterkeys()}
map_names = dict((k.lower(), k) for k in COLOR_MAPS[map_type].keys())
# check for valid name
if name.lower() not in map_names:
s = 'Invalid color map name {0!r} for type {1!r}.\n'
s = s.format(name, map_type)
valid_names = [str(k) for k in COLOR_MAPS[map_type].keys()]
valid_names.sort()
s += 'Valid names are: {0}'.format(valid_names)
raise ValueError(s)
name = map_names[name.lower()]
# check for valid number
if number not in COLOR_MAPS[map_type][name]:
s = 'Invalid number for map type {0!r} and name {1!r}.\n'
s = s.format(map_type, str(name))
valid_numbers = [int(k) for k in COLOR_MAPS[map_type][name].keys()]
valid_numbers.sort()
s += 'Valid numbers are : {0}'.format(valid_numbers)
raise ValueError(s)
colors = COLOR_MAPS[map_type][name][number]['Colors']
if reverse:
name += '_r'
colors = [x for x in reversed(colors)]
return BrewerMap(name, map_type, colors) |
def destroy(force):
"""Destroy all indexes."""
click.secho('Destroying indexes...', fg='red', bold=True, file=sys.stderr)
with click.progressbar(
current_search.delete(ignore=[400, 404] if force else None),
length=current_search.number_of_indexes) as bar:
for name, response in bar:
bar.label = name | Destroy all indexes. | Below is the the instruction that describes the task:
### Input:
Destroy all indexes.
### Response:
def destroy(force):
"""Destroy all indexes."""
click.secho('Destroying indexes...', fg='red', bold=True, file=sys.stderr)
with click.progressbar(
current_search.delete(ignore=[400, 404] if force else None),
length=current_search.number_of_indexes) as bar:
for name, response in bar:
bar.label = name |
def deserialize(self, data=None):
""" Invoke the deserializer
If the payload is a collection (more than 1 records)
then a list will be returned of normalized dict's.
If the payload is a single item then the normalized
dict will be returned (not a list)
:return: list or dict
"""
data = []
if self.req.content_type_params.get('header') != 'present':
abort(exceptions.InvalidRequestHeader(**{
'detail': 'When using text/csv your Content-Type '
'header MUST have a header=present parameter '
'& the payload MUST include a header of fields',
'links': 'tools.ietf.org/html/rfc4180#section-3'
}))
try:
reader = csv.DictReader(self.req.stream)
self._validate_field_headers(reader)
for row in reader:
Parser.run(row, reader)
row = Normalizer.run(row, reader)
row = super(Deserializer, self).deserialize(data)
data.append(row)
except csv.Error:
abort(exceptions.InvalidRequestBody)
return data | Invoke the deserializer
If the payload is a collection (more than 1 records)
then a list will be returned of normalized dict's.
If the payload is a single item then the normalized
dict will be returned (not a list)
:return: list or dict | Below is the the instruction that describes the task:
### Input:
Invoke the deserializer
If the payload is a collection (more than 1 records)
then a list will be returned of normalized dict's.
If the payload is a single item then the normalized
dict will be returned (not a list)
:return: list or dict
### Response:
def deserialize(self, data=None):
""" Invoke the deserializer
If the payload is a collection (more than 1 records)
then a list will be returned of normalized dict's.
If the payload is a single item then the normalized
dict will be returned (not a list)
:return: list or dict
"""
data = []
if self.req.content_type_params.get('header') != 'present':
abort(exceptions.InvalidRequestHeader(**{
'detail': 'When using text/csv your Content-Type '
'header MUST have a header=present parameter '
'& the payload MUST include a header of fields',
'links': 'tools.ietf.org/html/rfc4180#section-3'
}))
try:
reader = csv.DictReader(self.req.stream)
self._validate_field_headers(reader)
for row in reader:
Parser.run(row, reader)
row = Normalizer.run(row, reader)
row = super(Deserializer, self).deserialize(data)
data.append(row)
except csv.Error:
abort(exceptions.InvalidRequestBody)
return data |
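The header-driven parsing above rests on csv.DictReader; a standalone sketch with invented field names:

import csv
from io import StringIO

payload = StringIO("id,name\n1,alice\n2,bob\n")
reader = csv.DictReader(payload)
rows = [dict(row) for row in reader]
print(reader.fieldnames)  # ['id', 'name']
print(rows)               # [{'id': '1', 'name': 'alice'}, {'id': '2', 'name': 'bob'}]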
def print_split(column_to_split, total_columns):
"""Print a row that splits the given column into two columns while
shifting all the following columns."""
out = ""
for _ in range(column_to_split):
out += "| "
out += "|\\"
for _ in range(column_to_split + 1, total_columns):
out += " \\"
print(out) | Print a row that splits the given column into two columns while
shifting all the following columns. | Below is the the instruction that describes the task:
### Input:
Print a row that splits the given column into two columns while
shifting all the following columns.
### Response:
def print_split(column_to_split, total_columns):
"""Print a row that splits the given column into two columns while
shifting all the following columns."""
out = ""
for _ in range(column_to_split):
out += "| "
out += "|\\"
for _ in range(column_to_split + 1, total_columns):
out += " \\"
print(out) |
def _launch_forever_coro(coro, args, kwargs, loop):
'''
This helper function launches an async main function that was tagged with
forever=True. There are two possibilities:
- The function is a normal function, which handles initializing the event
loop, which is then run forever
- The function is a coroutine, which needs to be scheduled in the event
loop, which is then run forever
- There is also the possibility that the function is a normal function
wrapping a coroutine function
The function is therefore called unconditionally and scheduled in the event
loop if the return value is a coroutine object.
The reason this is a separate function is to make absolutely sure that all
the objects created are garbage collected after all is said and done; we
do this to ensure that any exceptions raised in the tasks are collected
ASAP.
'''
# Personal note: I consider this an antipattern, as it relies on the use of
# unowned resources. The setup function dumps some stuff into the event
# loop where it just whirls in the ether without a well defined owner or
# lifetime. For this reason, there's a good chance I'll remove the
# forever=True feature from autoasync at some point in the future.
thing = coro(*args, **kwargs)
if iscoroutine(thing):
loop.create_task(thing) | This helper function launches an async main function that was tagged with
forever=True. There are two possibilities:
- The function is a normal function, which handles initializing the event
loop, which is then run forever
- The function is a coroutine, which needs to be scheduled in the event
loop, which is then run forever
- There is also the possibility that the function is a normal function
wrapping a coroutine function
The function is therefore called unconditionally and scheduled in the event
loop if the return value is a coroutine object.
The reason this is a separate function is to make absolutely sure that all
the objects created are garbage collected after all is said and done; we
do this to ensure that any exceptions raised in the tasks are collected
ASAP. | Below is the the instruction that describes the task:
### Input:
This helper function launches an async main function that was tagged with
forever=True. There are two possibilities:
- The function is a normal function, which handles initializing the event
loop, which is then run forever
- The function is a coroutine, which needs to be scheduled in the event
loop, which is then run forever
- There is also the possibility that the function is a normal function
wrapping a coroutine function
The function is therefore called unconditionally and scheduled in the event
loop if the return value is a coroutine object.
The reason this is a separate function is to make absolutely sure that all
the objects created are garbage collected after all is said and done; we
do this to ensure that any exceptions raised in the tasks are collected
ASAP.
### Response:
def _launch_forever_coro(coro, args, kwargs, loop):
'''
This helper function launches an async main function that was tagged with
forever=True. There are two possibilities:
- The function is a normal function, which handles initializing the event
loop, which is then run forever
- The function is a coroutine, which needs to be scheduled in the event
loop, which is then run forever
- There is also the possibility that the function is a normal function
wrapping a coroutine function
The function is therefore called unconditionally and scheduled in the event
loop if the return value is a coroutine object.
The reason this is a separate function is to make absolutely sure that all
the objects created are garbage collected after all is said and done; we
do this to ensure that any exceptions raised in the tasks are collected
ASAP.
'''
# Personal note: I consider this an antipattern, as it relies on the use of
# unowned resources. The setup function dumps some stuff into the event
# loop where it just whirls in the ether without a well defined owner or
# lifetime. For this reason, there's a good chance I'll remove the
# forever=True feature from autoasync at some point in the future.
thing = coro(*args, **kwargs)
if iscoroutine(thing):
loop.create_task(thing) |
def inspect(self, w):
"""Get the latest value of the wire given, if possible."""
if isinstance(w, WireVector):
w = w.name
try:
vals = self.tracer.trace[w]
except KeyError:
pass
else:
if not vals:
raise PyrtlError('No context available. Please run a simulation step')
return vals[-1]
raise PyrtlError('CompiledSimulation does not support inspecting internal WireVectors') | Get the latest value of the wire given, if possible. | Below is the the instruction that describes the task:
### Input:
Get the latest value of the wire given, if possible.
### Response:
def inspect(self, w):
"""Get the latest value of the wire given, if possible."""
if isinstance(w, WireVector):
w = w.name
try:
vals = self.tracer.trace[w]
except KeyError:
pass
else:
if not vals:
raise PyrtlError('No context available. Please run a simulation step')
return vals[-1]
raise PyrtlError('CompiledSimulation does not support inspecting internal WireVectors') |
def iterboxed(self, rows):
"""Iterator that yields each scanline in boxed row flat pixel
format. `rows` should be an iterator that yields the bytes of
each row in turn.
"""
def asvalues(raw):
"""Convert a row of raw bytes into a flat row. Result will
be a freshly allocated object, not shared with
argument.
"""
if self.bitdepth == 8:
return array('B', raw)
if self.bitdepth == 16:
raw = tostring(raw)
return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
assert self.bitdepth < 8
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
for o in raw:
out.extend(map(lambda i: mask&(o>>i), shifts))
return out[:width]
return imap(asvalues, rows) | Iterator that yields each scanline in boxed row flat pixel
format. `rows` should be an iterator that yields the bytes of
each row in turn. | Below is the the instruction that describes the task:
### Input:
Iterator that yields each scanline in boxed row flat pixel
format. `rows` should be an iterator that yields the bytes of
each row in turn.
### Response:
def iterboxed(self, rows):
"""Iterator that yields each scanline in boxed row flat pixel
format. `rows` should be an iterator that yields the bytes of
each row in turn.
"""
def asvalues(raw):
"""Convert a row of raw bytes into a flat row. Result will
be a freshly allocated object, not shared with
argument.
"""
if self.bitdepth == 8:
return array('B', raw)
if self.bitdepth == 16:
raw = tostring(raw)
return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
assert self.bitdepth < 8
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
for o in raw:
out.extend(map(lambda i: mask&(o>>i), shifts))
return out[:width]
return imap(asvalues, rows) |
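The sub-byte branch unpacks several samples from each byte with shifts and a mask; a standalone sketch for bitdepth 2:

from array import array

bitdepth = 2
spb = 8 // bitdepth                       # samples per byte
mask = 2 ** bitdepth - 1
shifts = [bitdepth * i for i in reversed(range(spb))]

raw = bytes([0b00011011])                 # one byte packing the samples 0, 1, 2, 3
out = array('B')
for o in raw:
    out.extend((o >> s) & mask for s in shifts)
print(list(out))                          # [0, 1, 2, 3]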
def norm_join(prefix, suffix):
"""
Join ``prefix`` and ``suffix`` paths
and return the resulting path, normalized.
:param string prefix: the prefix path
:param string suffix: the suffix path
:rtype: string
"""
if (prefix is None) and (suffix is None):
return "."
if prefix is None:
return os.path.normpath(suffix)
if suffix is None:
return os.path.normpath(prefix)
return os.path.normpath(os.path.join(prefix, suffix)) | Join ``prefix`` and ``suffix`` paths
and return the resulting path, normalized.
:param string prefix: the prefix path
:param string suffix: the suffix path
:rtype: string | Below is the the instruction that describes the task:
### Input:
Join ``prefix`` and ``suffix`` paths
and return the resulting path, normalized.
:param string prefix: the prefix path
:param string suffix: the suffix path
:rtype: string
### Response:
def norm_join(prefix, suffix):
"""
Join ``prefix`` and ``suffix`` paths
and return the resulting path, normalized.
:param string prefix: the prefix path
:param string suffix: the suffix path
:rtype: string
"""
if (prefix is None) and (suffix is None):
return "."
if prefix is None:
return os.path.normpath(suffix)
if suffix is None:
return os.path.normpath(prefix)
return os.path.normpath(os.path.join(prefix, suffix)) |
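The behaviour reduces to os.path.join plus os.path.normpath; a quick standalone check (POSIX-style paths assumed):

import os

print(os.path.normpath(os.path.join("/tmp/a", "../b")))  # /tmp/b
print(os.path.normpath("b/./c"))                         # b/c   (what a None prefix falls back to)
print(os.path.normpath("."))                             # .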
def reshape_line_plot(df, x, y):
"""Reshape data from long form to "line plot form".
Line plot form has x value as the index with one column for each line.
Each column has data points as values and all metadata as column headers.
"""
idx = list(df.columns.drop(y))
if df.duplicated(idx).any():
warnings.warn('Duplicated index found.')
df = df.drop_duplicates(idx, keep='last')
df = df.set_index(idx)[y].unstack(x).T
return df | Reshape data from long form to "line plot form".
Line plot form has x value as the index with one column for each line.
Each column has data points as values and all metadata as column headers. | Below is the the instruction that describes the task:
### Input:
Reshape data from long form to "line plot form".
Line plot form has x value as the index with one column for each line.
Each column has data points as values and all metadata as column headers.
### Response:
def reshape_line_plot(df, x, y):
"""Reshape data from long form to "line plot form".
Line plot form has x value as the index with one column for each line.
Each column has data points as values and all metadata as column headers.
"""
idx = list(df.columns.drop(y))
if df.duplicated(idx).any():
warnings.warn('Duplicated index found.')
df = df.drop_duplicates(idx, keep='last')
df = df.set_index(idx)[y].unstack(x).T
return df |
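A small pandas sketch of the same long-to-wide reshape, with invented column names:

import pandas as pd

df = pd.DataFrame({
    "scenario": ["a", "a", "b", "b"],
    "year": [2020, 2030, 2020, 2030],
    "value": [1.0, 2.0, 3.0, 4.0],
})
x, y = "year", "value"
idx = list(df.columns.drop(y))
wide = df.drop_duplicates(idx, keep="last").set_index(idx)[y].unstack(x).T
print(wide)  # index: year, columns: scenario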
def _expand_dims(x, input_shape, output_shape):
"""Expand dimensions and transpose if necessary.
Args:
x: a tf.Tensor
input_shape: a Shape
output_shape: a Shape whose dimensions are a superset of
those in input_shape
Returns:
a tf.Tensor
"""
verify_no_new_dims([output_shape], input_shape)
if input_shape == output_shape or input_shape.ndims == 0:
return x
perm = [input_shape.dims.index(d) for d in output_shape.dims
if d in input_shape.dims]
x = tf.transpose(x, perm)
for i, d in enumerate(output_shape.dims):
if d not in input_shape.dims:
x = tf.expand_dims(x, i)
return x | Expand dimensions and transpose if necessary.
Args:
x: a tf.Tensor
input_shape: a Shape
output_shape: a Shape whose dimensions are a superset of
those in input_shape
Returns:
a tf.Tensor | Below is the the instruction that describes the task:
### Input:
Expand dimensions and transpose if necessary.
Args:
x: a tf.Tensor
input_shape: a Shape
output_shape: a Shape whose dimensions are a superset of
those in input_shape
Returns:
a tf.Tensor
### Response:
def _expand_dims(x, input_shape, output_shape):
"""Expand dimensions and transpose if necessary.
Args:
x: a tf.Tensor
input_shape: a Shape
output_shape: a Shape whose dimensions are a superset of
those in input_shape
Returns:
a tf.Tensor
"""
verify_no_new_dims([output_shape], input_shape)
if input_shape == output_shape or input_shape.ndims == 0:
return x
perm = [input_shape.dims.index(d) for d in output_shape.dims
if d in input_shape.dims]
x = tf.transpose(x, perm)
for i, d in enumerate(output_shape.dims):
if d not in input_shape.dims:
x = tf.expand_dims(x, i)
return x |
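The transpose-then-expand pattern carries over to plain numpy; a sketch with made-up dimension names:

import numpy as np

x = np.ones((3, 5))                                  # dims ("batch", "width")
input_dims = ["batch", "width"]
output_dims = ["width", "channel", "batch"]          # superset in a different order

perm = [input_dims.index(d) for d in output_dims if d in input_dims]
x = np.transpose(x, perm)                            # -> (5, 3)
for i, d in enumerate(output_dims):
    if d not in input_dims:
        x = np.expand_dims(x, i)                     # insert the new "channel" axis
print(x.shape)                                       # (5, 1, 3)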
def get_pubmed_citation_response(pubmed_identifiers: Iterable[str]):
"""Get the response from PubMed E-Utils for a given list of PubMed identifiers.
:param pubmed_identifiers:
:rtype: dict
"""
pubmed_identifiers = list(pubmed_identifiers)
url = EUTILS_URL_FMT.format(','.join(
pubmed_identifier
for pubmed_identifier in pubmed_identifiers
if pubmed_identifier
))
response = requests.get(url)
return response.json() | Get the response from PubMed E-Utils for a given list of PubMed identifiers.
:param pubmed_identifiers:
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Get the response from PubMed E-Utils for a given list of PubMed identifiers.
:param pubmed_identifiers:
:rtype: dict
### Response:
def get_pubmed_citation_response(pubmed_identifiers: Iterable[str]):
"""Get the response from PubMed E-Utils for a given list of PubMed identifiers.
:param pubmed_identifiers:
:rtype: dict
"""
pubmed_identifiers = list(pubmed_identifiers)
url = EUTILS_URL_FMT.format(','.join(
pubmed_identifier
for pubmed_identifier in pubmed_identifiers
if pubmed_identifier
))
response = requests.get(url)
return response.json() |
def getPolicyValue(self):
"""Get the policy and value vectors."""
self._cur.execute("SELECT action FROM policy")
r = self._cur.fetchall()
policy = [x[0] for x in r]
self._cur.execute("SELECT value FROM V")
r = self._cur.fetchall()
value = [x[0] for x in r]
return policy, value | Get the policy and value vectors. | Below is the the instruction that describes the task:
### Input:
Get the policy and value vectors.
### Response:
def getPolicyValue(self):
"""Get the policy and value vectors."""
self._cur.execute("SELECT action FROM policy")
r = self._cur.fetchall()
policy = [x[0] for x in r]
self._cur.execute("SELECT value FROM V")
r = self._cur.fetchall()
value = [x[0] for x in r]
return policy, value |
def get_menu_checked(self, request):
"""
        Get the list of checked menus for a user or user group
        Defined in usermenu_form.html
        The usermenu and groupmenu models are defined similarly, e.g. menus_checked and menus_show
        @return eg. ['1', '8', '9', '10' ]
        Get the check_ids of a user or user group, given app_label, model_name and pk, e.g. /easyui/menulistview/?app_label=easyui&model_name=UserMenu&pk=1
"""
checked_id = []
qd = request.GET
query_dict = dict(qd.items())
if query_dict:
#object = get_object(**query_dict)
app_label = query_dict['app_label']
model_name = query_dict['model_name']
pk = query_dict['pk']
model = get_model(app_label, model_name)
object = model.objects.get(pk=pk)
checked_id = object.menus_checked.split(',')
        return checked_id | Get the list of checked menus for a user or user group
Defined in usermenu_form.html
The usermenu and groupmenu models are defined similarly, e.g. menus_checked and menus_show
@return eg. ['1', '8', '9', '10' ]
Get the check_ids of a user or user group, given app_label, model_name and pk, e.g. /easyui/menulistview/?app_label=easyui&model_name=UserMenu&pk=1 | Below is the the instruction that describes the task:
### Input:
Get the list of checked menus for a user or user group
Defined in usermenu_form.html
The usermenu and groupmenu models are defined similarly, e.g. menus_checked and menus_show
@return eg. ['1', '8', '9', '10' ]
Get the check_ids of a user or user group, given app_label, model_name and pk, e.g. /easyui/menulistview/?app_label=easyui&model_name=UserMenu&pk=1
### Response:
def get_menu_checked(self, request):
"""
        Get the list of checked menus for a user or user group
        Defined in usermenu_form.html
        The usermenu and groupmenu models are defined similarly, e.g. menus_checked and menus_show
        @return eg. ['1', '8', '9', '10' ]
        Get the check_ids of a user or user group, given app_label, model_name and pk, e.g. /easyui/menulistview/?app_label=easyui&model_name=UserMenu&pk=1
"""
checked_id = []
qd = request.GET
query_dict = dict(qd.items())
if query_dict:
#object = get_object(**query_dict)
app_label = query_dict['app_label']
model_name = query_dict['model_name']
pk = query_dict['pk']
model = get_model(app_label, model_name)
object = model.objects.get(pk=pk)
checked_id = object.menus_checked.split(',')
return checked_id |
def on_action_end(self, action, logs={}):
""" Called at end of each action for each callback in callbackList"""
for callback in self.callbacks:
if callable(getattr(callback, 'on_action_end', None)):
callback.on_action_end(action, logs=logs) | Called at end of each action for each callback in callbackList | Below is the the instruction that describes the task:
### Input:
Called at end of each action for each callback in callbackList
### Response:
def on_action_end(self, action, logs={}):
""" Called at end of each action for each callback in callbackList"""
for callback in self.callbacks:
if callable(getattr(callback, 'on_action_end', None)):
callback.on_action_end(action, logs=logs) |
def DownloadXilinx(self, bitfile):
"""We hijack this call to perform the socket connect"""
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((self.simulation_host, self.simulation_port))
self._iface = PickleInterface(self._sock)
return True | We hijack this call to perform the socket connect | Below is the the instruction that describes the task:
### Input:
We hijack this call to perform the socket connect
### Response:
def DownloadXilinx(self, bitfile):
"""We hijack this call to perform the socket connect"""
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((self.simulation_host, self.simulation_port))
self._iface = PickleInterface(self._sock)
return True |
def policy_iteration(mdp):
"Solve an MDP by policy iteration [Fig. 17.7]"
U = dict([(s, 0) for s in mdp.states])
pi = dict([(s, random.choice(mdp.actions(s))) for s in mdp.states])
while True:
U = policy_evaluation(pi, U, mdp)
unchanged = True
for s in mdp.states:
a = argmax(mdp.actions(s), lambda a: expected_utility(a,s,U,mdp))
if a != pi[s]:
pi[s] = a
unchanged = False
if unchanged:
return pi | Solve an MDP by policy iteration [Fig. 17.7] | Below is the the instruction that describes the task:
### Input:
Solve an MDP by policy iteration [Fig. 17.7]
### Response:
def policy_iteration(mdp):
"Solve an MDP by policy iteration [Fig. 17.7]"
U = dict([(s, 0) for s in mdp.states])
pi = dict([(s, random.choice(mdp.actions(s))) for s in mdp.states])
while True:
U = policy_evaluation(pi, U, mdp)
unchanged = True
for s in mdp.states:
a = argmax(mdp.actions(s), lambda a: expected_utility(a,s,U,mdp))
if a != pi[s]:
pi[s] = a
unchanged = False
if unchanged:
return pi |
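A toy end-to-end run on a two-state MDP, with the helpers the snippet assumes (expected_utility, policy_evaluation, argmax) written inline; all transition numbers are invented:

import random

states = ["s0", "s1"]
actions = {"s0": ["stay", "go"], "s1": ["stay"]}
T = {"s0": {"stay": [(1.0, "s0")], "go": [(1.0, "s1")]},   # s, a -> [(prob, next_state)]
     "s1": {"stay": [(1.0, "s1")]}}
R = {"s0": 0.0, "s1": 1.0}
gamma = 0.9

def expected_utility(a, s, U):
    return sum(p * U[s2] for p, s2 in T[s][a])

def policy_evaluation(pi, U, k=20):
    for _ in range(k):
        for s in states:
            U[s] = R[s] + gamma * expected_utility(pi[s], s, U)
    return U

U = {s: 0.0 for s in states}
pi = {s: random.choice(actions[s]) for s in states}
while True:
    U = policy_evaluation(pi, U)
    unchanged = True
    for s in states:
        a = max(actions[s], key=lambda a: expected_utility(a, s, U))
        if a != pi[s]:
            pi[s] = a
            unchanged = False
    if unchanged:
        break
print(pi)  # {'s0': 'go', 's1': 'stay'}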
async def index_page(self, request):
"""
Return index page with initial state for admin
"""
context = {"initial_state": self.schema.to_json()}
return render_template(
self.template,
request,
context,
app_key=TEMPLATE_APP_KEY,
) | Return index page with initial state for admin | Below is the the instruction that describes the task:
### Input:
Return index page with initial state for admin
### Response:
async def index_page(self, request):
"""
Return index page with initial state for admin
"""
context = {"initial_state": self.schema.to_json()}
return render_template(
self.template,
request,
context,
app_key=TEMPLATE_APP_KEY,
) |
def get_records(self, hql, parameters=None):
"""
Get a set of records from Presto
"""
try:
return super().get_records(
self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(self._get_pretty_exception_message(e)) | Get a set of records from Presto | Below is the the instruction that describes the task:
### Input:
Get a set of records from Presto
### Response:
def get_records(self, hql, parameters=None):
"""
Get a set of records from Presto
"""
try:
return super().get_records(
self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(self._get_pretty_exception_message(e)) |
def _setup_log(self):
'''
Setup the log object.
'''
logging_level = CONFIG.LOGGING_LEVEL.get(self.log_level.lower())
logging.basicConfig(format=self.log_format,
level=logging_level) | Setup the log object. | Below is the the instruction that describes the task:
### Input:
Setup the log object.
### Response:
def _setup_log(self):
'''
Setup the log object.
'''
logging_level = CONFIG.LOGGING_LEVEL.get(self.log_level.lower())
logging.basicConfig(format=self.log_format,
level=logging_level) |
def EnsureSConsVersion(self, major, minor, revision=0):
"""Exit abnormally if the SCons version is not late enough."""
# split string to avoid replacement during build process
if SCons.__version__ == '__' + 'VERSION__':
SCons.Warnings.warn(SCons.Warnings.DevelopmentVersionWarning,
"EnsureSConsVersion is ignored for development version")
return
scons_ver = self._get_major_minor_revision(SCons.__version__)
if scons_ver < (major, minor, revision):
if revision:
scons_ver_string = '%d.%d.%d' % (major, minor, revision)
else:
scons_ver_string = '%d.%d' % (major, minor)
print("SCons %s or greater required, but you have SCons %s" % \
(scons_ver_string, SCons.__version__))
sys.exit(2) | Exit abnormally if the SCons version is not late enough. | Below is the the instruction that describes the task:
### Input:
Exit abnormally if the SCons version is not late enough.
### Response:
def EnsureSConsVersion(self, major, minor, revision=0):
"""Exit abnormally if the SCons version is not late enough."""
# split string to avoid replacement during build process
if SCons.__version__ == '__' + 'VERSION__':
SCons.Warnings.warn(SCons.Warnings.DevelopmentVersionWarning,
"EnsureSConsVersion is ignored for development version")
return
scons_ver = self._get_major_minor_revision(SCons.__version__)
if scons_ver < (major, minor, revision):
if revision:
scons_ver_string = '%d.%d.%d' % (major, minor, revision)
else:
scons_ver_string = '%d.%d' % (major, minor)
print("SCons %s or greater required, but you have SCons %s" % \
(scons_ver_string, SCons.__version__))
sys.exit(2) |
def console_load_asc(con: tcod.console.Console, filename: str) -> bool:
"""Update a console from a non-delimited ASCII `.asc` file."""
return bool(
lib.TCOD_console_load_asc(_console(con), filename.encode("utf-8"))
) | Update a console from a non-delimited ASCII `.asc` file. | Below is the the instruction that describes the task:
### Input:
Update a console from a non-delimited ASCII `.asc` file.
### Response:
def console_load_asc(con: tcod.console.Console, filename: str) -> bool:
"""Update a console from a non-delimited ASCII `.asc` file."""
return bool(
lib.TCOD_console_load_asc(_console(con), filename.encode("utf-8"))
) |
def get_nodes_by_namespace(graph: BELGraph, namespaces: Strings) -> Set[BaseEntity]:
"""Get all nodes identified by the given namespace(s)."""
return get_nodes(graph, namespace_inclusion_builder(namespaces)) | Get all nodes identified by the given namespace(s). | Below is the the instruction that describes the task:
### Input:
Get all nodes identified by the given namespace(s).
### Response:
def get_nodes_by_namespace(graph: BELGraph, namespaces: Strings) -> Set[BaseEntity]:
"""Get all nodes identified by the given namespace(s)."""
return get_nodes(graph, namespace_inclusion_builder(namespaces)) |
def slice_query(self, slice_id):
"""
This method exposes an API endpoint to
get the database query string for this slice
"""
viz_obj = get_viz(slice_id)
security_manager.assert_datasource_permission(viz_obj.datasource)
return self.get_query_string_response(viz_obj) | This method exposes an API endpoint to
get the database query string for this slice | Below is the the instruction that describes the task:
### Input:
This method exposes an API endpoint to
get the database query string for this slice
### Response:
def slice_query(self, slice_id):
"""
This method exposes an API endpoint to
get the database query string for this slice
"""
viz_obj = get_viz(slice_id)
security_manager.assert_datasource_permission(viz_obj.datasource)
return self.get_query_string_response(viz_obj) |
def format_level_1_memory(memory):
""" Format an experiment result memory object for measurement level 1.
Args:
memory (list): Memory from experiment with `meas_level==1`. `avg` or
`single` will be inferred from shape of result memory.
Returns:
np.ndarray: Measurement level 1 complex numpy array
Raises:
QiskitError: If the returned numpy array does not have 1 (avg) or 2 (single)
            indices.
"""
formatted_memory = _list_to_complex_array(memory)
# infer meas_return from shape of returned data.
if not 1 <= len(formatted_memory.shape) <= 2:
raise QiskitError('Level one memory is not of correct shape.')
return formatted_memory | Format an experiment result memory object for measurement level 1.
Args:
memory (list): Memory from experiment with `meas_level==1`. `avg` or
`single` will be inferred from shape of result memory.
Returns:
np.ndarray: Measurement level 1 complex numpy array
Raises:
QiskitError: If the returned numpy array does not have 1 (avg) or 2 (single)
        indices. | Below is the the instruction that describes the task:
### Input:
Format an experiment result memory object for measurement level 1.
Args:
memory (list): Memory from experiment with `meas_level==1`. `avg` or
`single` will be inferred from shape of result memory.
Returns:
np.ndarray: Measurement level 1 complex numpy array
Raises:
QiskitError: If the returned numpy array does not have 1 (avg) or 2 (single)
indicies.
### Response:
def format_level_1_memory(memory):
""" Format an experiment result memory object for measurement level 1.
Args:
memory (list): Memory from experiment with `meas_level==1`. `avg` or
`single` will be inferred from shape of result memory.
Returns:
np.ndarray: Measurement level 1 complex numpy array
Raises:
QiskitError: If the returned numpy array does not have 1 (avg) or 2 (single)
indicies.
"""
formatted_memory = _list_to_complex_array(memory)
# infer meas_return from shape of returned data.
if not 1 <= len(formatted_memory.shape) <= 2:
raise QiskitError('Level one memory is not of correct shape.')
return formatted_memory |
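A small worked example with made-up IQ values; each inner [real, imag] pair becomes one complex point:

avg_memory = [[0.12, -0.45], [1.03, 0.88]]     # invented level-1 averaged memory
iq = format_level_1_memory(avg_memory)
print(iq)          # [0.12-0.45j  1.03+0.88j]
print(iq.shape)    # (2,) -- one complex point per memory slot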
def new(self, inlineparent = None):
'''
Compatible to Parser.new()
'''
v = list(range(0, self.size))
for i in range(0, self.size):
v[i] = self.innerparser.new()
return v | Compatible to Parser.new() | Below is the the instruction that describes the task:
### Input:
Compatible to Parser.new()
### Response:
def new(self, inlineparent = None):
'''
Compatible to Parser.new()
'''
v = list(range(0, self.size))
for i in range(0, self.size):
v[i] = self.innerparser.new()
return v |
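A self-contained sketch with stand-in parser classes (the real ones come from the surrounding parser framework); the function above is attached as a method so the snippet runs on its own:

class _StubInner:                        # stand-in inner parser
    def new(self, inlineparent=None):
        return 0

class _StubArray:                        # stand-in array parser
    def __init__(self, innerparser, size):
        self.innerparser = innerparser
        self.size = size

_StubArray.new = new                     # reuse the function defined above
print(_StubArray(_StubInner(), 4).new())     # -> [0, 0, 0, 0]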
def _write_header(self, image, hdu):
"""Write header from image object to given HDU."""
hduhdr = hdu.header
# Ginga image header object for the given extension only.
# Cannot use get_header() because that might also return PRI hdr.
ghdr = image.metadata['header']
for key in ghdr:
# Need this to avoid duplication because COMMENT is a weird field
if key.upper() == 'COMMENT':
continue
bnch = ghdr.get_card(key)
# Insert new keyword
if key not in hduhdr:
hduhdr[key] = (bnch.value, bnch.comment)
# Update existing keyword
elif hduhdr[key] != bnch.value:
hduhdr[key] = bnch.value | Write header from image object to given HDU. | Below is the the instruction that describes the task:
### Input:
Write header from image object to given HDU.
### Response:
def _write_header(self, image, hdu):
"""Write header from image object to given HDU."""
hduhdr = hdu.header
# Ginga image header object for the given extension only.
# Cannot use get_header() because that might also return PRI hdr.
ghdr = image.metadata['header']
for key in ghdr:
# Need this to avoid duplication because COMMENT is a weird field
if key.upper() == 'COMMENT':
continue
bnch = ghdr.get_card(key)
# Insert new keyword
if key not in hduhdr:
hduhdr[key] = (bnch.value, bnch.comment)
# Update existing keyword
elif hduhdr[key] != bnch.value:
hduhdr[key] = bnch.value |
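A hedged, self-contained sketch: _Card, _Header, and _Image are minimal stand-ins for Ginga's header objects, astropy is assumed to be available, and since `self` is unused by the method, None is passed for it:

from astropy.io import fits

class _Card:                             # stand-in for a Ginga header card
    def __init__(self, value, comment=''):
        self.value, self.comment = value, comment

class _Header(dict):                     # Ginga-like header mapping
    def get_card(self, key):
        return _Card(*self[key])

class _Image:                            # minimal image exposing metadata['header']
    metadata = {'header': _Header(OBJECT=('M31', 'target'),
                                  EXPTIME=(30.0, 'exposure [s]'))}

hdu = fits.PrimaryHDU()
_write_header(None, _Image(), hdu)
print(hdu.header['OBJECT'], hdu.header['EXPTIME'])   # M31 30.0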
def get_nameserver_detail_output_show_nameserver_nameserver_redirect(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_nameserver_detail = ET.Element("get_nameserver_detail")
config = get_nameserver_detail
output = ET.SubElement(get_nameserver_detail, "output")
show_nameserver = ET.SubElement(output, "show-nameserver")
nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
nameserver_portid_key.text = kwargs.pop('nameserver_portid')
nameserver_redirect = ET.SubElement(show_nameserver, "nameserver-redirect")
nameserver_redirect.text = kwargs.pop('nameserver_redirect')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_nameserver_detail_output_show_nameserver_nameserver_redirect(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_nameserver_detail = ET.Element("get_nameserver_detail")
config = get_nameserver_detail
output = ET.SubElement(get_nameserver_detail, "output")
show_nameserver = ET.SubElement(output, "show-nameserver")
nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
nameserver_portid_key.text = kwargs.pop('nameserver_portid')
nameserver_redirect = ET.SubElement(show_nameserver, "nameserver-redirect")
nameserver_redirect.text = kwargs.pop('nameserver_redirect')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
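A self-contained sketch: a stub stands in for the auto-generated device class, and passing ET.tostring as the callback returns the request XML instead of sending it; the port id and redirect values are placeholders:

import xml.etree.ElementTree as ET

class _StubDevice:
    _callback = None             # unused because an explicit callback is supplied

request = get_nameserver_detail_output_show_nameserver_nameserver_redirect(
    _StubDevice(),
    nameserver_portid='010100',          # placeholder port id
    nameserver_redirect='true',          # placeholder value
    callback=ET.tostring,
)
print(request.decode())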
def _check_load_paths(load_path):
'''
Checks the validity of the load_path, returns a sanitized version
with invalid paths removed.
'''
if load_path is None or not isinstance(load_path, six.string_types):
return None
_paths = []
for _path in load_path.split(':'):
if os.path.isabs(_path) and os.path.isdir(_path):
_paths.append(_path)
else:
log.info('Invalid augeas_cfg load_path entry: %s removed', _path)
if not _paths:
return None
return ':'.join(_paths) | Checks the validity of the load_path, returns a sanitized version
with invalid paths removed. | Below is the the instruction that describes the task:
### Input:
Checks the validity of the load_path, returns a sanitized version
with invalid paths removed.
### Response:
def _check_load_paths(load_path):
'''
Checks the validity of the load_path, returns a sanitized version
with invalid paths removed.
'''
if load_path is None or not isinstance(load_path, six.string_types):
return None
_paths = []
for _path in load_path.split(':'):
if os.path.isabs(_path) and os.path.isdir(_path):
_paths.append(_path)
else:
log.info('Invalid augeas_cfg load_path entry: %s removed', _path)
if not _paths:
return None
return ':'.join(_paths) |
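An illustrative check, assuming the function sits in its Salt module with `os`, `six`, and `log` already imported as in the source:

import tempfile

good = tempfile.mkdtemp()                         # a real absolute directory
print(_check_load_paths(good + ':/no/such/dir:relative/path'))   # -> only `good` survives
print(_check_load_paths(None))                    # -> None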
def stem(self, words):
"""
Use the porter stemmer to generate consistent forms of
words, e.g.::
from walrus.search.utils import PorterStemmer
stemmer = PorterStemmer()
for word in ['faith', 'faiths', 'faithful']:
print s.stem(word, 0, len(word) - 1)
# Prints:
# faith
# faith
# faith
"""
stemmer = PorterStemmer()
_stem = stemmer.stem
for word in words:
yield _stem(word, 0, len(word) - 1) | Use the porter stemmer to generate consistent forms of
words, e.g.::
from walrus.search.utils import PorterStemmer
stemmer = PorterStemmer()
for word in ['faith', 'faiths', 'faithful']:
print s.stem(word, 0, len(word) - 1)
# Prints:
# faith
# faith
# faith | Below is the the instruction that describes the task:
### Input:
Use the porter stemmer to generate consistent forms of
words, e.g.::
from walrus.search.utils import PorterStemmer
stemmer = PorterStemmer()
for word in ['faith', 'faiths', 'faithful']:
print s.stem(word, 0, len(word) - 1)
# Prints:
# faith
# faith
# faith
### Response:
def stem(self, words):
"""
Use the porter stemmer to generate consistent forms of
words, e.g.::
from walrus.search.utils import PorterStemmer
stemmer = PorterStemmer()
for word in ['faith', 'faiths', 'faithful']:
print s.stem(word, 0, len(word) - 1)
# Prints:
# faith
# faith
# faith
"""
stemmer = PorterStemmer()
_stem = stemmer.stem
for word in words:
yield _stem(word, 0, len(word) - 1) |
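A quick check, assuming walrus is installed and using the import path named in the docstring above; `self` is unused inside stem(), so None suffices here:

from walrus.search.utils import PorterStemmer    # makes PorterStemmer available to stem()

print(list(stem(None, ['faith', 'faiths', 'faithful'])))
# expected, per the docstring: ['faith', 'faith', 'faith']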
def _stat(self, axis=None, func=None, name=None, keepdims=False):
"""
Compute a statistic over an axis.
Can provide either a function (for use in a reduce)
or a name (for use by a stat counter).
Parameters
----------
axis : tuple or int, optional, default=None
Axis to compute statistic over, if None
will compute over all axes
func : function, optional, default=None
Function for reduce, see BoltArraySpark.reduce
name : str
A named statistic, see StatCounter
keepdims : boolean, optional, default=False
Keep axis remaining after operation with size 1.
"""
if axis is None:
axis = list(range(len(self.shape)))
axis = tupleize(axis)
if func and not name:
return self.reduce(func, axis, keepdims)
if name and not func:
from bolt.local.array import BoltArrayLocal
swapped = self._align(axis)
def reducer(left, right):
return left.combine(right)
counter = swapped._rdd.values()\
.mapPartitions(lambda i: [StatCounter(values=i, stats=name)])\
.treeReduce(reducer, depth=3)
arr = getattr(counter, name)
if keepdims:
for i in axis:
arr = expand_dims(arr, axis=i)
return BoltArrayLocal(arr).toscalar()
else:
raise ValueError('Must specify either a function or a statistic name.') | Compute a statistic over an axis.
Can provide either a function (for use in a reduce)
or a name (for use by a stat counter).
Parameters
----------
axis : tuple or int, optional, default=None
Axis to compute statistic over, if None
will compute over all axes
func : function, optional, default=None
Function for reduce, see BoltArraySpark.reduce
name : str
A named statistic, see StatCounter
keepdims : boolean, optional, default=False
Keep axis remaining after operation with size 1. | Below is the the instruction that describes the task:
### Input:
Compute a statistic over an axis.
Can provide either a function (for use in a reduce)
or a name (for use by a stat counter).
Parameters
----------
axis : tuple or int, optional, default=None
Axis to compute statistic over, if None
will compute over all axes
func : function, optional, default=None
Function for reduce, see BoltArraySpark.reduce
name : str
A named statistic, see StatCounter
keepdims : boolean, optional, default=False
Keep axis remaining after operation with size 1.
### Response:
def _stat(self, axis=None, func=None, name=None, keepdims=False):
"""
Compute a statistic over an axis.
Can provide either a function (for use in a reduce)
or a name (for use by a stat counter).
Parameters
----------
axis : tuple or int, optional, default=None
Axis to compute statistic over, if None
will compute over all axes
func : function, optional, default=None
Function for reduce, see BoltArraySpark.reduce
name : str
A named statistic, see StatCounter
keepdims : boolean, optional, default=False
Keep axis remaining after operation with size 1.
"""
if axis is None:
axis = list(range(len(self.shape)))
axis = tupleize(axis)
if func and not name:
return self.reduce(func, axis, keepdims)
if name and not func:
from bolt.local.array import BoltArrayLocal
swapped = self._align(axis)
def reducer(left, right):
return left.combine(right)
counter = swapped._rdd.values()\
.mapPartitions(lambda i: [StatCounter(values=i, stats=name)])\
.treeReduce(reducer, depth=3)
arr = getattr(counter, name)
if keepdims:
for i in axis:
arr = expand_dims(arr, axis=i)
return BoltArrayLocal(arr).toscalar()
else:
raise ValueError('Must specify either a function or a statistic name.') |
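A hypothetical usage sketch; it assumes the bolt-python package and a live SparkContext `sc`, so it will not run outside a Spark session:

from bolt import ones

a = ones((100, 50), sc)                           # distributed 100 x 50 array of ones
col_means = a._stat(axis=0, name='mean')          # named statistic via StatCounter
overall = a._stat(name='mean', keepdims=True)     # over all axes, singleton dims kept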
def close(self, force=False):
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores
SIGINT)."""
if not self.closed:
self.pty.close()
self.fileobj.close()
self._server.close()
# Give kernel time to update process status.
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise IOError('Could not terminate the child.')
self.fd = -1
self.closed = True
del self.pty
self.pty = None | This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores
SIGINT). | Below is the the instruction that describes the task:
### Input:
This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores
SIGINT).
### Response:
def close(self, force=False):
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores
SIGINT)."""
if not self.closed:
self.pty.close()
self.fileobj.close()
self._server.close()
# Give kernel time to update process status.
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise IOError('Could not terminate the child.')
self.fd = -1
self.closed = True
del self.pty
self.pty = None |
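A generic, hypothetical sketch of the calling pattern; `spawn` and its methods are stand-ins for whatever spawn class exposes this close(), not a documented API:

child = spawn('cat')         # hypothetical spawn class
child.sendline('hello')
child.close()                # graceful close; raises IOError if the child survives
child.close(force=True)      # calling close() again is allowed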
def list_meta_fields():
'''
Show all meta data fields for this company.
CLI Example:
salt myminion bamboohr.list_meta_fields
'''
ret = {}
status, result = _query(action='meta', command='fields')
root = ET.fromstring(result)
fields = root.getchildren()
for field in fields:
field_id = None
field_ret = {'name': field.text}
for item in field.items():
field_ret[item[0]] = item[1]
if item[0] == 'id':
field_id = item[1]
ret[field_id] = field_ret
return ret | Show all meta data fields for this company.
CLI Example:
salt myminion bamboohr.list_meta_fields | Below is the the instruction that describes the task:
### Input:
Show all meta data fields for this company.
CLI Example:
salt myminion bamboohr.list_meta_fields
### Response:
def list_meta_fields():
'''
Show all meta data fields for this company.
CLI Example:
salt myminion bamboohr.list_meta_fields
'''
ret = {}
status, result = _query(action='meta', command='fields')
root = ET.fromstring(result)
fields = root.getchildren()
for field in fields:
field_id = None
field_ret = {'name': field.text}
for item in field.items():
field_ret[item[0]] = item[1]
if item[0] == 'id':
field_id = item[1]
ret[field_id] = field_ret
return ret |
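A hedged example of invoking the same execution module from Python instead of the CLI; it assumes a running Salt master and a minion configured with BambooHR credentials:

import salt.client

local = salt.client.LocalClient()
result = local.cmd('myminion', 'bamboohr.list_meta_fields')
for field_id, info in result.get('myminion', {}).items():
    print(field_id, info.get('name'))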