code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k) |
---|---|---|
def run_hooks(obj, hooks, *args):
"""Run each function in `hooks' with args"""
for hook in hooks:
if hook(obj, *args): return True
pass
return False | Run each function in `hooks' with args | Below is the instruction that describes the task:
### Input:
Run each function in `hooks' with args
### Response:
def run_hooks(obj, hooks, *args):
"""Run each function in `hooks' with args"""
for hook in hooks:
if hook(obj, *args): return True
pass
return False |
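A minimal usage sketch for the `run_hooks` helper above; the hook functions and the `"quit"` event are made up purely to show the short-circuit behaviour:
```python
def log_hook(obj, event):
    print("saw", event)
    return False            # not handled, keep iterating

def stop_hook(obj, event):
    return event == "quit"  # handled -> run_hooks returns True immediately

handled = run_hooks(object(), [log_hook, stop_hook], "quit")
print(handled)              # True; any hooks after stop_hook would not run
```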
def _get_part(pointlist, strokes):
"""Get some strokes of pointlist
Parameters
----------
pointlist : list of lists of dicts
strokes : list of integers
Returns
-------
list of lists of dicts
"""
result = []
strokes = sorted(strokes)
for stroke_index in strokes:
result.append(pointlist[stroke_index])
return result | Get some strokes of pointlist
Parameters
----------
pointlist : list of lists of dicts
strokes : list of integers
Returns
-------
list of lists of dicts | Below is the instruction that describes the task:
### Input:
Get some strokes of pointlist
Parameters
----------
pointlist : list of lists of dicts
strokes : list of integers
Returns
-------
list of lists of dicts
### Response:
def _get_part(pointlist, strokes):
"""Get some strokes of pointlist
Parameters
----------
pointlist : list of lists of dicts
strokes : list of integers
Returns
-------
list of lists of dicts
"""
result = []
strokes = sorted(strokes)
for stroke_index in strokes:
result.append(pointlist[stroke_index])
return result |
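For illustration, a tiny hand-made `pointlist` (the dict contents are arbitrary); `_get_part` selects whole strokes by index, sorting the indices first:
```python
pointlist = [
    [{"x": 0, "y": 0}, {"x": 1, "y": 1}],  # stroke 0
    [{"x": 5, "y": 5}],                    # stroke 1
    [{"x": 9, "y": 2}],                    # stroke 2
]
part = _get_part(pointlist, [2, 0])
print(part)  # [stroke 0, stroke 2] -- indices are sorted before selection
```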
def paint(self, painter, option, index):
"""Uses the :meth:`paint<sparkle.gui.stim.components.qcomponents.QStimulusComponent.paint>`
method of the component it represents to fill in an appropriately
sized rectangle. :qtdoc:`Re-implemented<QStyledItemDelegate.paint>`"""
component = index.model().data(index, role=QtCore.Qt.UserRole)
painter.drawRect(option.rect)
component.paint(painter, option.rect, option.palette) | Uses the :meth:`paint<sparkle.gui.stim.components.qcomponents.QStimulusComponent.paint>`
method of the component it represents to fill in an appropriately
sized rectangle. :qtdoc:`Re-implemented<QStyledItemDelegate.paint>` | Below is the instruction that describes the task:
### Input:
Uses the :meth:`paint<sparkle.gui.stim.components.qcomponents.QStimulusComponent.paint>`
method of the component it represents to fill in an appropriately
sized rectangle. :qtdoc:`Re-implemented<QStyledItemDelegate.paint>`
### Response:
def paint(self, painter, option, index):
"""Uses the :meth:`paint<sparkle.gui.stim.components.qcomponents.QStimulusComponent.paint>`
method of the component it represents to fill in an appropriately
sized rectangle. :qtdoc:`Re-implemented<QStyledItemDelegate.paint>`"""
component = index.model().data(index, role=QtCore.Qt.UserRole)
painter.drawRect(option.rect)
component.paint(painter, option.rect, option.palette) |
def stack_xi_direction_brute(xis, bestMasses, bestXis, direction_num,
req_match, massRangeParams, metricParams, fUpper,
scaleFactor=0.8, numIterations=3000):
"""
This function is used to assess the depth of the xi_space in a specified
dimension at a specified point in the higher dimensions. It does this by
iteratively throwing points at the space to find maxima and minima.
Parameters
-----------
xis : list or array
Position in the xi space at which to assess the depth. This can be only
a subset of the higher dimensions than that being sampled.
bestMasses : list
Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
mapped to xi coordinates in bestXis that is close to the xis point.
This is aimed to give the code a starting point.
bestXis : list
Contains the position of bestMasses in the xi coordinate system.
direction_num : int
The dimension that you want to assess the depth of (0 = 1, 1 = 2 ...)
req_match : float
When considering points to assess the depth with, only consider points
with a mismatch that is smaller than this with xis.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
scaleFactor : float, optional (default = 0.8)
The value of the scale factor that is used when calling
pycbc.tmpltbank.get_mass_distribution.
numIterations : int, optional (default = 3000)
The number of times to make calls to get_mass_distribution when
assessing the maximum/minimum of this parameter space. Making this
smaller makes the code faster, but at the cost of accuracy.
Returns
--------
xi_min : float
The minimal value of the specified dimension at the specified point in
parameter space.
xi_max : float
The maximal value of the specified dimension at the specified point in
parameter space.
"""
# Find minimum
ximin = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \
req_match, massRangeParams, metricParams, \
fUpper, find_minimum=True, \
scaleFactor=scaleFactor, \
numIterations=numIterations)
# Find maximum
ximax = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \
req_match, massRangeParams, metricParams, \
fUpper, find_minimum=False, \
scaleFactor=scaleFactor, \
numIterations=numIterations)
return ximin, ximax | This function is used to assess the depth of the xi_space in a specified
dimension at a specified point in the higher dimensions. It does this by
iteratively throwing points at the space to find maxima and minima.
Parameters
-----------
xis : list or array
Position in the xi space at which to assess the depth. This can be only
a subset of the higher dimensions than that being sampled.
bestMasses : list
Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
mapped to xi coordinates in bestXis that is close to the xis point.
This is aimed to give the code a starting point.
bestXis : list
Contains the position of bestMasses in the xi coordinate system.
direction_num : int
The dimension that you want to assess the depth of (0 = 1, 1 = 2 ...)
req_match : float
When considering points to assess the depth with, only consider points
with a mismatch that is smaller than this with xis.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
scaleFactor : float, optional (default = 0.8)
The value of the scale factor that is used when calling
pycbc.tmpltbank.get_mass_distribution.
numIterations : int, optional (default = 3000)
The number of times to make calls to get_mass_distribution when
assessing the maximum/minimum of this parameter space. Making this
smaller makes the code faster, but at the cost of accuracy.
Returns
--------
xi_min : float
The minimal value of the specified dimension at the specified point in
parameter space.
xi_max : float
The maximal value of the specified dimension at the specified point in
parameter space. | Below is the instruction that describes the task:
### Input:
This function is used to assess the depth of the xi_space in a specified
dimension at a specified point in the higher dimensions. It does this by
iteratively throwing points at the space to find maxima and minima.
Parameters
-----------
xis : list or array
Position in the xi space at which to assess the depth. This can be only
a subset of the higher dimensions than that being sampled.
bestMasses : list
Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
mapped to xi coordinates in bestXis that is close to the xis point.
This is aimed to give the code a starting point.
bestXis : list
Contains the position of bestMasses in the xi coordinate system.
direction_num : int
The dimension that you want to assess the depth of (0 = 1, 1 = 2 ...)
req_match : float
When considering points to assess the depth with, only consider points
with a mismatch that is smaller than this with xis.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
scaleFactor : float, optional (default = 0.8)
The value of the scale factor that is used when calling
pycbc.tmpltbank.get_mass_distribution.
numIterations : int, optional (default = 3000)
The number of times to make calls to get_mass_distribution when
assessing the maximum/minimum of this parameter space. Making this
smaller makes the code faster, but at the cost of accuracy.
Returns
--------
xi_min : float
The minimal value of the specified dimension at the specified point in
parameter space.
xi_max : float
The maximal value of the specified dimension at the specified point in
parameter space.
### Response:
def stack_xi_direction_brute(xis, bestMasses, bestXis, direction_num,
req_match, massRangeParams, metricParams, fUpper,
scaleFactor=0.8, numIterations=3000):
"""
This function is used to assess the depth of the xi_space in a specified
dimension at a specified point in the higher dimensions. It does this by
iteratively throwing points at the space to find maxima and minima.
Parameters
-----------
xis : list or array
Position in the xi space at which to assess the depth. This can be only
a subset of the higher dimensions than that being sampled.
bestMasses : list
Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
mapped to xi coordinates in bestXis that is close to the xis point.
This is aimed to give the code a starting point.
bestXis : list
Contains the position of bestMasses in the xi coordinate system.
direction_num : int
The dimension that you want to assess the depth of (0 = 1, 1 = 2 ...)
req_match : float
When considering points to assess the depth with, only consider points
with a mismatch that is smaller than this with xis.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
scaleFactor : float, optional (default = 0.8)
The value of the scale factor that is used when calling
pycbc.tmpltbank.get_mass_distribution.
numIterations : int, optional (default = 3000)
The number of times to make calls to get_mass_distribution when
assessing the maximum/minimum of this parameter space. Making this
smaller makes the code faster, but at the cost of accuracy.
Returns
--------
xi_min : float
The minimal value of the specified dimension at the specified point in
parameter space.
xi_max : float
The maximal value of the specified dimension at the specified point in
parameter space.
"""
# Find minimum
ximin = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \
req_match, massRangeParams, metricParams, \
fUpper, find_minimum=True, \
scaleFactor=scaleFactor, \
numIterations=numIterations)
# Find maximum
ximax = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \
req_match, massRangeParams, metricParams, \
fUpper, find_minimum=False, \
scaleFactor=scaleFactor, \
numIterations=numIterations)
return ximin, ximax |
def boolean(value):
"""Parse the string ``"true"`` or ``"false"`` as a boolean (case
insensitive). Also accepts ``"1"`` and ``"0"`` as ``True``/``False``
(respectively). If the input is from the request JSON body, the type is
already a native python boolean, and will be passed through without
further parsing.
"""
if isinstance(value, bool):
return value
if not value:
raise ValueError("boolean type must be non-null")
value = value.lower()
if value in ('true', '1',):
return True
if value in ('false', '0',):
return False
raise ValueError("Invalid literal for boolean(): {0}".format(value)) | Parse the string ``"true"`` or ``"false"`` as a boolean (case
insensitive). Also accepts ``"1"`` and ``"0"`` as ``True``/``False``
(respectively). If the input is from the request JSON body, the type is
already a native python boolean, and will be passed through without
further parsing. | Below is the instruction that describes the task:
### Input:
Parse the string ``"true"`` or ``"false"`` as a boolean (case
insensitive). Also accepts ``"1"`` and ``"0"`` as ``True``/``False``
(respectively). If the input is from the request JSON body, the type is
already a native python boolean, and will be passed through without
further parsing.
### Response:
def boolean(value):
"""Parse the string ``"true"`` or ``"false"`` as a boolean (case
insensitive). Also accepts ``"1"`` and ``"0"`` as ``True``/``False``
(respectively). If the input is from the request JSON body, the type is
already a native python boolean, and will be passed through without
further parsing.
"""
if isinstance(value, bool):
return value
if not value:
raise ValueError("boolean type must be non-null")
value = value.lower()
if value in ('true', '1',):
return True
if value in ('false', '0',):
return False
raise ValueError("Invalid literal for boolean(): {0}".format(value)) |
def _ensure_pinned_rows(dashboard):
'''Pin rows to the top of the dashboard.'''
pinned_row_titles = __salt__['pillar.get'](_PINNED_ROWS_PILLAR)
if not pinned_row_titles:
return
pinned_row_titles_lower = []
for title in pinned_row_titles:
pinned_row_titles_lower.append(title.lower())
rows = dashboard.get('rows', [])
pinned_rows = []
for i, row in enumerate(rows):
if row.get('title', '').lower() in pinned_row_titles_lower:
del rows[i]
pinned_rows.append(row)
rows = pinned_rows + rows | Pin rows to the top of the dashboard. | Below is the instruction that describes the task:
### Input:
Pin rows to the top of the dashboard.
### Response:
def _ensure_pinned_rows(dashboard):
'''Pin rows to the top of the dashboard.'''
pinned_row_titles = __salt__['pillar.get'](_PINNED_ROWS_PILLAR)
if not pinned_row_titles:
return
pinned_row_titles_lower = []
for title in pinned_row_titles:
pinned_row_titles_lower.append(title.lower())
rows = dashboard.get('rows', [])
pinned_rows = []
for i, row in enumerate(rows):
if row.get('title', '').lower() in pinned_row_titles_lower:
del rows[i]
pinned_rows.append(row)
rows = pinned_rows + rows |
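The reordering logic in `_ensure_pinned_rows` does not depend on Salt itself; a self-contained sketch of the same idea, with the pillar lookup replaced by a plain argument and the result written back onto the dashboard (function name and sample data are invented here):
```python
def pin_rows(dashboard, pinned_row_titles):
    """Move rows whose title matches (case-insensitively) to the top."""
    pinned_lower = [t.lower() for t in pinned_row_titles]
    rows = dashboard.get('rows', [])
    pinned = [r for r in rows if r.get('title', '').lower() in pinned_lower]
    rest = [r for r in rows if r.get('title', '').lower() not in pinned_lower]
    dashboard['rows'] = pinned + rest
    return dashboard

print(pin_rows({'rows': [{'title': 'b'}, {'title': 'A'}]}, ['a']))
# {'rows': [{'title': 'A'}, {'title': 'b'}]}
```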
def runExperiment(args):
"""
Runs the experiment. The code is organized around what we need for specific
figures in the paper.
args is a dict representing the various parameters. We do it this way to
support multiprocessing. The function returns the args dict updated with a
number of additional keys containing performance metrics.
"""
numObjects = args.get("numObjects", 10)
numSequences = args.get("numSequences", 10)
numFeatures = args.get("numFeatures", 10)
seqLength = args.get("seqLength", 10)
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
inputSize = args.get("inputSize", 1024)
numLocations = args.get("numLocations", 100000)
numInputBits = args.get("inputBits", 20)
settlingTime = args.get("settlingTime", 1)
numRepetitions = args.get("numRepetitions", 5)
figure = args.get("figure", False)
synPermProximalDecL2 = args.get("synPermProximalDecL2", 0.001)
minThresholdProximalL2 = args.get("minThresholdProximalL2", 10)
sampleSizeProximalL2 = args.get("sampleSizeProximalL2", 15)
basalPredictedSegmentDecrement = args.get(
"basalPredictedSegmentDecrement", 0.0006)
stripStats = args.get("stripStats", True)
random.seed(trialNum)
#####################################################
#
# Create the sequences and objects, and make sure they share the
# same features and locations.
sequences = createObjectMachine(
machineType="sequence",
numInputBits=numInputBits,
sensorInputSize=inputSize,
externalInputSize=1024,
numCorticalColumns=1,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
sequences.createRandomSequences(numSequences, seqLength)
objects = createObjectMachine(
machineType="simple",
numInputBits=numInputBits,
sensorInputSize=inputSize,
externalInputSize=1024,
numCorticalColumns=1,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
# Make sure they share the same features and locations
objects.locations = sequences.locations
objects.features = sequences.features
objects.createRandomObjects(numObjects, numPoints=numPoints,
numLocations=numLocations,
numFeatures=numFeatures)
#####################################################
#
# Setup experiment and train the network
name = "combined_sequences_S%03d_O%03d_F%03d_L%03d_T%03d" % (
numSequences, numObjects, numFeatures, numLocations, trialNum
)
exp = L4TMExperiment(
name=name,
numCorticalColumns=1,
inputSize=inputSize,
numExternalInputBits=numInputBits,
externalInputSize=1024,
numInputBits=numInputBits,
seed=trialNum,
L2Overrides={"synPermProximalDec": synPermProximalDecL2,
"minThresholdProximal": minThresholdProximalL2,
"sampleSizeProximal": sampleSizeProximalL2,
"initialProximalPermanence": 0.45,
"synPermProximalDec": 0.002,
},
TMOverrides={
"basalPredictedSegmentDecrement": basalPredictedSegmentDecrement
},
L4Overrides={"initialPermanence": 0.21,
"activationThreshold": 18,
"minThreshold": 18,
"basalPredictedSegmentDecrement": basalPredictedSegmentDecrement,
},
)
printDiagnostics(exp, sequences, objects, args, verbosity=0)
# Train the network on all the sequences and then all the objects.
if figure in ["S", "6", "7"]:
trainSuperimposedSequenceObjects(exp, numRepetitions, sequences, objects)
else:
trainObjects(objects, exp, numRepetitions)
trainSequences(sequences, exp, numObjects)
##########################################################################
#
# Run inference
print "Running inference"
if figure in ["6"]:
# We have trained the system on both temporal sequences and
# objects. We test the system by randomly switching between sequences and
# objects. To replicate the graph, we want to run sequences and objects in a
# specific order
for trial,itemType in enumerate(["sequence", "object", "sequence", "object",
"sequence", "sequence", "object",
"sequence", ]):
if itemType == "sequence":
objectId = random.randint(0, numSequences-1)
inferSequence(exp, objectId, sequences, objectId+numObjects)
else:
objectId = random.randint(0, numObjects-1)
inferObject(exp, objectId, objects, objectId)
elif figure in ["7"]:
# For figure 7 we have trained the system on both temporal sequences and
# objects. We test the system by superimposing randomly chosen sequences and
# objects.
for trial in range(10):
sequenceId = random.randint(0, numSequences - 1)
objectId = random.randint(0, numObjects - 1)
inferSuperimposedSequenceObjects(exp, sequenceId=sequenceId,
objectId=objectId, sequences=sequences, objects=objects)
else:
# By default run inference on every sequence and object in order.
for objectId in objects:
inferObject(exp, objectId, objects, objectId)
for seqId in sequences:
inferSequence(exp, seqId, sequences, seqId+numObjects)
##########################################################################
#
# Debugging diagnostics
printDiagnosticsAfterTraining(exp)
##########################################################################
#
# Compute a number of overall inference statistics
print "# Sequences {} # features {} trial # {}\n".format(
numSequences, numFeatures, trialNum)
convergencePoint, sequenceAccuracyL2 = exp.averageConvergencePoint(
"L2 Representation", 30, 40, 1, numObjects)
print "L2 accuracy for sequences:", sequenceAccuracyL2
convergencePoint, objectAccuracyL2 = exp.averageConvergencePoint(
"L2 Representation", 30, 40, 1, 0, numObjects)
print "L2 accuracy for objects:", objectAccuracyL2
objectCorrectSparsityTM, _ = exp.averageSequenceAccuracy(15, 25, 0, numObjects)
print "TM accuracy for objects:", objectCorrectSparsityTM
sequenceCorrectSparsityTM, sequenceCorrectClassificationsTM = \
exp.averageSequenceAccuracy(15, 25, numObjects)
print "TM accuracy for sequences:", sequenceCorrectClassificationsTM
infStats = exp.getInferenceStats()
predictedActive = numpy.zeros(len(infStats))
predicted = numpy.zeros(len(infStats))
predictedActiveL4 = numpy.zeros(len(infStats))
predictedL4 = numpy.zeros(len(infStats))
for i,stat in enumerate(infStats):
predictedActive[i] = float(sum(stat["TM PredictedActive C0"][2:])) / len(
stat["TM PredictedActive C0"][2:])
predicted[i] = float(sum(stat["TM NextPredicted C0"][2:])) / len(
stat["TM NextPredicted C0"][2:])
predictedActiveL4[i] = float(sum(stat["L4 PredictedActive C0"])) / len(
stat["L4 PredictedActive C0"])
predictedL4[i] = float(sum(stat["L4 Predicted C0"])) / len(
stat["L4 Predicted C0"])
# Return a bunch of metrics we will use in plots
args.update({"sequences": sequences.getObjects()})
args.update({"objects": objects.getObjects()})
args.update({"convergencePoint":convergencePoint})
args.update({"objectAccuracyL2": objectAccuracyL2})
args.update({"sequenceAccuracyL2": sequenceAccuracyL2})
args.update({"sequenceCorrectSparsityTM": sequenceCorrectSparsityTM})
args.update({"sequenceCorrectClassificationsTM": sequenceCorrectClassificationsTM})
args.update({"objectCorrectSparsityTM": objectCorrectSparsityTM})
args.update({"averagePredictions": predicted.mean()})
args.update({"averagePredictedActive": predictedActive.mean()})
args.update({"averagePredictionsL4": predictedL4.mean()})
args.update({"averagePredictedActiveL4": predictedActiveL4.mean()})
if stripStats:
exp.stripStats()
args.update({"name": exp.name})
args.update({"statistics": exp.statistics})
args.update({"networkConfig": exp.config})
return args | Runs the experiment. The code is organized around what we need for specific
figures in the paper.
args is a dict representing the various parameters. We do it this way to
support multiprocessing. The function returns the args dict updated with a
number of additional keys containing performance metrics. | Below is the instruction that describes the task:
### Input:
Runs the experiment. The code is organized around what we need for specific
figures in the paper.
args is a dict representing the various parameters. We do it this way to
support multiprocessing. The function returns the args dict updated with a
number of additional keys containing performance metrics.
### Response:
def runExperiment(args):
"""
Runs the experiment. The code is organized around what we need for specific
figures in the paper.
args is a dict representing the various parameters. We do it this way to
support multiprocessing. The function returns the args dict updated with a
number of additional keys containing performance metrics.
"""
numObjects = args.get("numObjects", 10)
numSequences = args.get("numSequences", 10)
numFeatures = args.get("numFeatures", 10)
seqLength = args.get("seqLength", 10)
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
inputSize = args.get("inputSize", 1024)
numLocations = args.get("numLocations", 100000)
numInputBits = args.get("inputBits", 20)
settlingTime = args.get("settlingTime", 1)
numRepetitions = args.get("numRepetitions", 5)
figure = args.get("figure", False)
synPermProximalDecL2 = args.get("synPermProximalDecL2", 0.001)
minThresholdProximalL2 = args.get("minThresholdProximalL2", 10)
sampleSizeProximalL2 = args.get("sampleSizeProximalL2", 15)
basalPredictedSegmentDecrement = args.get(
"basalPredictedSegmentDecrement", 0.0006)
stripStats = args.get("stripStats", True)
random.seed(trialNum)
#####################################################
#
# Create the sequences and objects, and make sure they share the
# same features and locations.
sequences = createObjectMachine(
machineType="sequence",
numInputBits=numInputBits,
sensorInputSize=inputSize,
externalInputSize=1024,
numCorticalColumns=1,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
sequences.createRandomSequences(numSequences, seqLength)
objects = createObjectMachine(
machineType="simple",
numInputBits=numInputBits,
sensorInputSize=inputSize,
externalInputSize=1024,
numCorticalColumns=1,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
# Make sure they share the same features and locations
objects.locations = sequences.locations
objects.features = sequences.features
objects.createRandomObjects(numObjects, numPoints=numPoints,
numLocations=numLocations,
numFeatures=numFeatures)
#####################################################
#
# Setup experiment and train the network
name = "combined_sequences_S%03d_O%03d_F%03d_L%03d_T%03d" % (
numSequences, numObjects, numFeatures, numLocations, trialNum
)
exp = L4TMExperiment(
name=name,
numCorticalColumns=1,
inputSize=inputSize,
numExternalInputBits=numInputBits,
externalInputSize=1024,
numInputBits=numInputBits,
seed=trialNum,
L2Overrides={"synPermProximalDec": synPermProximalDecL2,
"minThresholdProximal": minThresholdProximalL2,
"sampleSizeProximal": sampleSizeProximalL2,
"initialProximalPermanence": 0.45,
"synPermProximalDec": 0.002,
},
TMOverrides={
"basalPredictedSegmentDecrement": basalPredictedSegmentDecrement
},
L4Overrides={"initialPermanence": 0.21,
"activationThreshold": 18,
"minThreshold": 18,
"basalPredictedSegmentDecrement": basalPredictedSegmentDecrement,
},
)
printDiagnostics(exp, sequences, objects, args, verbosity=0)
# Train the network on all the sequences and then all the objects.
if figure in ["S", "6", "7"]:
trainSuperimposedSequenceObjects(exp, numRepetitions, sequences, objects)
else:
trainObjects(objects, exp, numRepetitions)
trainSequences(sequences, exp, numObjects)
##########################################################################
#
# Run inference
print "Running inference"
if figure in ["6"]:
# We have trained the system on both temporal sequences and
# objects. We test the system by randomly switching between sequences and
# objects. To replicate the graph, we want to run sequences and objects in a
# specific order
for trial,itemType in enumerate(["sequence", "object", "sequence", "object",
"sequence", "sequence", "object",
"sequence", ]):
if itemType == "sequence":
objectId = random.randint(0, numSequences-1)
inferSequence(exp, objectId, sequences, objectId+numObjects)
else:
objectId = random.randint(0, numObjects-1)
inferObject(exp, objectId, objects, objectId)
elif figure in ["7"]:
# For figure 7 we have trained the system on both temporal sequences and
# objects. We test the system by superimposing randomly chosen sequences and
# objects.
for trial in range(10):
sequenceId = random.randint(0, numSequences - 1)
objectId = random.randint(0, numObjects - 1)
inferSuperimposedSequenceObjects(exp, sequenceId=sequenceId,
objectId=objectId, sequences=sequences, objects=objects)
else:
# By default run inference on every sequence and object in order.
for objectId in objects:
inferObject(exp, objectId, objects, objectId)
for seqId in sequences:
inferSequence(exp, seqId, sequences, seqId+numObjects)
##########################################################################
#
# Debugging diagnostics
printDiagnosticsAfterTraining(exp)
##########################################################################
#
# Compute a number of overall inference statistics
print "# Sequences {} # features {} trial # {}\n".format(
numSequences, numFeatures, trialNum)
convergencePoint, sequenceAccuracyL2 = exp.averageConvergencePoint(
"L2 Representation", 30, 40, 1, numObjects)
print "L2 accuracy for sequences:", sequenceAccuracyL2
convergencePoint, objectAccuracyL2 = exp.averageConvergencePoint(
"L2 Representation", 30, 40, 1, 0, numObjects)
print "L2 accuracy for objects:", objectAccuracyL2
objectCorrectSparsityTM, _ = exp.averageSequenceAccuracy(15, 25, 0, numObjects)
print "TM accuracy for objects:", objectCorrectSparsityTM
sequenceCorrectSparsityTM, sequenceCorrectClassificationsTM = \
exp.averageSequenceAccuracy(15, 25, numObjects)
print "TM accuracy for sequences:", sequenceCorrectClassificationsTM
infStats = exp.getInferenceStats()
predictedActive = numpy.zeros(len(infStats))
predicted = numpy.zeros(len(infStats))
predictedActiveL4 = numpy.zeros(len(infStats))
predictedL4 = numpy.zeros(len(infStats))
for i,stat in enumerate(infStats):
predictedActive[i] = float(sum(stat["TM PredictedActive C0"][2:])) / len(
stat["TM PredictedActive C0"][2:])
predicted[i] = float(sum(stat["TM NextPredicted C0"][2:])) / len(
stat["TM NextPredicted C0"][2:])
predictedActiveL4[i] = float(sum(stat["L4 PredictedActive C0"])) / len(
stat["L4 PredictedActive C0"])
predictedL4[i] = float(sum(stat["L4 Predicted C0"])) / len(
stat["L4 Predicted C0"])
# Return a bunch of metrics we will use in plots
args.update({"sequences": sequences.getObjects()})
args.update({"objects": objects.getObjects()})
args.update({"convergencePoint":convergencePoint})
args.update({"objectAccuracyL2": objectAccuracyL2})
args.update({"sequenceAccuracyL2": sequenceAccuracyL2})
args.update({"sequenceCorrectSparsityTM": sequenceCorrectSparsityTM})
args.update({"sequenceCorrectClassificationsTM": sequenceCorrectClassificationsTM})
args.update({"objectCorrectSparsityTM": objectCorrectSparsityTM})
args.update({"averagePredictions": predicted.mean()})
args.update({"averagePredictedActive": predictedActive.mean()})
args.update({"averagePredictionsL4": predictedL4.mean()})
args.update({"averagePredictedActiveL4": predictedActiveL4.mean()})
if stripStats:
exp.stripStats()
args.update({"name": exp.name})
args.update({"statistics": exp.statistics})
args.update({"networkConfig": exp.config})
return args |
def write_shortstr(self, s):
"""Write a string up to 255 bytes long (after any encoding).
If passed a unicode string, encode with UTF-8.
"""
self._flushbits()
if isinstance(s, string):
s = s.encode('utf-8')
if len(s) > 255:
raise FrameSyntaxError(
'Shortstring overflow ({0} > 255)'.format(len(s)))
self.write_octet(len(s))
self.out.write(s) | Write a string up to 255 bytes long (after any encoding).
If passed a unicode string, encode with UTF-8. | Below is the instruction that describes the task:
### Input:
Write a string up to 255 bytes long (after any encoding).
If passed a unicode string, encode with UTF-8.
### Response:
def write_shortstr(self, s):
"""Write a string up to 255 bytes long (after any encoding).
If passed a unicode string, encode with UTF-8.
"""
self._flushbits()
if isinstance(s, string):
s = s.encode('utf-8')
if len(s) > 255:
raise FrameSyntaxError(
'Shortstring overflow ({0} > 255)'.format(len(s)))
self.write_octet(len(s))
self.out.write(s) |
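Outside the writer class, the short-string wire format produced above is simply a one-byte length prefix followed by at most 255 bytes of UTF-8; a standalone Python 3 sketch (not the library's API) for illustration:
```python
import struct

def pack_shortstr(s):
    """One length octet followed by up to 255 bytes."""
    data = s.encode('utf-8') if isinstance(s, str) else s
    if len(data) > 255:
        raise ValueError('Shortstring overflow ({0} > 255)'.format(len(data)))
    return struct.pack('B', len(data)) + data

print(pack_shortstr('amqp'))  # b'\x04amqp'
```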
def verify_fft_options(opt,parser):
"""Parses the FFT options and verifies that they are
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes.
parser : object
OptionParser instance.
"""
if opt.fftw_measure_level not in [0,1,2,3]:
parser.error("{0} is not a valid FFTW measure level.".format(opt.fftw_measure_level))
if opt.fftw_import_system_wisdom and ((opt.fftw_input_float_wisdom_file is not None)
or (opt.fftw_input_double_wisdom_file is not None)):
parser.error("If --fftw-import-system-wisdom is given, then you cannot give"
" either of --fftw-input-float-wisdom-file or --fftw-input-double-wisdom-file")
if opt.fftw_threads_backend is not None:
if opt.fftw_threads_backend not in ['openmp','pthreads','unthreaded']:
parser.error("Invalid threads backend; must be 'openmp', 'pthreads' or 'unthreaded'") | Parses the FFT options and verifies that they are
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes.
parser : object
OptionParser instance. | Below is the instruction that describes the task:
### Input:
Parses the FFT options and verifies that they are
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes.
parser : object
OptionParser instance.
### Response:
def verify_fft_options(opt,parser):
"""Parses the FFT options and verifies that they are
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes.
parser : object
OptionParser instance.
"""
if opt.fftw_measure_level not in [0,1,2,3]:
parser.error("{0} is not a valid FFTW measure level.".format(opt.fftw_measure_level))
if opt.fftw_import_system_wisdom and ((opt.fftw_input_float_wisdom_file is not None)
or (opt.fftw_input_double_wisdom_file is not None)):
parser.error("If --fftw-import-system-wisdom is given, then you cannot give"
" either of --fftw-input-float-wisdom-file or --fftw-input-double-wisdom-file")
if opt.fftw_threads_backend is not None:
if opt.fftw_threads_backend not in ['openmp','pthreads','unthreaded']:
parser.error("Invalid threads backend; must be 'openmp', 'pthreads' or 'unthreaded'") |
def is_uid(uid):
"""Checks if the passed in uid is a valid UID
:param uid: The uid to check
:type uid: string
:return: True if the uid is a valid 32 alphanumeric uid or '0'
:rtype: bool
"""
if not isinstance(uid, basestring):
return False
if uid != "0" and len(uid) != 32:
return False
return True | Checks if the passed in uid is a valid UID
:param uid: The uid to check
:type uid: string
:return: True if the uid is a valid 32 alphanumeric uid or '0'
:rtype: bool | Below is the instruction that describes the task:
### Input:
Checks if the passed in uid is a valid UID
:param uid: The uid to check
:type uid: string
:return: True if the uid is a valid 32 alphanumeric uid or '0'
:rtype: bool
### Response:
def is_uid(uid):
"""Checks if the passed in uid is a valid UID
:param uid: The uid to check
:type uid: string
:return: True if the uid is a valid 32 alphanumeric uid or '0'
:rtype: bool
"""
if not isinstance(uid, basestring):
return False
if uid != "0" and len(uid) != 32:
return False
return True |
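Example calls (the code above relies on `basestring`, so this is Python 2); note that only the length is checked, not that the characters are actually alphanumeric:
```python
print(is_uid("0"))       # True  (the special '0' uid)
print(is_uid("a" * 32))  # True  (32-character string)
print(is_uid("abc"))     # False (wrong length)
print(is_uid(42))        # False (not a string)
```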
def drawing_update(self):
'''update line drawing'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
if self.draw_callback is None:
return
self.draw_line.append(self.click_position)
if len(self.draw_line) > 1:
self.mpstate.map.add_object(mp_slipmap.SlipPolygon('drawing', self.draw_line,
layer='Drawing', linewidth=2, colour=(128,128,255))) | update line drawing | Below is the instruction that describes the task:
### Input:
update line drawing
### Response:
def drawing_update(self):
'''update line drawing'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
if self.draw_callback is None:
return
self.draw_line.append(self.click_position)
if len(self.draw_line) > 1:
self.mpstate.map.add_object(mp_slipmap.SlipPolygon('drawing', self.draw_line,
layer='Drawing', linewidth=2, colour=(128,128,255))) |
def search_definition(self, module, keyword, arg):
"""Search for a defintion with `keyword` `name`
Search the module and its submodules."""
r = module.search_one(keyword, arg)
if r is not None:
return r
for i in module.search('include'):
modulename = i.arg
m = self.ctx.search_module(i.pos, modulename)
if m is not None:
r = m.search_one(keyword, arg)
if r is not None:
return r
return None | Search for a definition with `keyword` `name`
Search the module and its submodules. | Below is the instruction that describes the task:
### Input:
Search for a definition with `keyword` `name`
Search the module and its submodules.
### Response:
def search_definition(self, module, keyword, arg):
"""Search for a defintion with `keyword` `name`
Search the module and its submodules."""
r = module.search_one(keyword, arg)
if r is not None:
return r
for i in module.search('include'):
modulename = i.arg
m = self.ctx.search_module(i.pos, modulename)
if m is not None:
r = m.search_one(keyword, arg)
if r is not None:
return r
return None |
def from_dict(cls, dict_in):
"""Initialize the class from a dict.
:param dict_in: The dictionary that contains the item content. Required
fields are listed class variable by that name
:type dict_in: dict
"""
kwargs = dict_in.copy()
args = [kwargs.pop(key) for key in cls.required_fields]
return cls(*args, **kwargs) | Initialize the class from a dict.
:param dict_in: The dictionary that contains the item content. Required
fields are listed class variable by that name
:type dict_in: dict | Below is the instruction that describes the task:
### Input:
Initialize the class from a dict.
:param dict_in: The dictionary that contains the item content. Required
fields are listed class variable by that name
:type dict_in: dict
### Response:
def from_dict(cls, dict_in):
"""Initialize the class from a dict.
:param dict_in: The dictionary that contains the item content. Required
fields are listed class variable by that name
:type dict_in: dict
"""
kwargs = dict_in.copy()
args = [kwargs.pop(key) for key in cls.required_fields]
return cls(*args, **kwargs) |
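A toy class (names invented here) shows how `required_fields` splits the dict into positional and keyword arguments; the sketch treats `from_dict` as the module-level function defined above and attaches it as a classmethod, which is presumably how the original class uses it:
```python
class Item(object):
    required_fields = ['name', 'price']

    def __init__(self, name, price, **extra):
        self.name, self.price, self.extra = name, price, extra

    from_dict = classmethod(from_dict)  # reuse the function defined above

item = Item.from_dict({'name': 'widget', 'price': 3, 'colour': 'red'})
print(item.name, item.price, item.extra)  # widget 3 {'colour': 'red'}
```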
def list(self,header,choices):
"Display list of choices. As many as we can get in a page."
if not self.__list_window:
(y,x)=self.__main.getmaxyx()
self.__list_window = self.__main.subwin(35,x,0,0)
_lw=self.__list_window
_lw.keypad(1)
(y_max,x_max)=_lw.getmaxyx()
(y_0, x_0)=_lw.getbegyx()
x_start=1+x_0
_lw.box()
## Number of list items allowed.
### first entry in the list appears at page_top
page_top=y_0+2
### the last entry display will be at page_bottom
page_bottom = y_max-2
### break the list into chunks.
max_items_per_page = page_bottom-page_top
### start at the top of the list
top_item=0
f=open('log.msg','w')
first_item=page_top
current_item=0
item_list=[]
while 1:
_lw.erase()
_lw.box()
_lw.addstr(page_top-1,x_start,header)
if top_item > len(choices):
top_item=0
for i in range(max_items_per_page):
item=i+top_item
if not item in range(len(choices)):
break
_lw.addstr(i+page_top,x_start,choices[item])
### provide a hint that there is more info in the list
### setup where we are in the list
last_item=item
if top_item > 0 :
_lw.addstr(page_bottom,x_start,"P(revious)")
if last_item < len(choices):
_lw.addstr(page_bottom,x_max-8,"N(ext)")
while 1:
c=_lw.getch(current_item-top_item+page_top,x_start)
if c==curses.KEY_UP:
current_item=current_item-1
elif c==curses.KEY_DOWN:
current_item=current_item+1
elif c==ord(' '):
if current_item in item_list:
_lw.addstr(choices[current_item])
item_list.remove(current_item)
else:
_lw.addstr(choices[current_item],curses.A_REVERSE)
item_list.append(current_item)
elif c==ord('P'):
top_item=top_item-max_items_per_page
current_item=top_item
break
elif c==ord('N'):
top_item=top_item + max_items_per_page
current_item=top_item
break
elif c==10:
return(item_list)
elif c==ord('q'):
_lw.erase()
return(None)
elif c==ord('x'):
choices[current_item]=choices[current_item][:4]+" "+choices[current_item][5:]
_lw.addstr(choices[current_item])
else:
choices[current_item]=choices[current_item][:7]+chr(c).capitalize()+choices[current_item][8:]
_lw.addstr(choices[current_item])
if current_item > last_item-1:
if last_item < len(choices):
top_item = top_item+1
break
else:
current_item=current_item-1
if current_item < top_item :
if top_item > 0:
top_item = top_item-1
break
else:
current_item=current_item+1 | Display list of choices. As many as we can get in a page. | Below is the instruction that describes the task:
### Input:
Display list of choices. As many as we can get in a page.
### Response:
def list(self,header,choices):
"Display list of choices. As many as we can get in a page."
if not self.__list_window:
(y,x)=self.__main.getmaxyx()
self.__list_window = self.__main.subwin(35,x,0,0)
_lw=self.__list_window
_lw.keypad(1)
(y_max,x_max)=_lw.getmaxyx()
(y_0, x_0)=_lw.getbegyx()
x_start=1+x_0
_lw.box()
## Number of list items allowed.
### first entry in the list appears at page_top
page_top=y_0+2
### the last entry display will be at page_bottom
page_bottom = y_max-2
### break the list into chunks.
max_items_per_page = page_bottom-page_top
### start at the top of the list
top_item=0
f=open('log.msg','w')
first_item=page_top
current_item=0
item_list=[]
while 1:
_lw.erase()
_lw.box()
_lw.addstr(page_top-1,x_start,header)
if top_item > len(choices):
top_item=0
for i in range(max_items_per_page):
item=i+top_item
if not item in range(len(choices)):
break
_lw.addstr(i+page_top,x_start,choices[item])
### provide a hint that there is more info in the list
### setup where we are in the list
last_item=item
if top_item > 0 :
_lw.addstr(page_bottom,x_start,"P(revious)")
if last_item < len(choices):
_lw.addstr(page_bottom,x_max-8,"N(ext)")
while 1:
c=_lw.getch(current_item-top_item+page_top,x_start)
if c==curses.KEY_UP:
current_item=current_item-1
elif c==curses.KEY_DOWN:
current_item=current_item+1
elif c==ord(' '):
if current_item in item_list:
_lw.addstr(choices[current_item])
item_list.remove(current_item)
else:
_lw.addstr(choices[current_item],curses.A_REVERSE)
item_list.append(current_item)
elif c==ord('P'):
top_item=top_item-max_items_per_page
current_item=top_item
break
elif c==ord('N'):
top_item=top_item + max_items_per_page
current_item=top_item
break
elif c==10:
return(item_list)
elif c==ord('q'):
_lw.erase()
return(None)
elif c==ord('x'):
choices[current_item]=choices[current_item][:4]+" "+choices[current_item][5:]
_lw.addstr(choices[current_item])
else:
choices[current_item]=choices[current_item][:7]+chr(c).capitalize()+choices[current_item][8:]
_lw.addstr(choices[current_item])
if current_item > last_item-1:
if last_item < len(choices):
top_item = top_item+1
break
else:
current_item=current_item-1
if current_item < top_item :
if top_item > 0:
top_item = top_item-1
break
else:
current_item=current_item+1 |
def apt_key_exists(keyid):
"""
Check if the given key id exists in apt keyring.
"""
# Command extracted from apt-key source
gpg_cmd = 'gpg --ignore-time-conflict --no-options --no-default-keyring --keyring /etc/apt/trusted.gpg'
with settings(hide('everything'), warn_only=True):
res = run('%(gpg_cmd)s --fingerprint %(keyid)s' % locals())
return res.succeeded | Check if the given key id exists in apt keyring. | Below is the instruction that describes the task:
### Input:
Check if the given key id exists in apt keyring.
### Response:
def apt_key_exists(keyid):
"""
Check if the given key id exists in apt keyring.
"""
# Command extracted from apt-key source
gpg_cmd = 'gpg --ignore-time-conflict --no-options --no-default-keyring --keyring /etc/apt/trusted.gpg'
with settings(hide('everything'), warn_only=True):
res = run('%(gpg_cmd)s --fingerprint %(keyid)s' % locals())
return res.succeeded |
def start_after(self, document_fields):
"""Start query after a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_after` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
"""
query = query_mod.Query(self)
return query.start_after(document_fields) | Start query after a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_after` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. | Below is the instruction that describes the task:
### Input:
Start query after a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_after` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
### Response:
def start_after(self, document_fields):
"""Start query after a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_after` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
"""
query = query_mod.Query(self)
return query.start_after(document_fields) |
def DSYR_numpy(A, x, alpha=1.):
"""
Performs a symmetric rank-1 update operation:
A <- A + alpha * np.dot(x,x.T)
:param A: Symmetric NxN np.array
:param x: Nx1 np.array
:param alpha: scalar
"""
A += alpha * np.dot(x[:, None], x[None, :]) | Performs a symmetric rank-1 update operation:
A <- A + alpha * np.dot(x,x.T)
:param A: Symmetric NxN np.array
:param x: Nx1 np.array
:param alpha: scalar | Below is the instruction that describes the task:
### Input:
Performs a symmetric rank-1 update operation:
A <- A + alpha * np.dot(x,x.T)
:param A: Symmetric NxN np.array
:param x: Nx1 np.array
:param alpha: scalar
### Response:
def DSYR_numpy(A, x, alpha=1.):
"""
Performs a symmetric rank-1 update operation:
A <- A + alpha * np.dot(x,x.T)
:param A: Symmetric NxN np.array
:param x: Nx1 np.array
:param alpha: scalar
"""
A += alpha * np.dot(x[:, None], x[None, :]) |
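A quick numeric check of the in-place rank-1 update performed by `DSYR_numpy` (sample values chosen arbitrarily):
```python
import numpy as np

A = np.eye(3)
x = np.array([1.0, 2.0, 3.0])
DSYR_numpy(A, x, alpha=0.5)
print(np.allclose(A, np.eye(3) + 0.5 * np.outer(x, x)))  # True
```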
def read_namespaced_deployment(self, name, namespace, **kwargs):
"""
read the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs)
return data | read the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
read the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
### Response:
def read_namespaced_deployment(self, name, namespace, **kwargs):
"""
read the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs)
return data |
def retrieve_console_log(self, filename=None, dir=None):
"""Retrieves the application console log (standard out and error)
files for this PE and saves them as a plain text file.
An existing file with the same name will be overwritten.
Args:
filename (str): name of the created file. Defaults to `pe_<id>_<timestamp>.stdouterr` where `id` is the PE identifier and `timestamp` is the number of seconds since the Unix epoch, for example ``pe_83_1511995995.trace``.
dir (str): a valid directory in which to save the file. Defaults to the current directory.
Returns:
str: the path to the created file, or None if retrieving a job's logs is not supported in the version of streams to which the job is submitted.
.. versionadded:: 1.9
"""
if hasattr(self, "consoleLog") and self.consoleLog is not None:
logger.debug("Retrieving PE console log: " + self.consoleLog)
if not filename:
filename = _file_name('pe', self.id, '.stdouterr')
return self.rest_client._retrieve_file(self.consoleLog, filename, dir, 'text/plain')
else:
return None | Retrieves the application console log (standard out and error)
files for this PE and saves them as a plain text file.
An existing file with the same name will be overwritten.
Args:
filename (str): name of the created file. Defaults to `pe_<id>_<timestamp>.stdouterr` where `id` is the PE identifier and `timestamp` is the number of seconds since the Unix epoch, for example ``pe_83_1511995995.trace``.
dir (str): a valid directory in which to save the file. Defaults to the current directory.
Returns:
str: the path to the created file, or None if retrieving a job's logs is not supported in the version of streams to which the job is submitted.
.. versionadded:: 1.9 | Below is the instruction that describes the task:
### Input:
Retrieves the application console log (standard out and error)
files for this PE and saves them as a plain text file.
An existing file with the same name will be overwritten.
Args:
filename (str): name of the created file. Defaults to `pe_<id>_<timestamp>.stdouterr` where `id` is the PE identifier and `timestamp` is the number of seconds since the Unix epoch, for example ``pe_83_1511995995.trace``.
dir (str): a valid directory in which to save the file. Defaults to the current directory.
Returns:
str: the path to the created file, or None if retrieving a job's logs is not supported in the version of streams to which the job is submitted.
.. versionadded:: 1.9
### Response:
def retrieve_console_log(self, filename=None, dir=None):
"""Retrieves the application console log (standard out and error)
files for this PE and saves them as a plain text file.
An existing file with the same name will be overwritten.
Args:
filename (str): name of the created file. Defaults to `pe_<id>_<timestamp>.stdouterr` where `id` is the PE identifier and `timestamp` is the number of seconds since the Unix epoch, for example ``pe_83_1511995995.trace``.
dir (str): a valid directory in which to save the file. Defaults to the current directory.
Returns:
str: the path to the created file, or None if retrieving a job's logs is not supported in the version of streams to which the job is submitted.
.. versionadded:: 1.9
"""
if hasattr(self, "consoleLog") and self.consoleLog is not None:
logger.debug("Retrieving PE console log: " + self.consoleLog)
if not filename:
filename = _file_name('pe', self.id, '.stdouterr')
return self.rest_client._retrieve_file(self.consoleLog, filename, dir, 'text/plain')
else:
return None |
def global_set_option(self, opt, value):
"""set option on the correct option provider"""
self._all_options[opt].set_option(opt, value) | set option on the correct option provider | Below is the instruction that describes the task:
### Input:
set option on the correct option provider
### Response:
def global_set_option(self, opt, value):
"""set option on the correct option provider"""
self._all_options[opt].set_option(opt, value) |
def _handle_uninitialized_read(self, addr, inspect=True, events=True):
"""
The default uninitialized read handler. Returns symbolic bytes.
"""
if self._uninitialized_read_handler is None:
v = self.state.solver.Unconstrained("%s_%s" % (self.id, addr), self.width*self.state.arch.byte_width, key=self.variable_key_prefix + (addr,), inspect=inspect, events=events)
return v.reversed if self.endness == "Iend_LE" else v
else:
return self._uninitialized_read_handler(self, addr, inspect=inspect, events=events) | The default uninitialized read handler. Returns symbolic bytes. | Below is the instruction that describes the task:
### Input:
The default uninitialized read handler. Returns symbolic bytes.
### Response:
def _handle_uninitialized_read(self, addr, inspect=True, events=True):
"""
The default uninitialized read handler. Returns symbolic bytes.
"""
if self._uninitialized_read_handler is None:
v = self.state.solver.Unconstrained("%s_%s" % (self.id, addr), self.width*self.state.arch.byte_width, key=self.variable_key_prefix + (addr,), inspect=inspect, events=events)
return v.reversed if self.endness == "Iend_LE" else v
else:
return self._uninitialized_read_handler(self, addr, inspect=inspect, events=events) |
def counter(self, key, **dims):
"""Adds counter with dimensions to the registry"""
return super(RegexRegistry, self).counter(self._get_key(key), **dims) | Adds counter with dimensions to the registry | Below is the instruction that describes the task:
### Input:
Adds counter with dimensions to the registry
### Response:
def counter(self, key, **dims):
"""Adds counter with dimensions to the registry"""
return super(RegexRegistry, self).counter(self._get_key(key), **dims) |
def _fS1(self, pos_pairs, A):
"""The gradient of the similarity constraint function w.r.t. A.
f = \sum_{ij}(x_i-x_j)A(x_i-x_j)' = \sum_{ij}d_ij*A*d_ij'
df/dA = d(d_ij*A*d_ij')/dA
Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A)
so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij
"""
dim = pos_pairs.shape[2]
diff = pos_pairs[:, 0, :] - pos_pairs[:, 1, :]
return np.einsum('ij,ik->jk', diff, diff) | The gradient of the similarity constraint function w.r.t. A.
f = \sum_{ij}(x_i-x_j)A(x_i-x_j)' = \sum_{ij}d_ij*A*d_ij'
df/dA = d(d_ij*A*d_ij')/dA
Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A)
so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij | Below is the the instruction that describes the task:
### Input:
The gradient of the similarity constraint function w.r.t. A.
f = \sum_{ij}(x_i-x_j)A(x_i-x_j)' = \sum_{ij}d_ij*A*d_ij'
df/dA = d(d_ij*A*d_ij')/dA
Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A)
so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij
### Response:
def _fS1(self, pos_pairs, A):
"""The gradient of the similarity constraint function w.r.t. A.
f = \sum_{ij}(x_i-x_j)A(x_i-x_j)' = \sum_{ij}d_ij*A*d_ij'
df/dA = d(d_ij*A*d_ij')/dA
Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A)
so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij
"""
dim = pos_pairs.shape[2]
diff = pos_pairs[:, 0, :] - pos_pairs[:, 1, :]
return np.einsum('ij,ik->jk', diff, diff) |
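A self-contained numpy sketch (array values are made up) showing that the einsum above is just the sum of the outer products d_ij' d_ij over all positive pairs:
import numpy as np

# three hypothetical positive pairs of 2-D points, shaped (n_pairs, 2, dim)
pos_pairs = np.array([[[0.0, 1.0], [1.0, 3.0]],
                      [[2.0, 2.0], [0.0, 0.0]],
                      [[1.0, 0.0], [1.0, 1.0]]])
diff = pos_pairs[:, 0, :] - pos_pairs[:, 1, :]     # one d_ij per pair
grad = np.einsum('ij,ik->jk', diff, diff)          # sum_ij d_ij' * d_ij
assert np.allclose(grad, sum(np.outer(d, d) for d in diff))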
def multi_ops(data_stream, *funcs):
""" fork a generator with multiple operations/functions
data_stream - an iterable data structure (ie: list/generator/tuple)
funcs - every function that will be applied to the data_stream """
assert all(callable(func) for func in funcs), 'multi_ops can only apply functions to the first argument'
assert len(funcs), 'multi_ops needs at least one function to apply to data_stream'
for i in data_stream:
if len(funcs) > 1:
yield tuple(func(i) for func in funcs)
elif len(funcs) == 1:
yield funcs[0](i) | fork a generator with multiple operations/functions
data_stream - an iterable data structure (ie: list/generator/tuple)
funcs - every function that will be applied to the data_stream | Below is the the instruction that describes the task:
### Input:
fork a generator with multiple operations/functions
data_stream - an iterable data structure (ie: list/generator/tuple)
funcs - every function that will be applied to the data_stream
### Response:
def multi_ops(data_stream, *funcs):
""" fork a generator with multiple operations/functions
data_stream - an iterable data structure (ie: list/generator/tuple)
funcs - every function that will be applied to the data_stream """
assert all(callable(func) for func in funcs), 'multi_ops can only apply functions to the first argument'
assert len(funcs), 'multi_ops needs at least one function to apply to data_stream'
for i in data_stream:
if len(funcs) > 1:
yield tuple(func(i) for func in funcs)
elif len(funcs) == 1:
yield funcs[0](i) |
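A small usage sketch with arbitrary inputs: several functions fan each item out into a tuple, a single function yields bare results:
print(list(multi_ops([1, 2, 3], lambda x: x * x, lambda x: -x)))
# [(1, -1), (4, -2), (9, -3)]
print(list(multi_ops([1, 2, 3], str)))
# ['1', '2', '3']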
def ret_dump(self, d_ret, **kwargs):
"""
JSON print results to console (or caller)
"""
b_print = True
for k, v in kwargs.items():
if k == 'JSONprint': b_print = bool(v)
if b_print:
print(
json.dumps(
d_ret,
indent = 4,
sort_keys = True
)
) | JSON print results to console (or caller) | Below is the the instruction that describes the task:
### Input:
JSON print results to console (or caller)
### Response:
def ret_dump(self, d_ret, **kwargs):
"""
JSON print results to console (or caller)
"""
b_print = True
for k, v in kwargs.items():
if k == 'JSONprint': b_print = bool(v)
if b_print:
print(
json.dumps(
d_ret,
indent = 4,
sort_keys = True
)
) |
def init_widget(self):
""" Set the listeners
"""
w = self.window
d = self.declaration
self.set_background_color(d.background_color)
self.set_touchable(d.touchable)
self.set_outside_touchable(d.outside_touchable)
# Listen for events
w.setOnDismissListener(w.getId())
w.onDismiss.connect(self.on_dismiss)
super(AndroidPopupWindow, self).init_widget() | Set the listeners | Below is the the instruction that describes the task:
### Input:
Set the listeners
### Response:
def init_widget(self):
""" Set the listeners
"""
w = self.window
d = self.declaration
self.set_background_color(d.background_color)
self.set_touchable(d.touchable)
self.set_outside_touchable(d.outside_touchable)
# Listen for events
w.setOnDismissListener(w.getId())
w.onDismiss.connect(self.on_dismiss)
super(AndroidPopupWindow, self).init_widget() |
def _update_failure_type(self):
"""
Updates the failure type of this Note's Job.
Set the linked Job's failure type to that of the most recent JobNote or
set to Not Classified if there are no JobNotes.
This is called when JobNotes are created (via .save()) and deleted (via
.delete()) and is used to resolve the FailureClassification which has
been denormalised onto Job.
"""
# update the job classification
note = JobNote.objects.filter(job=self.job).order_by('-created').first()
if note:
self.job.failure_classification_id = note.failure_classification.id
else:
self.job.failure_classification_id = FailureClassification.objects.get(name='not classified').id
self.job.save() | Updates the failure type of this Note's Job.
Set the linked Job's failure type to that of the most recent JobNote or
set to Not Classified if there are no JobNotes.
This is called when JobNotes are created (via .save()) and deleted (via
.delete()) and is used to resolve the FailureClassification which has
been denormalised onto Job. | Below is the the instruction that describes the task:
### Input:
Updates the failure type of this Note's Job.
Set the linked Job's failure type to that of the most recent JobNote or
set to Not Classified if there are no JobNotes.
This is called when JobNotes are created (via .save()) and deleted (via
.delete()) and is used to resolve the FailureClassification which has
been denormalised onto Job.
### Response:
def _update_failure_type(self):
"""
Updates the failure type of this Note's Job.
Set the linked Job's failure type to that of the most recent JobNote or
set to Not Classified if there are no JobNotes.
This is called when JobNotes are created (via .save()) and deleted (via
.delete()) and is used to resolve the FailureClassification which has
been denormalised onto Job.
"""
# update the job classification
note = JobNote.objects.filter(job=self.job).order_by('-created').first()
if note:
self.job.failure_classification_id = note.failure_classification.id
else:
self.job.failure_classification_id = FailureClassification.objects.get(name='not classified').id
self.job.save() |
def get_lines_without_comments(filename: str) -> List[str]:
"""
Reads a file, and returns all lines as a list, left- and right-stripping
the lines and removing everything on a line after the first ``#``.
NOTE: does not cope well with quoted ``#`` symbols!
"""
lines = []
with open(filename) as f:
for line in f:
line = line.partition('#')[0] # the part before the first #
line = line.rstrip()
line = line.lstrip()
if line:
lines.append(line)
return lines | Reads a file, and returns all lines as a list, left- and right-stripping
the lines and removing everything on a line after the first ``#``.
NOTE: does not cope well with quoted ``#`` symbols! | Below is the the instruction that describes the task:
### Input:
Reads a file, and returns all lines as a list, left- and right-stripping
the lines and removing everything on a line after the first ``#``.
NOTE: does not cope well with quoted ``#`` symbols!
### Response:
def get_lines_without_comments(filename: str) -> List[str]:
"""
Reads a file, and returns all lines as a list, left- and right-stripping
the lines and removing everything on a line after the first ``#``.
NOTE: does not cope well with quoted ``#`` symbols!
"""
lines = []
with open(filename) as f:
for line in f:
line = line.partition('#')[0] # the part before the first #
line = line.rstrip()
line = line.lstrip()
if line:
lines.append(line)
return lines |
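A quick usage sketch against a throwaway file (the file contents are invented for illustration):
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".conf", delete=False) as f:
    f.write("# a full-line comment\n")
    f.write("alpha = 1   # trailing comment\n")
    f.write("\n")
    f.write("beta = 2\n")
    path = f.name

print(get_lines_without_comments(path))   # ['alpha = 1', 'beta = 2']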
def canvas_resize(self, scale):
""" Resize this region against the entire axis space. """
self._top *= scale
self._bottom *= scale
self._left *= scale
self._right *= scale
self._calibrate_to_rect() | Resize this region against the entire axis space. | Below is the the instruction that describes the task:
### Input:
Resize this region against the entire axis space.
### Response:
def canvas_resize(self, scale):
""" Resize this region against the entire axis space. """
self._top *= scale
self._bottom *= scale
self._left *= scale
self._right *= scale
self._calibrate_to_rect() |
def time_to_repeats(self, bins, integration_time):
"""Convert integration time to number of repeats"""
return math.ceil((self.device.sample_rate * integration_time) / bins) | Convert integration time to number of repeats | Below is the the instruction that describes the task:
### Input:
Convert integration time to number of repeats
### Response:
def time_to_repeats(self, bins, integration_time):
"""Convert integration time to number of repeats"""
return math.ceil((self.device.sample_rate * integration_time) / bins) |
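The computation is simply ceil(sample_rate * integration_time / bins); a standalone sketch with invented numbers:
import math

sample_rate = 500_000       # Hz, hypothetical acquisition rate
integration_time = 0.2      # seconds
bins = 1024
print(math.ceil((sample_rate * integration_time) / bins))   # 98, since 100000 / 1024 ~= 97.66 rounds up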
def _migrate_ledger(data_directory,
old_ledger_file, new_ledger_file,
serializer: MappingSerializer = None):
"""
Test for the directory, open old and new ledger, migrate data, rename directories
"""
# we should have ChunkedFileStorage implementation of the Ledger
if not os.path.isdir(os.path.join(data_directory, old_ledger_file)):
msg = 'Could not find directory {} for migration.'.format(
old_ledger_file)
logger.error(msg)
raise Exception(msg)
# open the old ledger using the specified serializer
old_ledger_file_backup = old_ledger_file + "_new"
old_txn_log_store = ChunkedFileStore(data_directory,
old_ledger_file_backup,
isLineNoKey=True,
storeContentHash=False)
old_ledger = Ledger(CompactMerkleTree(),
dataDir=data_directory,
txn_serializer=serializer,
hash_serializer=serializer,
fileName=old_ledger_file_backup,
transactionLogStore=old_txn_log_store)
# open the new ledger with new serialization
new_ledger = Ledger(CompactMerkleTree(),
dataDir=data_directory,
fileName=new_ledger_file)
logger.info("new size for {}: {}".format(
old_ledger_file_backup, str(new_ledger.size)))
# add all txns into the old ledger
for _, txn in new_ledger.getAllTxn():
old_ledger.add(txn)
logger.info("old size for {}: {}".format(
new_ledger_file, str(old_ledger.size)))
old_ledger.stop()
new_ledger.stop()
# now that everything succeeded, remove the new files and move the old
# files into place
shutil.rmtree(
os.path.join(data_directory, new_ledger_file))
os.rename(
os.path.join(data_directory, old_ledger_file_backup),
os.path.join(data_directory, old_ledger_file)) | Test for the directory, open old and new ledger, migrate data, rename directories | Below is the the instruction that describes the task:
### Input:
Test for the directory, open old and new ledger, migrate data, rename directories
### Response:
def _migrate_ledger(data_directory,
old_ledger_file, new_ledger_file,
serializer: MappingSerializer = None):
"""
Test for the directory, open old and new ledger, migrate data, rename directories
"""
# we should have ChunkedFileStorage implementation of the Ledger
if not os.path.isdir(os.path.join(data_directory, old_ledger_file)):
msg = 'Could not find directory {} for migration.'.format(
old_ledger_file)
logger.error(msg)
raise Exception(msg)
# open the old ledger using the specified serializer
old_ledger_file_backup = old_ledger_file + "_new"
old_txn_log_store = ChunkedFileStore(data_directory,
old_ledger_file_backup,
isLineNoKey=True,
storeContentHash=False)
old_ledger = Ledger(CompactMerkleTree(),
dataDir=data_directory,
txn_serializer=serializer,
hash_serializer=serializer,
fileName=old_ledger_file_backup,
transactionLogStore=old_txn_log_store)
# open the new ledger with new serialization
new_ledger = Ledger(CompactMerkleTree(),
dataDir=data_directory,
fileName=new_ledger_file)
logger.info("new size for {}: {}".format(
old_ledger_file_backup, str(new_ledger.size)))
# add all txns into the old ledger
for _, txn in new_ledger.getAllTxn():
old_ledger.add(txn)
logger.info("old size for {}: {}".format(
new_ledger_file, str(old_ledger.size)))
old_ledger.stop()
new_ledger.stop()
# now that everything succeeded, remove the new files and move the old
# files into place
shutil.rmtree(
os.path.join(data_directory, new_ledger_file))
os.rename(
os.path.join(data_directory, old_ledger_file_backup),
os.path.join(data_directory, old_ledger_file)) |
def download_url(self, url, **kwargs):
"""
Download a URL to the workspace.
Args:
url (string): URL to download to directory
**kwargs : See :py:mod:`ocrd.resolver.Resolver`
Returns:
The local filename of the downloaded file
"""
if self.baseurl and '://' not in url:
url = join(self.baseurl, url)
return self.resolver.download_to_directory(self.directory, url, **kwargs) | Download a URL to the workspace.
Args:
url (string): URL to download to directory
**kwargs : See :py:mod:`ocrd.resolver.Resolver`
Returns:
The local filename of the downloaded file | Below is the the instruction that describes the task:
### Input:
Download a URL to the workspace.
Args:
url (string): URL to download to directory
**kwargs : See :py:mod:`ocrd.resolver.Resolver`
Returns:
The local filename of the downloaded file
### Response:
def download_url(self, url, **kwargs):
"""
Download a URL to the workspace.
Args:
url (string): URL to download to directory
**kwargs : See :py:mod:`ocrd.resolver.Resolver`
Returns:
The local filename of the downloaded file
"""
if self.baseurl and '://' not in url:
url = join(self.baseurl, url)
return self.resolver.download_to_directory(self.directory, url, **kwargs) |
def _intersperse_insertion_rows_and_columns(self, pairwise_pvals):
"""Return pvals matrix with inserted NaN rows and columns, as numpy.ndarray.
Each insertion (a header or a subtotal) creates an offset in the calculated
pvals. These need to be taken into account when converting each pval to a
corresponding column letter. For this reason, we need to insert an all-NaN
row and a column at the right indices. These are the inserted indices of each
insertion, along respective dimensions.
"""
for i in self._insertion_indices:
pairwise_pvals = np.insert(pairwise_pvals, i, np.nan, axis=0)
pairwise_pvals = np.insert(pairwise_pvals, i, np.nan, axis=1)
return pairwise_pvals | Return pvals matrix with inserted NaN rows and columns, as numpy.ndarray.
Each insertion (a header or a subtotal) creates an offset in the calculated
pvals. These need to be taken into account when converting each pval to a
corresponding column letter. For this reason, we need to insert an all-NaN
row and a column at the right indices. These are the inserted indices of each
insertion, along respective dimensions. | Below is the the instruction that describes the task:
### Input:
Return pvals matrix with inserted NaN rows and columns, as numpy.ndarray.
Each insertion (a header or a subtotal) creates an offset in the calculated
pvals. These need to be taken into account when converting each pval to a
corresponding column letter. For this reason, we need to insert an all-NaN
row and a column at the right indices. These are the inserted indices of each
insertion, along respective dimensions.
### Response:
def _intersperse_insertion_rows_and_columns(self, pairwise_pvals):
"""Return pvals matrix with inserted NaN rows and columns, as numpy.ndarray.
Each insertion (a header or a subtotal) creates an offset in the calculated
pvals. These need to be taken into account when converting each pval to a
corresponding column letter. For this reason, we need to insert an all-NaN
row and a column at the right indices. These are the inserted indices of each
insertion, along respective dimensions.
"""
for i in self._insertion_indices:
pairwise_pvals = np.insert(pairwise_pvals, i, np.nan, axis=0)
pairwise_pvals = np.insert(pairwise_pvals, i, np.nan, axis=1)
return pairwise_pvals |
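A standalone numpy sketch of the same pattern with an invented insertion index; the all-NaN row and column keep the remaining p-values aligned with their original positions:
import numpy as np

pvals = np.arange(9, dtype=float).reshape(3, 3)   # pretend 3x3 pairwise p-values
for i in [1]:                                     # e.g. one subtotal inserted at index 1
    pvals = np.insert(pvals, i, np.nan, axis=0)   # all-NaN row
    pvals = np.insert(pvals, i, np.nan, axis=1)   # all-NaN column
print(pvals.shape)   # (4, 4)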
def find_sparse_mode(self, core, additional, scaling, weights={}):
"""Find a sparse mode containing reactions of the core subset.
Return an iterator of the support of a sparse mode that contains as
many reactions from core as possible, and as few reactions from
additional as possible (approximately). A dictionary of weights can be
supplied which gives further penalties for including specific
additional reactions.
"""
if len(core) == 0:
return
self.lp7(core)
k = set()
for reaction_id in core:
flux = self.get_flux(reaction_id)
if self.is_flipped(reaction_id):
flux *= -1
if flux >= self._epsilon:
k.add(reaction_id)
if len(k) == 0:
return
self.lp10(k, additional, weights)
for reaction_id in self._model.reactions:
flux = self.get_flux(reaction_id)
if abs(flux) >= self._epsilon / scaling:
yield reaction_id | Find a sparse mode containing reactions of the core subset.
Return an iterator of the support of a sparse mode that contains as
many reactions from core as possible, and as few reactions from
additional as possible (approximately). A dictionary of weights can be
supplied which gives further penalties for including specific
additional reactions. | Below is the the instruction that describes the task:
### Input:
Find a sparse mode containing reactions of the core subset.
Return an iterator of the support of a sparse mode that contains as
many reactions from core as possible, and as few reactions from
additional as possible (approximately). A dictionary of weights can be
supplied which gives further penalties for including specific
additional reactions.
### Response:
def find_sparse_mode(self, core, additional, scaling, weights={}):
"""Find a sparse mode containing reactions of the core subset.
Return an iterator of the support of a sparse mode that contains as
many reactions from core as possible, and as few reactions from
additional as possible (approximately). A dictionary of weights can be
supplied which gives further penalties for including specific
additional reactions.
"""
if len(core) == 0:
return
self.lp7(core)
k = set()
for reaction_id in core:
flux = self.get_flux(reaction_id)
if self.is_flipped(reaction_id):
flux *= -1
if flux >= self._epsilon:
k.add(reaction_id)
if len(k) == 0:
return
self.lp10(k, additional, weights)
for reaction_id in self._model.reactions:
flux = self.get_flux(reaction_id)
if abs(flux) >= self._epsilon / scaling:
yield reaction_id |
def layout(self):
""" Calculate the widths of the columns to set the table """
ret = []
for row in self.rows:
if len(row) > len(ret):
ret += [0] * (len(row) - len(ret))
for n, field in enumerate(row):
if field is self.empty_value:
field = ''
ret[n] = max(ret[n], len(field))
return ret | Calculate the widths of the columns to set the table | Below is the the instruction that describes the task:
### Input:
Calculate the widths of the columns to set the table
### Response:
def layout(self):
""" Calculate the widths of the columns to set the table """
ret = []
for row in self.rows:
if len(row) > len(ret):
ret += [0] * (len(row) - len(ret))
for n, field in enumerate(row):
if field is self.empty_value:
field = ''
ret[n] = max(ret[n], len(field))
return ret |
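The same width calculation as a standalone sketch; the rows and the empty-value sentinel are made up:
EMPTY = None
rows = [["name", "qty"], ["apples", "12", EMPTY], ["pears", "3", "note"]]

widths = []
for row in rows:
    if len(row) > len(widths):
        widths += [0] * (len(row) - len(widths))
    for n, field in enumerate(row):
        if field is EMPTY:
            field = ''
        widths[n] = max(widths[n], len(field))
print(widths)   # [6, 3, 4]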
def B(u, v, dfs_data):
"""The branch at u containing v is the set of all edges incident on v or any descendant of v, if a(v) == u."""
"""Bu(v) = {wx | w is in S*(v)}"""
if a(v, dfs_data) != u:
return None
return list(set([edge_id for w in S_star(v, dfs_data) for edge_id in dfs_data['graph'].get_node(w)['edges']])) | The branch at u containing v is the set of all edges incident on v or any descendant of v, if a(v) == u. | Below is the the instruction that describes the task:
### Input:
The branch at u containing v is the set of all edges incident on v or any descendant of v, if a(v) == u.
### Response:
def B(u, v, dfs_data):
"""The branch at u containing v is the set of all edges incident on v or any descendant of v, if a(v) == u."""
"""Bu(v) = {wx | w is in S*(v)}"""
if a(v, dfs_data) != u:
return None
return list(set([edge_id for w in S_star(v, dfs_data) for edge_id in dfs_data['graph'].get_node(w)['edges']])) |
def node(self, node):
"""
Return the other node
"""
if node == self.node1:
return self.node2
elif node == self.node2:
return self.node1
else:
return None | Return the other node | Below is the the instruction that describes the task:
### Input:
Return the other node
### Response:
def node(self, node):
"""
Return the other node
"""
if node == self.node1:
return self.node2
elif node == self.node2:
return self.node1
else:
return None |
def _enable_read_access(self):
"""! @brief Ensure flash is accessible by initing the algo for verify.
Not all flash memories are always accessible. For instance, external QSPI. Initing the
flash algo for the VERIFY operation is the canonical way to ensure that the flash is
memory mapped and accessible.
"""
if not self.algo_inited_for_read:
self.flash.init(self.flash.Operation.VERIFY)
self.algo_inited_for_read = True | ! @brief Ensure flash is accessible by initing the algo for verify.
Not all flash memories are always accessible. For instance, external QSPI. Initing the
flash algo for the VERIFY operation is the canonical way to ensure that the flash is
memory mapped and accessible. | Below is the the instruction that describes the task:
### Input:
! @brief Ensure flash is accessible by initing the algo for verify.
Not all flash memories are always accessible. For instance, external QSPI. Initing the
flash algo for the VERIFY operation is the canonical way to ensure that the flash is
memory mapped and accessible.
### Response:
def _enable_read_access(self):
"""! @brief Ensure flash is accessible by initing the algo for verify.
Not all flash memories are always accessible. For instance, external QSPI. Initing the
flash algo for the VERIFY operation is the canonical way to ensure that the flash is
memory mapped and accessible.
"""
if not self.algo_inited_for_read:
self.flash.init(self.flash.Operation.VERIFY)
self.algo_inited_for_read = True |
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt) | Return a string which represents a row of data cells. | Below is the the instruction that describes the task:
### Input:
Return a string which represents a row of data cells.
### Response:
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt) |
def _iter_names(self):
"""
Generate a key/value pair for each name in this table. The key is a
(platform_id, name_id) 2-tuple and the value is the unicode text
corresponding to that key.
"""
table_format, count, strings_offset = self._table_header
table_bytes = self._table_bytes
for idx in range(count):
platform_id, name_id, name = self._read_name(
table_bytes, idx, strings_offset
)
if name is None:
continue
yield ((platform_id, name_id), name) | Generate a key/value pair for each name in this table. The key is a
(platform_id, name_id) 2-tuple and the value is the unicode text
corresponding to that key. | Below is the the instruction that describes the task:
### Input:
Generate a key/value pair for each name in this table. The key is a
(platform_id, name_id) 2-tuple and the value is the unicode text
corresponding to that key.
### Response:
def _iter_names(self):
"""
Generate a key/value pair for each name in this table. The key is a
(platform_id, name_id) 2-tuple and the value is the unicode text
corresponding to that key.
"""
table_format, count, strings_offset = self._table_header
table_bytes = self._table_bytes
for idx in range(count):
platform_id, name_id, name = self._read_name(
table_bytes, idx, strings_offset
)
if name is None:
continue
yield ((platform_id, name_id), name) |
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
if hasattr(self, "dtype"):
return pandas.Series({str(self.dtype): 1})
result = self.dtypes.value_counts()
result.index = result.index.map(lambda x: str(x))
return result | Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object. | Below is the the instruction that describes the task:
### Input:
Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
### Response:
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
if hasattr(self, "dtype"):
return pandas.Series({str(self.dtype): 1})
result = self.dtypes.value_counts()
result.index = result.index.map(lambda x: str(x))
return result |
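For comparison, the plain-pandas version of the fall-through branch as a quick sketch (the frame contents are arbitrary):
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [1.0, 2.0], "c": ["x", "y"]})
counts = df.dtypes.value_counts()
counts.index = counts.index.map(lambda x: str(x))
print(counts.to_dict())   # {'int64': 1, 'float64': 1, 'object': 1}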
def cutout_data(self, x1, y1, x2, y2, xstep=1, ystep=1, astype=None):
"""cut out data area based on coords.
"""
view = np.s_[y1:y2:ystep, x1:x2:xstep]
data = self._slice(view)
if astype:
data = data.astype(astype, copy=False)
return data | cut out data area based on coords. | Below is the the instruction that describes the task:
### Input:
cut out data area based on coords.
### Response:
def cutout_data(self, x1, y1, x2, y2, xstep=1, ystep=1, astype=None):
"""cut out data area based on coords.
"""
view = np.s_[y1:y2:ystep, x1:x2:xstep]
data = self._slice(view)
if astype:
data = data.astype(astype, copy=False)
return data |
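A standalone numpy sketch of the same slicing idea, with plain indexing standing in for the class's _slice hook:
import numpy as np

data = np.arange(100).reshape(10, 10)
x1, y1, x2, y2, xstep, ystep = 2, 3, 8, 9, 2, 3
view = np.s_[y1:y2:ystep, x1:x2:xstep]
cut = data[view].astype(np.float32, copy=False)
print(cut.shape)   # (2, 3): rows 3 and 6, columns 2, 4 and 6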
def enhance(self):
"""Load metadata from a data service to improve naming.
:raises tvrenamer.exceptions.ShowNotFound:
when unable to find show/series name based on parsed name
:raises tvrenamer.exceptions.EpisodeNotFound:
when unable to find episode name(s) based on parsed data
"""
series, error = self.api.get_series_by_name(self.series_name)
if series is None:
self.messages.append(str(error))
LOG.info(self.messages[-1])
raise exc.ShowNotFound(str(error))
self.series_name = self.api.get_series_name(series)
self.episode_names, error = self.api.get_episode_name(
series, self.episode_numbers, self.season_number)
if self.episode_names is None:
self.messages.append(str(error))
LOG.info(self.messages[-1])
raise exc.EpisodeNotFound(str(error)) | Load metadata from a data service to improve naming.
:raises tvrenamer.exceptions.ShowNotFound:
when unable to find show/series name based on parsed name
:raises tvrenamer.exceptions.EpisodeNotFound:
when unable to find episode name(s) based on parsed data | Below is the the instruction that describes the task:
### Input:
Load metadata from a data service to improve naming.
:raises tvrenamer.exceptions.ShowNotFound:
when unable to find show/series name based on parsed name
:raises tvrenamer.exceptions.EpisodeNotFound:
when unable to find episode name(s) based on parsed data
### Response:
def enhance(self):
"""Load metadata from a data service to improve naming.
:raises tvrenamer.exceptions.ShowNotFound:
when unable to find show/series name based on parsed name
:raises tvrenamer.exceptions.EpisodeNotFound:
when unable to find episode name(s) based on parsed data
"""
series, error = self.api.get_series_by_name(self.series_name)
if series is None:
self.messages.append(str(error))
LOG.info(self.messages[-1])
raise exc.ShowNotFound(str(error))
self.series_name = self.api.get_series_name(series)
self.episode_names, error = self.api.get_episode_name(
series, self.episode_numbers, self.season_number)
if self.episode_names is None:
self.messages.append(str(error))
LOG.info(self.messages[-1])
raise exc.EpisodeNotFound(str(error)) |
def get_options(self):
"""
Return current query options for the Impala session
"""
query = 'SET'
return dict(row[:2] for row in self.con.fetchall(query)) | Return current query options for the Impala session | Below is the the instruction that describes the task:
### Input:
Return current query options for the Impala session
### Response:
def get_options(self):
"""
Return current query options for the Impala session
"""
query = 'SET'
return dict(row[:2] for row in self.con.fetchall(query)) |
def set_brightness(self, brightness):
"""set general brightness in range 0...1"""
brightness = min([1.0, max([brightness, 0.0])]) # enforces range 0 ... 1
self.state.brightness = brightness
self._repeat_last_frame()
sequence_number = self.zmq_publisher.publish_brightness(brightness)
logging.debug("Set brightness to {brightPercent:05.1f}%".format(brightPercent=brightness*100))
return (True, sequence_number, "OK") | set general brightness in range 0...1 | Below is the the instruction that describes the task:
### Input:
set general brightness in range 0...1
### Response:
def set_brightness(self, brightness):
"""set general brightness in range 0...1"""
brightness = min([1.0, max([brightness, 0.0])]) # enforces range 0 ... 1
self.state.brightness = brightness
self._repeat_last_frame()
sequence_number = self.zmq_publisher.publish_brightness(brightness)
logging.debug("Set brightness to {brightPercent:05.1f}%".format(brightPercent=brightness*100))
return (True, sequence_number, "OK") |
def get_command(self, ctx, name):
"""Retrieve the appropriate method from the Resource,
decorate it as a click command, and return that method.
"""
# Sanity check: Does a method exist corresponding to this
# command? If not, None is returned for click to raise
# exception.
if not hasattr(self.resource, name):
return None
# Get the method.
method = getattr(self.resource, name)
# Get any attributes that were given at command-declaration
# time.
attrs = getattr(method, '_cli_command_attrs', {})
# If the help message comes from the docstring, then
# convert it into a message specifically for this resource.
help_text = inspect.getdoc(method)
attrs['help'] = self._auto_help_text(help_text or '')
# On some methods, we ignore the defaults, which are intended
# for writing and not reading; process this.
ignore_defaults = attrs.pop('ignore_defaults', False)
# Wrap the method, such that it outputs its final return
# value rather than returning it.
new_method = self._echo_method(method)
# Soft copy the "__click_params__", if any exist.
# This is the internal holding method that the click library
# uses to store @click.option and @click.argument directives
# before the method is converted into a command.
#
# Because self._echo_method uses @functools.wraps, this is
# actually preserved; the purpose of copying it over is
# so we can get our resource fields at the top of the help;
# the easiest way to do this is to load them in before the
# conversion takes place. (This is a happy result of Armin's
# work to get around Python's processing decorators
# bottom-to-top.)
click_params = getattr(method, '__click_params__', [])
new_method.__click_params__ = copy(click_params)
new_method = with_global_options(new_method)
# Write options based on the fields available on this resource.
fao = attrs.pop('use_fields_as_options', True)
if fao:
for field in reversed(self.resource.fields):
if not field.is_option:
continue
# If we got an iterable rather than a boolean,
# then it is a list of fields to use; check for
# presence in that list.
if not isinstance(fao, bool) and field.name not in fao:
continue
# Create the initial arguments based on the
# option value. If we have a different key to use
# (which is what gets routed to the Tower API),
# ensure that is the first argument.
args = [field.option]
if field.key:
args.insert(0, field.key)
# short name aliases for common flags
short_fields = {
'name': 'n',
'description': 'd',
'inventory': 'i',
'extra_vars': 'e'
}
if field.name in short_fields:
args.append('-'+short_fields[field.name])
# Apply the option to the method.
option_help = field.help
if isinstance(field.type, StructuredInput):
option_help += ' Use @ to get JSON or YAML from a file.'
if field.required:
option_help = '[REQUIRED] ' + option_help
elif field.read_only:
option_help = '[READ ONLY] ' + option_help
option_help = '[FIELD]' + option_help
click.option(
*args,
default=field.default if not ignore_defaults else None,
help=option_help,
type=field.type,
show_default=field.show_default,
multiple=field.multiple,
is_eager=False
)(new_method)
# Make a click Command instance using this method
# as the callback, and return it.
cmd = click.command(name=name, cls=ActionSubcommand, **attrs)(new_method)
# If this method has a `pk` positional argument,
# then add a click argument for it.
code = six.get_function_code(method)
if 'pk' in code.co_varnames:
click.argument('pk', nargs=1, required=False, type=str, metavar='[ID]')(cmd)
# Done; return the command.
return cmd | Retrieve the appropriate method from the Resource,
decorate it as a click command, and return that method. | Below is the the instruction that describes the task:
### Input:
Retrieve the appropriate method from the Resource,
decorate it as a click command, and return that method.
### Response:
def get_command(self, ctx, name):
"""Retrieve the appropriate method from the Resource,
decorate it as a click command, and return that method.
"""
# Sanity check: Does a method exist corresponding to this
# command? If not, None is returned for click to raise
# exception.
if not hasattr(self.resource, name):
return None
# Get the method.
method = getattr(self.resource, name)
# Get any attributes that were given at command-declaration
# time.
attrs = getattr(method, '_cli_command_attrs', {})
# If the help message comes from the docstring, then
# convert it into a message specifically for this resource.
help_text = inspect.getdoc(method)
attrs['help'] = self._auto_help_text(help_text or '')
# On some methods, we ignore the defaults, which are intended
# for writing and not reading; process this.
ignore_defaults = attrs.pop('ignore_defaults', False)
# Wrap the method, such that it outputs its final return
# value rather than returning it.
new_method = self._echo_method(method)
# Soft copy the "__click_params__", if any exist.
# This is the internal holding method that the click library
# uses to store @click.option and @click.argument directives
# before the method is converted into a command.
#
# Because self._echo_method uses @functools.wraps, this is
# actually preserved; the purpose of copying it over is
# so we can get our resource fields at the top of the help;
# the easiest way to do this is to load them in before the
# conversion takes place. (This is a happy result of Armin's
# work to get around Python's processing decorators
# bottom-to-top.)
click_params = getattr(method, '__click_params__', [])
new_method.__click_params__ = copy(click_params)
new_method = with_global_options(new_method)
# Write options based on the fields available on this resource.
fao = attrs.pop('use_fields_as_options', True)
if fao:
for field in reversed(self.resource.fields):
if not field.is_option:
continue
# If we got an iterable rather than a boolean,
# then it is a list of fields to use; check for
# presence in that list.
if not isinstance(fao, bool) and field.name not in fao:
continue
# Create the initial arguments based on the
# option value. If we have a different key to use
# (which is what gets routed to the Tower API),
# ensure that is the first argument.
args = [field.option]
if field.key:
args.insert(0, field.key)
# short name aliases for common flags
short_fields = {
'name': 'n',
'description': 'd',
'inventory': 'i',
'extra_vars': 'e'
}
if field.name in short_fields:
args.append('-'+short_fields[field.name])
# Apply the option to the method.
option_help = field.help
if isinstance(field.type, StructuredInput):
option_help += ' Use @ to get JSON or YAML from a file.'
if field.required:
option_help = '[REQUIRED] ' + option_help
elif field.read_only:
option_help = '[READ ONLY] ' + option_help
option_help = '[FIELD]' + option_help
click.option(
*args,
default=field.default if not ignore_defaults else None,
help=option_help,
type=field.type,
show_default=field.show_default,
multiple=field.multiple,
is_eager=False
)(new_method)
# Make a click Command instance using this method
# as the callback, and return it.
cmd = click.command(name=name, cls=ActionSubcommand, **attrs)(new_method)
# If this method has a `pk` positional argument,
# then add a click argument for it.
code = six.get_function_code(method)
if 'pk' in code.co_varnames:
click.argument('pk', nargs=1, required=False, type=str, metavar='[ID]')(cmd)
# Done; return the command.
return cmd |
def _convert_content(self, rdtype, content):
"""
Converts type dependent record content into well formed and fully qualified
content for domain zone and returns content.
"""
if rdtype == 'TXT':
if content[0] != '"':
content = '"' + content
if content[-1] != '"':
content += '"'
if rdtype in ('CNAME', 'MX', 'NS', 'SRV'):
if content[-1] != '.':
content = self._fqdn_name(content)
return content | Converts type dependent record content into well formed and fully qualified
content for domain zone and returns content. | Below is the the instruction that describes the task:
### Input:
Converts type dependent record content into well formed and fully qualified
content for domain zone and returns content.
### Response:
def _convert_content(self, rdtype, content):
"""
Converts type dependent record content into well formed and fully qualified
content for domain zone and returns content.
"""
if rdtype == 'TXT':
if content[0] != '"':
content = '"' + content
if content[-1] != '"':
content += '"'
if rdtype in ('CNAME', 'MX', 'NS', 'SRV'):
if content[-1] != '.':
content = self._fqdn_name(content)
return content |
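A rough standalone sketch of the two normalisations (TXT quoting and trailing-dot qualification); the domain and the fully-qualifying step are stand-ins for the provider's _fqdn_name, which is not shown here:
def normalise(rdtype, content, domain="example.com"):
    if rdtype == 'TXT':
        if not content.startswith('"'):
            content = '"' + content
        if not content.endswith('"'):
            content += '"'
    if rdtype in ('CNAME', 'MX', 'NS', 'SRV') and not content.endswith('.'):
        content = "{0}.{1}.".format(content, domain)   # crude stand-in for _fqdn_name
    return content

print(normalise('TXT', 'v=spf1 -all'))   # "v=spf1 -all"
print(normalise('CNAME', 'www'))         # www.example.com.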
def smooth(x, window_len=7, window='hanning'):
"""
Smooth the data in x using convolution with a window of requested
size and type.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
array_like(float)
The smoothed values
Notes
-----
Application of the smoothing window at the top and bottom of x is
done by reflecting x around these points to extend it sufficiently
in each direction.
"""
if len(x) < window_len:
raise ValueError("Input vector length must be >= window length.")
if window_len < 3:
raise ValueError("Window length must be at least 3.")
if not window_len % 2: # window_len is even
window_len += 1
print("Window length reset to {}".format(window_len))
windows = {'hanning': np.hanning,
'hamming': np.hamming,
'bartlett': np.bartlett,
'blackman': np.blackman,
'flat': np.ones # moving average
}
# === Reflect x around x[0] and x[-1] prior to convolution === #
k = int(window_len / 2)
xb = x[:k] # First k elements
xt = x[-k:] # Last k elements
s = np.concatenate((xb[::-1], x, xt[::-1]))
# === Select window values === #
if window in windows.keys():
w = windows[window](window_len)
else:
msg = "Unrecognized window type '{}'".format(window)
print(msg + " Defaulting to hanning")
w = windows['hanning'](window_len)
return np.convolve(w / w.sum(), s, mode='valid') | Smooth the data in x using convolution with a window of requested
size and type.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
array_like(float)
The smoothed values
Notes
-----
Application of the smoothing window at the top and bottom of x is
done by reflecting x around these points to extend it sufficiently
in each direction. | Below is the the instruction that describes the task:
### Input:
Smooth the data in x using convolution with a window of requested
size and type.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
array_like(float)
The smoothed values
Notes
-----
Application of the smoothing window at the top and bottom of x is
done by reflecting x around these points to extend it sufficiently
in each direction.
### Response:
def smooth(x, window_len=7, window='hanning'):
"""
Smooth the data in x using convolution with a window of requested
size and type.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
array_like(float)
The smoothed values
Notes
-----
Application of the smoothing window at the top and bottom of x is
done by reflecting x around these points to extend it sufficiently
in each direction.
"""
if len(x) < window_len:
raise ValueError("Input vector length must be >= window length.")
if window_len < 3:
raise ValueError("Window length must be at least 3.")
if not window_len % 2: # window_len is even
window_len += 1
print("Window length reset to {}".format(window_len))
windows = {'hanning': np.hanning,
'hamming': np.hamming,
'bartlett': np.bartlett,
'blackman': np.blackman,
'flat': np.ones # moving average
}
# === Reflect x around x[0] and x[-1] prior to convolution === #
k = int(window_len / 2)
xb = x[:k] # First k elements
xt = x[-k:] # Last k elements
s = np.concatenate((xb[::-1], x, xt[::-1]))
# === Select window values === #
if window in windows.keys():
w = windows[window](window_len)
else:
msg = "Unrecognized window type '{}'".format(window)
print(msg + " Defaulting to hanning")
w = windows['hanning'](window_len)
return np.convolve(w / w.sum(), s, mode='valid') |
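A short usage sketch on synthetic noisy data; the reflected padding keeps the output the same length as the input:
import numpy as np

x = np.sin(np.linspace(0, 4 * np.pi, 200)) + 0.3 * np.random.randn(200)
y = smooth(x, window_len=11, window='hanning')
print(len(x), len(y))   # 200 200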
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
# setup command line parser
parser = U.OptionParser(version="%prog version: $Id$",
usage=usage,
description=globals()["__doc__"])
group = U.OptionGroup(parser, "count-specific options")
parser.add_option("--wide-format-cell-counts", dest="wide_format_cell_counts",
action="store_true",
default=False,
help=("output the cell counts in a wide format "
"(rows=genes, columns=cells)"))
parser.add_option_group(group)
# add common options (-h/--help, ...) and parse command line
(options, args) = U.Start(parser, argv=argv, add_group_dedup_options=False)
options.per_gene = True # hardcodes counting to per-gene only
U.validateSamOptions(options, group=False)
if options.random_seed:
np.random.seed(options.random_seed)
if options.stdin != sys.stdin:
in_name = options.stdin.name
options.stdin.close()
else:
raise ValueError("Input on standard in not currently supported")
if options.in_sam:
in_mode = "r"
else:
in_mode = "rb"
infile = pysam.Samfile(in_name, in_mode)
# write out to tempfile and then sort to stdout
tmpfilename = U.getTempFilename(dir=options.tmpdir)
tmpfile = U.openFile(tmpfilename, mode="w")
nInput, nOutput, input_reads = 0, 0, 0
gene_tag = options.gene_tag
metacontig2contig = None
if options.chrom:
inreads = infile.fetch(reference=options.chrom)
else:
if options.gene_transcript_map:
metacontig2contig = sam_methods.getMetaContig2contig(
infile, options.gene_transcript_map)
metatag = "MC"
inreads = sam_methods.metafetcher(infile, metacontig2contig, metatag)
gene_tag = metatag
else:
inreads = infile.fetch()
bundle_iterator = sam_methods.get_bundles(
options,
only_count_reads=True,
metacontig_contig=metacontig2contig)
# set up UMIClusterer functor with methods specific to
# specified options.method
processor = network.UMIClusterer(options.method)
for bundle, key, status in bundle_iterator(inreads):
if status == "single_read":
continue
gene, cell = key
umis = bundle.keys()
counts = {umi: bundle[umi]["count"] for umi in umis}
nInput += sum(counts.values())
while nInput >= input_reads + 1000000:
input_reads += 1000000
U.info("Parsed %i input reads" % input_reads)
# group the umis
groups = processor(
counts,
threshold=options.threshold)
gene_count = len(groups)
if options.per_cell:
tmpfile.write("%s\n" % "\t".join((gene, cell.decode(), str(gene_count))))
else:
tmpfile.write("%s\n" % "\t".join((gene, str(gene_count))))
nOutput += gene_count
tmpfile.close()
if options.per_cell:
gene_counts_dict = {}
with U.openFile(tmpfilename, mode="r") as inf:
genes = set()
cells = set()
for line in inf:
gene, cell, gene_count = line.strip().split("\t")
genes.add(gene)
cells.add(cell)
if gene not in gene_counts_dict:
gene_counts_dict[gene] = {}
gene_counts_dict[gene][cell] = gene_count
if options.wide_format_cell_counts: # write out in wide format
options.stdout.write(
"%s\t%s\n" % ("gene", "\t".join(sorted(cells))))
for gene in sorted(genes):
counts = []
for cell in sorted(cells):
if cell in gene_counts_dict[gene]:
counts.append(gene_counts_dict[gene][cell])
else:
counts.append(0)
options.stdout.write(
"%s\t%s\n" % (gene, "\t".join(map(str, counts))))
else: # write out in long format
options.stdout.write("%s\t%s\t%s\n" % ("gene", "cell", "count"))
for gene in sorted(genes):
for cell in sorted(list(gene_counts_dict[gene].keys())):
options.stdout.write("%s\t%s\t%s\n" % (
gene, cell, gene_counts_dict[gene][cell]))
else:
options.stdout.write("%s\t%s\n" % ("gene", "count"))
with U.openFile(tmpfilename, mode="r") as inf:
for line in inf:
options.stdout.write(line)
os.unlink(tmpfilename)
# output reads events and benchmark information.
for event in bundle_iterator.read_events.most_common():
U.info("%s: %s" % (event[0], event[1]))
U.info("Number of (post deduplication) reads counted: %i" % nOutput)
U.Stop() | script main.
parses command line options in sys.argv, unless *argv* is given. | Below is the the instruction that describes the task:
### Input:
script main.
parses command line options in sys.argv, unless *argv* is given.
### Response:
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
# setup command line parser
parser = U.OptionParser(version="%prog version: $Id$",
usage=usage,
description=globals()["__doc__"])
group = U.OptionGroup(parser, "count-specific options")
parser.add_option("--wide-format-cell-counts", dest="wide_format_cell_counts",
action="store_true",
default=False,
help=("output the cell counts in a wide format "
"(rows=genes, columns=cells)"))
parser.add_option_group(group)
# add common options (-h/--help, ...) and parse command line
(options, args) = U.Start(parser, argv=argv, add_group_dedup_options=False)
options.per_gene = True # hardcodes counting to per-gene only
U.validateSamOptions(options, group=False)
if options.random_seed:
np.random.seed(options.random_seed)
if options.stdin != sys.stdin:
in_name = options.stdin.name
options.stdin.close()
else:
raise ValueError("Input on standard in not currently supported")
if options.in_sam:
in_mode = "r"
else:
in_mode = "rb"
infile = pysam.Samfile(in_name, in_mode)
# write out to tempfile and then sort to stdout
tmpfilename = U.getTempFilename(dir=options.tmpdir)
tmpfile = U.openFile(tmpfilename, mode="w")
nInput, nOutput, input_reads = 0, 0, 0
gene_tag = options.gene_tag
metacontig2contig = None
if options.chrom:
inreads = infile.fetch(reference=options.chrom)
else:
if options.gene_transcript_map:
metacontig2contig = sam_methods.getMetaContig2contig(
infile, options.gene_transcript_map)
metatag = "MC"
inreads = sam_methods.metafetcher(infile, metacontig2contig, metatag)
gene_tag = metatag
else:
inreads = infile.fetch()
bundle_iterator = sam_methods.get_bundles(
options,
only_count_reads=True,
metacontig_contig=metacontig2contig)
# set up UMIClusterer functor with methods specific to
# specified options.method
processor = network.UMIClusterer(options.method)
for bundle, key, status in bundle_iterator(inreads):
if status == "single_read":
continue
gene, cell = key
umis = bundle.keys()
counts = {umi: bundle[umi]["count"] for umi in umis}
nInput += sum(counts.values())
while nInput >= input_reads + 1000000:
input_reads += 1000000
U.info("Parsed %i input reads" % input_reads)
# group the umis
groups = processor(
counts,
threshold=options.threshold)
gene_count = len(groups)
if options.per_cell:
tmpfile.write("%s\n" % "\t".join((gene, cell.decode(), str(gene_count))))
else:
tmpfile.write("%s\n" % "\t".join((gene, str(gene_count))))
nOutput += gene_count
tmpfile.close()
if options.per_cell:
gene_counts_dict = {}
with U.openFile(tmpfilename, mode="r") as inf:
genes = set()
cells = set()
for line in inf:
gene, cell, gene_count = line.strip().split("\t")
genes.add(gene)
cells.add(cell)
if gene not in gene_counts_dict:
gene_counts_dict[gene] = {}
gene_counts_dict[gene][cell] = gene_count
if options.wide_format_cell_counts: # write out in wide format
options.stdout.write(
"%s\t%s\n" % ("gene", "\t".join(sorted(cells))))
for gene in sorted(genes):
counts = []
for cell in sorted(cells):
if cell in gene_counts_dict[gene]:
counts.append(gene_counts_dict[gene][cell])
else:
counts.append(0)
options.stdout.write(
"%s\t%s\n" % (gene, "\t".join(map(str, counts))))
else: # write out in long format
options.stdout.write("%s\t%s\t%s\n" % ("gene", "cell", "count"))
for gene in sorted(genes):
for cell in sorted(list(gene_counts_dict[gene].keys())):
options.stdout.write("%s\t%s\t%s\n" % (
gene, cell, gene_counts_dict[gene][cell]))
else:
options.stdout.write("%s\t%s\n" % ("gene", "count"))
with U.openFile(tmpfilename, mode="r") as inf:
for line in inf:
options.stdout.write(line)
os.unlink(tmpfilename)
# output reads events and benchmark information.
for event in bundle_iterator.read_events.most_common():
U.info("%s: %s" % (event[0], event[1]))
U.info("Number of (post deduplication) reads counted: %i" % nOutput)
U.Stop() |
def title(msg, **options):
"""print something like a title"""
return echo(Style.BRIGHT + Fore.CYAN + "__{}__________________________".format(msg.upper().strip()) + Style.RESET_ALL + Fore.RESET, **options) | print something like a title | Below is the the instruction that describes the task:
### Input:
print something like a title
### Response:
def title(msg, **options):
"""print something like a title"""
return echo(Style.BRIGHT + Fore.CYAN + "__{}__________________________".format(msg.upper().strip()) + Style.RESET_ALL + Fore.RESET, **options) |
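A rough sketch of the string being composed, assuming Style and Fore come from colorama and echo ultimately prints (both are assumptions about the surrounding module):
from colorama import Fore, Style, init

init()   # enables ANSI handling on Windows; harmless elsewhere
msg = "build results"
print(Style.BRIGHT + Fore.CYAN
      + "__{}__________________________".format(msg.upper().strip())
      + Style.RESET_ALL + Fore.RESET)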
def socks_proxy(self, value):
"""
Sets socks proxy setting.
:Args:
- value: The socks proxy value.
"""
self._verify_proxy_type_compatibility(ProxyType.MANUAL)
self.proxyType = ProxyType.MANUAL
self.socksProxy = value | Sets socks proxy setting.
:Args:
- value: The socks proxy value. | Below is the the instruction that describes the task:
### Input:
Sets socks proxy setting.
:Args:
- value: The socks proxy value.
### Response:
def socks_proxy(self, value):
"""
Sets socks proxy setting.
:Args:
- value: The socks proxy value.
"""
self._verify_proxy_type_compatibility(ProxyType.MANUAL)
self.proxyType = ProxyType.MANUAL
self.socksProxy = value |
def get_assessment_parts_by_banks(self, bank_ids):
"""Gets the list of assessment part corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.assessment.authoring.AssessmentPartList) - list of
assessment parts
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resources_by_bins
assessment_part_list = []
for bank_id in bank_ids:
assessment_part_list += list(
self.get_assessment_parts_by_bank(bank_id))
return objects.AssessmentPartList(assessment_part_list) | Gets the list of assessment parts corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.assessment.authoring.AssessmentPartList) - list of
assessment parts
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the list of assessment parts corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.assessment.authoring.AssessmentPartList) - list of
assessment parts
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_assessment_parts_by_banks(self, bank_ids):
"""Gets the list of assessment part corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.assessment.authoring.AssessmentPartList) - list of
assessment parts
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resources_by_bins
assessment_part_list = []
for bank_id in bank_ids:
assessment_part_list += list(
self.get_assessment_parts_by_bank(bank_id))
return objects.AssessmentPartList(assessment_part_list) |
def columns(self):
"""
Returns the list of column names that this index will be expecting as \
inputs when it is called.
:return [<str>, ..]
"""
schema = self.schema()
return [schema.column(col) for col in self.__columns] | Returns the list of column names that this index will be expecting as \
inputs when it is called.
:return [<str>, ..] | Below is the the instruction that describes the task:
### Input:
Returns the list of column names that this index will be expecting as \
inputs when it is called.
:return [<str>, ..]
### Response:
def columns(self):
"""
Returns the list of column names that this index will be expecting as \
inputs when it is called.
:return [<str>, ..]
"""
schema = self.schema()
return [schema.column(col) for col in self.__columns] |
def _phase(self, line, pos):
"""
Places a phase / control circle on a qubit line at a given position.
:param int line: Qubit line at which to place the circle.
:param float pos: Position at which to place the circle.
:return: Latex string representing a control circle at the given position.
:rtype: string
"""
phase_str = "\n\\node[phase] ({}) at ({},-{}) {{}};"
return phase_str.format(self._op(line), pos, line) | Places a phase / control circle on a qubit line at a given position.
:param int line: Qubit line at which to place the circle.
:param float pos: Position at which to place the circle.
:return: Latex string representing a control circle at the given position.
:rtype: string | Below is the the instruction that describes the task:
### Input:
Places a phase / control circle on a qubit line at a given position.
:param int line: Qubit line at which to place the circle.
:param float pos: Position at which to place the circle.
:return: Latex string representing a control circle at the given position.
:rtype: string
### Response:
def _phase(self, line, pos):
"""
Places a phase / control circle on a qubit line at a given position.
:param int line: Qubit line at which to place the circle.
:param float pos: Position at which to place the circle.
:return: Latex string representing a control circle at the given position.
:rtype: string
"""
phase_str = "\n\\node[phase] ({}) at ({},-{}) {{}};"
return phase_str.format(self._op(line), pos, line) |
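A tiny sketch of the TikZ string being produced, with the operand id hard-coded because _op belongs to the surrounding class:
phase_str = "\n\\node[phase] ({}) at ({},-{}) {{}};"
print(phase_str.format("line1_gate3", 4.5, 1))
# \node[phase] (line1_gate3) at (4.5,-1) {};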
def insert(self, item, safe=None): # pragma: nocover
''' [DEPRECATED] Please use save() instead. This actually calls
the underlying save function, so the name is confusing.
Insert an item into the work queue and flushes.'''
warnings.warn('Insert will be deprecated soon and removed in 1.0. Please use save',
PendingDeprecationWarning)
self.add(item, safe=safe) | [DEPRECATED] Please use save() instead. This actually calls
the underlying save function, so the name is confusing.
Insert an item into the work queue and flushes. | Below is the the instruction that describes the task:
### Input:
[DEPRECATED] Please use save() instead. This actually calls
the underlying save function, so the name is confusing.
Insert an item into the work queue and flushes.
### Response:
def insert(self, item, safe=None): # pragma: nocover
''' [DEPRECATED] Please use save() instead. This actually calls
the underlying save function, so the name is confusing.
Insert an item into the work queue and flushes.'''
warnings.warn('Insert will be deprecated soon and removed in 1.0. Please use save',
PendingDeprecationWarning)
self.add(item, safe=safe) |
def linear_interpolate_by_datetime(datetime_axis, y_axis, datetime_new_axis,
enable_warning=True):
"""A datetime-version that takes datetime object list as x_axis
"""
numeric_datetime_axis = [
totimestamp(a_datetime) for a_datetime in datetime_axis
]
numeric_datetime_new_axis = [
totimestamp(a_datetime) for a_datetime in datetime_new_axis
]
return linear_interpolate(
numeric_datetime_axis, y_axis, numeric_datetime_new_axis,
enable_warning=enable_warning) | A datetime-version that takes datetime object list as x_axis | Below is the instruction that describes the task:
### Input:
A datetime-version that takes datetime object list as x_axis
### Response:
def linear_interpolate_by_datetime(datetime_axis, y_axis, datetime_new_axis,
enable_warning=True):
"""A datetime-version that takes datetime object list as x_axis
"""
numeric_datetime_axis = [
totimestamp(a_datetime) for a_datetime in datetime_axis
]
numeric_datetime_new_axis = [
totimestamp(a_datetime) for a_datetime in datetime_new_axis
]
return linear_interpolate(
numeric_datetime_axis, y_axis, numeric_datetime_new_axis,
enable_warning=enable_warning) |
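The helpers totimestamp and linear_interpolate are defined elsewhere in the module; as a rough, self-contained sketch of the same idea, datetimes can be converted to POSIX timestamps and fed to numpy.interp:

# Standalone sketch (numpy.interp stands in for the module's own helpers).
from datetime import datetime, timedelta
import numpy as np

t0 = datetime(2020, 1, 1)
datetime_axis = [t0 + timedelta(hours=h) for h in (0, 1, 2)]
y_axis = [10.0, 20.0, 15.0]
datetime_new_axis = [t0 + timedelta(minutes=90)]

xs = [dt.timestamp() for dt in datetime_axis]
xs_new = [dt.timestamp() for dt in datetime_new_axis]
print(np.interp(xs_new, xs, y_axis))  # [17.5]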
def snake_to_camel(value):
"""
Converts a snake_case_string to a camelCaseString.
>>> snake_to_camel("foo_bar_baz")
'fooBarBaz'
"""
camel = "".join(word.title() for word in value.split("_"))
return value[:1].lower() + camel[1:] | Converts a snake_case_string to a camelCaseString.
>>> snake_to_camel("foo_bar_baz")
'fooBarBaz' | Below is the instruction that describes the task:
### Input:
Converts a snake_case_string to a camelCaseString.
>>> snake_to_camel("foo_bar_baz")
'fooBarBaz'
### Response:
def snake_to_camel(value):
"""
Converts a snake_case_string to a camelCaseString.
>>> snake_to_camel("foo_bar_baz")
'fooBarBaz'
"""
camel = "".join(word.title() for word in value.split("_"))
return value[:1].lower() + camel[1:] |
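A few extra illustrative calls (assuming the function above) show that the leading character is always lowered:

print(snake_to_camel("foo_bar_baz"))  # fooBarBaz
print(snake_to_camel("Foo_bar"))      # fooBar
print(snake_to_camel("single"))       # single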
def copy_module(target_path, my_directory_full_path, my_module):
'''
Helper function for copy_module_to_local(). Provides the actual copy
functionality, with highly cautious safeguards against copying over
important things.
Parameters
----------
target_path : string
String, file path to target location
my_directory_full_path: string
String, full pathname to this file's directory
my_module : string
String, name of the module to copy
Returns
-------
none
'''
if target_path == 'q' or target_path == 'Q':
print("Goodbye!")
return
elif target_path == os.path.expanduser("~") or os.path.normpath(target_path) == os.path.expanduser("~"):
print("You have indicated that the target location is "+target_path+" -- that is, you want to wipe out your home directory with the contents of "+my_module+". My programming does not allow me to do that.\n\nGoodbye!")
return
elif os.path.exists(target_path):
print("There is already a file or directory at the location "+target_path+". For safety reasons this code does not overwrite existing files.\nPlease remove the file at "+target_path+" and try again.")
return
else:
user_input = input("""You have indicated you want to copy module:\n """+ my_module
+ """\nto:\n """+ target_path +"""\nIs that correct? Please indicate: y / [n]\n\n""")
if user_input == 'y' or user_input == 'Y':
#print("copy_tree(",my_directory_full_path,",", target_path,")")
copy_tree(my_directory_full_path, target_path)
else:
print("Goodbye!")
return | Helper function for copy_module_to_local(). Provides the actual copy
functionality, with highly cautious safeguards against copying over
important things.
Parameters
----------
target_path : string
String, file path to target location
my_directory_full_path: string
String, full pathname to this file's directory
my_module : string
String, name of the module to copy
Returns
-------
none | Below is the instruction that describes the task:
### Input:
Helper function for copy_module_to_local(). Provides the actual copy
functionality, with highly cautious safeguards against copying over
important things.
Parameters
----------
target_path : string
String, file path to target location
my_directory_full_path: string
String, full pathname to this file's directory
my_module : string
String, name of the module to copy
Returns
-------
none
### Response:
def copy_module(target_path, my_directory_full_path, my_module):
'''
Helper function for copy_module_to_local(). Provides the actual copy
functionality, with highly cautious safeguards against copying over
important things.
Parameters
----------
target_path : string
String, file path to target location
my_directory_full_path: string
String, full pathname to this file's directory
my_module : string
String, name of the module to copy
Returns
-------
none
'''
if target_path == 'q' or target_path == 'Q':
print("Goodbye!")
return
elif target_path == os.path.expanduser("~") or os.path.normpath(target_path) == os.path.expanduser("~"):
print("You have indicated that the target location is "+target_path+" -- that is, you want to wipe out your home directory with the contents of "+my_module+". My programming does not allow me to do that.\n\nGoodbye!")
return
elif os.path.exists(target_path):
print("There is already a file or directory at the location "+target_path+". For safety reasons this code does not overwrite existing files.\nPlease remove the file at "+target_path+" and try again.")
return
else:
user_input = input("""You have indicated you want to copy module:\n """+ my_module
+ """\nto:\n """+ target_path +"""\nIs that correct? Please indicate: y / [n]\n\n""")
if user_input == 'y' or user_input == 'Y':
#print("copy_tree(",my_directory_full_path,",", target_path,")")
copy_tree(my_directory_full_path, target_path)
else:
print("Goodbye!")
return |
def setup_docstring_style_convention(self, text):
"""Handle convention changes."""
if text == 'Custom':
self.docstring_style_select.label.setText(
_("Show the following errors:"))
self.docstring_style_ignore.label.setText(
_("Ignore the following errors:"))
else:
self.docstring_style_select.label.setText(
_("Show the following errors in addition "
"to the specified convention:"))
self.docstring_style_ignore.label.setText(
_("Ignore the following errors in addition "
"to the specified convention:")) | Handle convention changes. | Below is the the instruction that describes the task:
### Input:
Handle convention changes.
### Response:
def setup_docstring_style_convention(self, text):
"""Handle convention changes."""
if text == 'Custom':
self.docstring_style_select.label.setText(
_("Show the following errors:"))
self.docstring_style_ignore.label.setText(
_("Ignore the following errors:"))
else:
self.docstring_style_select.label.setText(
_("Show the following errors in addition "
"to the specified convention:"))
self.docstring_style_ignore.label.setText(
_("Ignore the following errors in addition "
"to the specified convention:")) |
def keywords(self):
"""
Returns a list of all keywords that this rule object has defined.
A keyword is considered defined if the value it returns != None.
"""
defined_keywords = [
('allowempty_map', 'allowempty_map'),
('assertion', 'assertion'),
('default', 'default'),
('class', 'class'),
('desc', 'desc'),
('enum', 'enum'),
('example', 'example'),
('extensions', 'extensions'),
('format', 'format'),
('func', 'func'),
('ident', 'ident'),
('include_name', 'include'),
('length', 'length'),
('map_regex_rule', 'map_regex_rule'),
('mapping', 'mapping'),
('matching', 'matching'),
('matching_rule', 'matching_rule'),
('name', 'name'),
        ('nullable', 'nullable'),
('parent', 'parent'),
('pattern', 'pattern'),
('pattern_regexp', 'pattern_regexp'),
('range', 'range'),
('regex_mappings', 'regex_mappings'),
('required', 'required'),
('schema', 'schema'),
('schema_str', 'schema_str'),
('sequence', 'sequence'),
('type', 'type'),
('type_class', 'type_class'),
('unique', 'unique'),
('version', 'version'),
]
found_keywords = []
for var_name, keyword_name in defined_keywords:
if getattr(self, var_name, None):
found_keywords.append(keyword_name)
return found_keywords | Returns a list of all keywords that this rule object has defined.
A keyword is considered defined if the value it returns != None. | Below is the instruction that describes the task:
### Input:
Returns a list of all keywords that this rule object has defined.
A keyword is considered defined if the value it returns != None.
### Response:
def keywords(self):
"""
Returns a list of all keywords that this rule object has defined.
A keyword is considered defined if the value it returns != None.
"""
defined_keywords = [
('allowempty_map', 'allowempty_map'),
('assertion', 'assertion'),
('default', 'default'),
('class', 'class'),
('desc', 'desc'),
('enum', 'enum'),
('example', 'example'),
('extensions', 'extensions'),
('format', 'format'),
('func', 'func'),
('ident', 'ident'),
('include_name', 'include'),
('length', 'length'),
('map_regex_rule', 'map_regex_rule'),
('mapping', 'mapping'),
('matching', 'matching'),
('matching_rule', 'matching_rule'),
('name', 'name'),
        ('nullable', 'nullable'),
('parent', 'parent'),
('pattern', 'pattern'),
('pattern_regexp', 'pattern_regexp'),
('range', 'range'),
('regex_mappings', 'regex_mappings'),
('required', 'required'),
('schema', 'schema'),
('schema_str', 'schema_str'),
('sequence', 'sequence'),
('type', 'type'),
('type_class', 'type_class'),
('unique', 'unique'),
('version', 'version'),
]
found_keywords = []
for var_name, keyword_name in defined_keywords:
if getattr(self, var_name, None):
found_keywords.append(keyword_name)
return found_keywords |
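The pattern is simply getattr-based filtering; a tiny self-contained sketch with a toy object (not the real Rule class):

# Toy illustration of the filtering above; ToyRule is invented for the sketch.
class ToyRule(object):
    def __init__(self):
        self.required = True
        self.pattern = None   # falsy values are treated as "not defined"
        self.type = "str"

defined = [("required", "required"), ("pattern", "pattern"), ("type", "type")]
rule = ToyRule()
print([kw for attr, kw in defined if getattr(rule, attr, None)])
# -> ['required', 'type']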
def tag(ctx, corpus, output):
"""Tag chemical entities and write CHEMDNER annotations predictions file."""
click.echo('chemdataextractor.chemdner.tag')
for line in corpus:
pmid, title, abstract = line.strip().split(u'\t')
# print(pmid)
counter = 1
d = Document(Title(title), Paragraph(abstract))
for t, section in [(d.elements[0], u'T'), (d.elements[1], u'A')]:
for cem in t.cems:
code = u'%s:%s:%s' % (section, cem.start, cem.end)
output.write(u'\t'.join([pmid, code, six.text_type(counter), u'1']))
output.write(u'\n')
counter += 1 | Tag chemical entities and write CHEMDNER annotations predictions file. | Below is the instruction that describes the task:
### Input:
Tag chemical entities and write CHEMDNER annotations predictions file.
### Response:
def tag(ctx, corpus, output):
"""Tag chemical entities and write CHEMDNER annotations predictions file."""
click.echo('chemdataextractor.chemdner.tag')
for line in corpus:
pmid, title, abstract = line.strip().split(u'\t')
# print(pmid)
counter = 1
d = Document(Title(title), Paragraph(abstract))
for t, section in [(d.elements[0], u'T'), (d.elements[1], u'A')]:
for cem in t.cems:
code = u'%s:%s:%s' % (section, cem.start, cem.end)
output.write(u'\t'.join([pmid, code, six.text_type(counter), u'1']))
output.write(u'\n')
counter += 1 |
def _translate_shortcut(self, name):
"""Maps a given shortcut to corresponding name
* 'run_X' or 'r_X' to 'run_XXXXXXXXX'
* 'crun' to the current run name in case of a
single run instance if trajectory is used via `v_crun`
* 'par' 'parameters'
* 'dpar' to 'derived_parameters'
* 'res' to 'results'
* 'conf' to 'config'
:return: True or False and the mapped name.
"""
if isinstance(name, int):
return True, self._root_instance.f_wildcard('$', name)
if name.startswith('run_') or name.startswith('r_'):
split_name = name.split('_')
if len(split_name) == 2:
index = split_name[1]
if index.isdigit():
return True, self._root_instance.f_wildcard('$', int(index))
elif index == 'A':
return True, self._root_instance.f_wildcard('$', -1)
if name.startswith('runtoset_') or name.startswith('rts_'):
split_name = name.split('_')
if len(split_name) == 2:
index = split_name[1]
if index.isdigit():
return True, self._root_instance.f_wildcard('$set', int(index))
elif index == 'A':
return True, self._root_instance.f_wildcard('$set', -1)
if name in SHORTCUT_SET:
if name == 'par':
return True, 'parameters'
elif name == 'dpar':
return True, 'derived_parameters'
elif name == 'res':
return True, 'results'
elif name == 'conf':
return True, 'config'
else:
raise RuntimeError('You shall not pass!')
return False, name | Maps a given shortcut to corresponding name
* 'run_X' or 'r_X' to 'run_XXXXXXXXX'
* 'crun' to the current run name in case of a
single run instance if trajectory is used via `v_crun`
* 'par' 'parameters'
* 'dpar' to 'derived_parameters'
* 'res' to 'results'
* 'conf' to 'config'
:return: True or False and the mapped name. | Below is the instruction that describes the task:
### Input:
Maps a given shortcut to corresponding name
* 'run_X' or 'r_X' to 'run_XXXXXXXXX'
* 'crun' to the current run name in case of a
single run instance if trajectory is used via `v_crun`
* 'par' 'parameters'
* 'dpar' to 'derived_parameters'
* 'res' to 'results'
* 'conf' to 'config'
:return: True or False and the mapped name.
### Response:
def _translate_shortcut(self, name):
"""Maps a given shortcut to corresponding name
* 'run_X' or 'r_X' to 'run_XXXXXXXXX'
* 'crun' to the current run name in case of a
single run instance if trajectory is used via `v_crun`
* 'par' 'parameters'
* 'dpar' to 'derived_parameters'
* 'res' to 'results'
* 'conf' to 'config'
:return: True or False and the mapped name.
"""
if isinstance(name, int):
return True, self._root_instance.f_wildcard('$', name)
if name.startswith('run_') or name.startswith('r_'):
split_name = name.split('_')
if len(split_name) == 2:
index = split_name[1]
if index.isdigit():
return True, self._root_instance.f_wildcard('$', int(index))
elif index == 'A':
return True, self._root_instance.f_wildcard('$', -1)
if name.startswith('runtoset_') or name.startswith('rts_'):
split_name = name.split('_')
if len(split_name) == 2:
index = split_name[1]
if index.isdigit():
return True, self._root_instance.f_wildcard('$set', int(index))
elif index == 'A':
return True, self._root_instance.f_wildcard('$set', -1)
if name in SHORTCUT_SET:
if name == 'par':
return True, 'parameters'
elif name == 'dpar':
return True, 'derived_parameters'
elif name == 'res':
return True, 'results'
elif name == 'conf':
return True, 'config'
else:
raise RuntimeError('You shall not pass!')
return False, name |
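The 'run_X' / 'r_X' handling reduces to splitting on '_' and inspecting the suffix; a standalone sketch of just that parsing step (the real method then expands the index via f_wildcard):

# Parsing sketch only; returns the run index, -1 for 'A', or None.
def parse_run_shortcut(name):
    if name.startswith(("run_", "r_")):
        parts = name.split("_")
        if len(parts) == 2 and parts[1].isdigit():
            return int(parts[1])
        if len(parts) == 2 and parts[1] == "A":
            return -1
    return None

print(parse_run_shortcut("run_7"))    # 7
print(parse_run_shortcut("r_A"))      # -1
print(parse_run_shortcut("results"))  # None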
def load(self, days=PRELOAD_DAYS, only_cameras=None,
date_from=None, date_to=None, limit=None):
"""Load Arlo videos from the given criteria
:param days: number of days to retrieve
:param only_cameras: retrieve only <ArloCamera> on that list
:param date_from: refine from initial date
:param date_to: refine final date
:param limit: define number of objects to return
"""
videos = []
url = LIBRARY_ENDPOINT
if not (date_from and date_to):
now = datetime.today()
date_from = (now - timedelta(days=days)).strftime('%Y%m%d')
date_to = now.strftime('%Y%m%d')
params = {'dateFrom': date_from, 'dateTo': date_to}
data = self._session.query(url,
method='POST',
extra_params=params).get('data')
# get all cameras to append to create ArloVideo object
all_cameras = self._session.cameras
for video in data:
# pylint: disable=cell-var-from-loop
srccam = \
list(filter(
lambda cam: cam.device_id == video.get('deviceId'),
all_cameras)
)[0]
# make sure only_cameras is a list
if only_cameras and \
not isinstance(only_cameras, list):
only_cameras = [(only_cameras)]
# filter by camera only
if only_cameras:
if list(filter(lambda cam: cam.device_id == srccam.device_id,
list(only_cameras))):
videos.append(ArloVideo(video, srccam, self._session))
else:
videos.append(ArloVideo(video, srccam, self._session))
if limit:
return videos[:limit]
return videos | Load Arlo videos from the given criteria
:param days: number of days to retrieve
:param only_cameras: retrieve only <ArloCamera> on that list
:param date_from: refine from initial date
:param date_to: refine final date
:param limit: define number of objects to return | Below is the instruction that describes the task:
### Input:
Load Arlo videos from the given criteria
:param days: number of days to retrieve
:param only_cameras: retrieve only <ArloCamera> on that list
:param date_from: refine from initial date
:param date_to: refine final date
:param limit: define number of objects to return
### Response:
def load(self, days=PRELOAD_DAYS, only_cameras=None,
date_from=None, date_to=None, limit=None):
"""Load Arlo videos from the given criteria
:param days: number of days to retrieve
:param only_cameras: retrieve only <ArloCamera> on that list
:param date_from: refine from initial date
:param date_to: refine final date
:param limit: define number of objects to return
"""
videos = []
url = LIBRARY_ENDPOINT
if not (date_from and date_to):
now = datetime.today()
date_from = (now - timedelta(days=days)).strftime('%Y%m%d')
date_to = now.strftime('%Y%m%d')
params = {'dateFrom': date_from, 'dateTo': date_to}
data = self._session.query(url,
method='POST',
extra_params=params).get('data')
# get all cameras to append to create ArloVideo object
all_cameras = self._session.cameras
for video in data:
# pylint: disable=cell-var-from-loop
srccam = \
list(filter(
lambda cam: cam.device_id == video.get('deviceId'),
all_cameras)
)[0]
# make sure only_cameras is a list
if only_cameras and \
not isinstance(only_cameras, list):
only_cameras = [(only_cameras)]
# filter by camera only
if only_cameras:
if list(filter(lambda cam: cam.device_id == srccam.device_id,
list(only_cameras))):
videos.append(ArloVideo(video, srccam, self._session))
else:
videos.append(ArloVideo(video, srccam, self._session))
if limit:
return videos[:limit]
return videos |
def _onOutgoingMessageReceived(self, conn, message):
"""
Callback for receiving a message on a new outgoing connection. Used only if encryption is enabled to exchange the random keys.
Once the key exchange is done, this triggers the onNodeConnected callback, and further messages are deferred to the onMessageReceived callback.
:param conn: connection object
:type conn: TcpConnection
:param message: received message
:type message: any
"""
if not conn.sendRandKey:
conn.sendRandKey = message
conn.send(self._selfNode.address)
node = self._connToNode(conn)
conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node))
self._onNodeConnected(node) | Callback for receiving a message on a new outgoing connection. Used only if encryption is enabled to exchange the random keys.
Once the key exchange is done, this triggers the onNodeConnected callback, and further messages are deferred to the onMessageReceived callback.
:param conn: connection object
:type conn: TcpConnection
:param message: received message
:type message: any | Below is the instruction that describes the task:
### Input:
Callback for receiving a message on a new outgoing connection. Used only if encryption is enabled to exchange the random keys.
Once the key exchange is done, this triggers the onNodeConnected callback, and further messages are deferred to the onMessageReceived callback.
:param conn: connection object
:type conn: TcpConnection
:param message: received message
:type message: any
### Response:
def _onOutgoingMessageReceived(self, conn, message):
"""
Callback for receiving a message on a new outgoing connection. Used only if encryption is enabled to exchange the random keys.
Once the key exchange is done, this triggers the onNodeConnected callback, and further messages are deferred to the onMessageReceived callback.
:param conn: connection object
:type conn: TcpConnection
:param message: received message
:type message: any
"""
if not conn.sendRandKey:
conn.sendRandKey = message
conn.send(self._selfNode.address)
node = self._connToNode(conn)
conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node))
self._onNodeConnected(node) |
def hsplit(self, location=None, new=False, text=None):
""" Split horizontally. """
assert location is None or text is None or new is False # Don't pass two of them.
if location or text or new:
editor_buffer = self._get_or_create_editor_buffer(location=location, text=text)
else:
editor_buffer = None
self.active_tab.hsplit(editor_buffer) | Split horizontally. | Below is the instruction that describes the task:
### Input:
Split horizontally.
### Response:
def hsplit(self, location=None, new=False, text=None):
""" Split horizontally. """
assert location is None or text is None or new is False # Don't pass two of them.
if location or text or new:
editor_buffer = self._get_or_create_editor_buffer(location=location, text=text)
else:
editor_buffer = None
self.active_tab.hsplit(editor_buffer) |
def read_frames(file_path, frame_size, hop_size, start=0.0,
end=float('inf'), buffer_size=5760000):
"""
Read an audio file frame by frame. The frames are yielded one after another.
Args:
file_path (str): Path to the file to read.
frame_size (int): The number of samples per frame.
hop_size (int): The number of samples between two frames.
start (float): Start in seconds to read from.
end (float): End in seconds to read to.
``inf`` means to the end of the file.
buffer_size (int): Number of samples to load into memory at once
and return as a single block.
The exact number of loaded samples depends on the
block-size of the audioread library. So it can be
of x higher, where the x is typically 1024 or 4096.
Returns:
Generator: A generator yielding a tuple for every frame.
The first item is the frame and
the second a boolean indicating if it is the last frame.
"""
rest_samples = np.array([], dtype=np.float32)
for block in read_blocks(file_path, start=start, end=end, buffer_size=buffer_size):
# Prepend rest samples from previous block
block = np.concatenate([rest_samples, block])
current_sample = 0
# Get frames that are fully contained in the block
while current_sample + frame_size < block.size:
frame = block[current_sample:current_sample + frame_size]
yield frame, False
current_sample += hop_size
# Store rest samples for next block
rest_samples = block[current_sample:]
if rest_samples.size > 0:
rest_samples = np.pad(
rest_samples,
(0, frame_size - rest_samples.size),
mode='constant',
constant_values=0
)
yield rest_samples, True | Read an audio file frame by frame. The frames are yielded one after another.
Args:
file_path (str): Path to the file to read.
frame_size (int): The number of samples per frame.
hop_size (int): The number of samples between two frames.
start (float): Start in seconds to read from.
end (float): End in seconds to read to.
``inf`` means to the end of the file.
buffer_size (int): Number of samples to load into memory at once
and return as a single block.
The exact number of loaded samples depends on the
block-size of the audioread library. So it can be
of x higher, where the x is typically 1024 or 4096.
Returns:
Generator: A generator yielding a tuple for every frame.
The first item is the frame and
the second a boolean indicating if it is the last frame. | Below is the instruction that describes the task:
### Input:
Read an audio file frame by frame. The frames are yielded one after another.
Args:
file_path (str): Path to the file to read.
frame_size (int): The number of samples per frame.
hop_size (int): The number of samples between two frames.
start (float): Start in seconds to read from.
end (float): End in seconds to read to.
``inf`` means to the end of the file.
buffer_size (int): Number of samples to load into memory at once
and return as a single block.
The exact number of loaded samples depends on the
block-size of the audioread library. So it can be
of x higher, where the x is typically 1024 or 4096.
Returns:
Generator: A generator yielding a tuple for every frame.
The first item is the frame and
the second a boolean indicating if it is the last frame.
### Response:
def read_frames(file_path, frame_size, hop_size, start=0.0,
end=float('inf'), buffer_size=5760000):
"""
Read an audio file frame by frame. The frames are yielded one after another.
Args:
file_path (str): Path to the file to read.
frame_size (int): The number of samples per frame.
hop_size (int): The number of samples between two frames.
start (float): Start in seconds to read from.
end (float): End in seconds to read to.
``inf`` means to the end of the file.
buffer_size (int): Number of samples to load into memory at once
and return as a single block.
The exact number of loaded samples depends on the
block-size of the audioread library. So it can be
of x higher, where the x is typically 1024 or 4096.
Returns:
Generator: A generator yielding a tuple for every frame.
The first item is the frame and
the second a boolean indicating if it is the last frame.
"""
rest_samples = np.array([], dtype=np.float32)
for block in read_blocks(file_path, start=start, end=end, buffer_size=buffer_size):
# Prepend rest samples from previous block
block = np.concatenate([rest_samples, block])
current_sample = 0
# Get frames that are fully contained in the block
while current_sample + frame_size < block.size:
frame = block[current_sample:current_sample + frame_size]
yield frame, False
current_sample += hop_size
# Store rest samples for next block
rest_samples = block[current_sample:]
if rest_samples.size > 0:
rest_samples = np.pad(
rest_samples,
(0, frame_size - rest_samples.size),
mode='constant',
constant_values=0
)
yield rest_samples, True |
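Leaving file reading aside, the frame/hop arithmetic can be sketched on an in-memory signal; this assumed stand-in mirrors the padding of the final partial frame but does not use audioread:

# Self-contained framing sketch on a synthetic signal.
import numpy as np

signal = np.arange(10, dtype=np.float32)
frame_size, hop_size = 4, 2
pos = 0
while pos + frame_size <= signal.size:
    print(signal[pos:pos + frame_size])      # [0. 1. 2. 3.], [2. 3. 4. 5.], ...
    pos += hop_size
rest = signal[pos:]
if rest.size:
    print(np.pad(rest, (0, frame_size - rest.size)))  # [8. 9. 0. 0.]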
def find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq):
"""
    This function takes two aligned sequences (subject and query) and
    the position on the subject where the alignment starts. The sequences
    are compared codon by codon. If a mismatch is found it is saved in
    'mis_matches'. If a gap is found the function get_inframe_gap is used
    to find the indel sequence and keep the sequence in the correct
    reading frame. The function translate_indel is used to name indel
    mutations and translate the indels to amino acids.
    The function returns a list of tuples containing all the information
    needed about the mutation in order to look it up in the database dict
    of known mutations and to write the output files for the user.
"""
mis_matches = []
# Find start pos of first codon in frame, i_start
codon_offset = (sbjct_start-1) % 3
i_start = 0
if codon_offset != 0:
i_start = 3 - codon_offset
sbjct_start = sbjct_start + i_start
# Set sequences in frame
sbjct_seq = sbjct_seq[i_start:]
qry_seq = qry_seq[i_start:]
# Find codon number of the first codon in the sequence, start at 0
codon_no = int((sbjct_start-1) / 3) # 1,2,3 start on 0
# s_shift and q_shift are used when gaps appears
q_shift = 0
s_shift = 0
mut_no = 0
# Find inserts and deletions in sequence
indel_no = 0
indels = get_indels(sbjct_seq, qry_seq, sbjct_start)
# Go through sequence and save mutations when found
for index in range(0, len(sbjct_seq), 3):
# Count codon number
codon_no += 1
# Shift index according to gaps
s_i = index + s_shift
q_i = index + q_shift
# Get codons
sbjct_codon = sbjct_seq[s_i:s_i+3]
qry_codon = qry_seq[q_i:q_i+3]
        if len(sbjct_seq[s_i:].replace("-","")) + len(qry_seq[q_i:].replace("-","")) < 6:
break
# Check for mutations
if sbjct_codon.upper() != qry_codon.upper():
# Check for codon insertions and deletions and frameshift mutations
if "-" in sbjct_codon or "-" in qry_codon:
# Get indel info
try:
indel_data = indels[indel_no]
except IndexError:
print(sbjct_codon, qry_codon)
print(indels)
                    print(indel_no)  # note: 'gene' and 'indel_data' are undefined here
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no +=1
# Get the affected sequence in frame for both for sbjct and qry
if mut == "ins":
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], 3)
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], int(math.floor(len(sbjct_rf_indel)/3) *3))
else:
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], 3)
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], int(math.floor(len(qry_rf_indel)/3) *3))
mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
# Set index to the correct reading frame after the indel gap
shift_diff_before = abs(s_shift - q_shift)
s_shift += len(sbjct_rf_indel) - 3
q_shift += len(qry_rf_indel) - 3
shift_diff = abs(s_shift - q_shift)
if shift_diff_before != 0 and shift_diff %3 == 0:
if s_shift > q_shift:
nucs_needed = int((len(sbjct_rf_indel)/3) *3) + shift_diff
pre_qry_indel = qry_rf_indel
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], nucs_needed)
q_shift += len(qry_rf_indel) - len(pre_qry_indel)
elif q_shift > s_shift:
nucs_needed = int((len(qry_rf_indel)/3)*3) + shift_diff
pre_sbjct_indel = sbjct_rf_indel
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], nucs_needed)
s_shift += len(sbjct_rf_indel) - len(pre_sbjct_indel)
mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
if "Frameshift" in mut_name:
mut_name = mut_name.split("-")[0] + "- Frame restored"
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
# Check if the next mutation in the indels list is in the current codon
# Find the number of individul gaps in the evaluated sequence
no_of_indels = len(re.findall("\-\w", sbjct_rf_indel)) + len(re.findall("\-\w", qry_rf_indel))
if no_of_indels > 1:
for j in range(indel_no, indel_no + no_of_indels - 1):
try:
indel_data = indels[j]
except IndexError:
sys.exit("indel_data list is out of range, bug!")
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no +=1
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
# Set codon number, and save nucleotides from out of frame mutations
if mut == "del":
codon_no += int((len(sbjct_rf_indel) - 3)/3)
# If evaluated insert is only gaps codon_no should not increment
elif sbjct_rf_indel.count("-") == len(sbjct_rf_indel):
codon_no -= 1
# Check of point mutations
else:
mut = "sub"
aa_ref = aa(sbjct_codon)
aa_alt = aa(qry_codon)
if aa_ref != aa_alt:
# End search for mutation if a premature stop codon is found
mut_name = "p." + aa_ref + str(codon_no) + aa_alt
mis_matches += [[mut, codon_no, codon_no, aa_alt, mut_name, sbjct_codon, qry_codon, aa_ref, aa_alt]]
# If a Premature stop codon occur report it an stop the loop
try:
if mis_matches[-1][-1] == "*":
mut_name += " - Premature stop codon"
mis_matches[-1][4] = mis_matches[-1][4].split("-")[0] + " - Premature stop codon"
break
except IndexError:
pass
# Sort mutations on position
mis_matches = sorted(mis_matches, key = lambda x:x[1])
return mis_matches | This function takes two aligned sequences (subject and query) and
the position on the subject where the alignment starts. The sequences
are compared codon by codon. If a mismatch is found it is saved in
'mis_matches'. If a gap is found the function get_inframe_gap is used
to find the indel sequence and keep the sequence in the correct
reading frame. The function translate_indel is used to name indel
mutations and translate the indels to amino acids.
The function returns a list of tuples containing all the information
needed about the mutation in order to look it up in the database dict
of known mutations and to write the output files for the user. | Below is the instruction that describes the task:
### Input:
This function takes two aligned sequences (subject and query) and
the position on the subject where the alignment starts. The sequences
are compared codon by codon. If a mismatch is found it is saved in
'mis_matches'. If a gap is found the function get_inframe_gap is used
to find the indel sequence and keep the sequence in the correct
reading frame. The function translate_indel is used to name indel
mutations and translate the indels to amino acids.
The function returns a list of tuples containing all the information
needed about the mutation in order to look it up in the database dict
of known mutations and to write the output files for the user.
### Response:
def find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq):
"""
    This function takes two aligned sequences (subject and query) and
    the position on the subject where the alignment starts. The sequences
    are compared codon by codon. If a mismatch is found it is saved in
    'mis_matches'. If a gap is found the function get_inframe_gap is used
    to find the indel sequence and keep the sequence in the correct
    reading frame. The function translate_indel is used to name indel
    mutations and translate the indels to amino acids.
    The function returns a list of tuples containing all the information
    needed about the mutation in order to look it up in the database dict
    of known mutations and to write the output files for the user.
"""
mis_matches = []
# Find start pos of first codon in frame, i_start
codon_offset = (sbjct_start-1) % 3
i_start = 0
if codon_offset != 0:
i_start = 3 - codon_offset
sbjct_start = sbjct_start + i_start
# Set sequences in frame
sbjct_seq = sbjct_seq[i_start:]
qry_seq = qry_seq[i_start:]
# Find codon number of the first codon in the sequence, start at 0
codon_no = int((sbjct_start-1) / 3) # 1,2,3 start on 0
# s_shift and q_shift are used when gaps appears
q_shift = 0
s_shift = 0
mut_no = 0
# Find inserts and deletions in sequence
indel_no = 0
indels = get_indels(sbjct_seq, qry_seq, sbjct_start)
# Go through sequence and save mutations when found
for index in range(0, len(sbjct_seq), 3):
# Count codon number
codon_no += 1
# Shift index according to gaps
s_i = index + s_shift
q_i = index + q_shift
# Get codons
sbjct_codon = sbjct_seq[s_i:s_i+3]
qry_codon = qry_seq[q_i:q_i+3]
        if len(sbjct_seq[s_i:].replace("-","")) + len(qry_seq[q_i:].replace("-","")) < 6:
break
# Check for mutations
if sbjct_codon.upper() != qry_codon.upper():
# Check for codon insertions and deletions and frameshift mutations
if "-" in sbjct_codon or "-" in qry_codon:
# Get indel info
try:
indel_data = indels[indel_no]
except IndexError:
print(sbjct_codon, qry_codon)
print(indels)
                    print(indel_no)  # note: 'gene' and 'indel_data' are undefined here
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no +=1
# Get the affected sequence in frame for both for sbjct and qry
if mut == "ins":
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], 3)
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], int(math.floor(len(sbjct_rf_indel)/3) *3))
else:
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], 3)
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], int(math.floor(len(qry_rf_indel)/3) *3))
mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
# Set index to the correct reading frame after the indel gap
shift_diff_before = abs(s_shift - q_shift)
s_shift += len(sbjct_rf_indel) - 3
q_shift += len(qry_rf_indel) - 3
shift_diff = abs(s_shift - q_shift)
if shift_diff_before != 0 and shift_diff %3 == 0:
if s_shift > q_shift:
nucs_needed = int((len(sbjct_rf_indel)/3) *3) + shift_diff
pre_qry_indel = qry_rf_indel
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], nucs_needed)
q_shift += len(qry_rf_indel) - len(pre_qry_indel)
elif q_shift > s_shift:
nucs_needed = int((len(qry_rf_indel)/3)*3) + shift_diff
pre_sbjct_indel = sbjct_rf_indel
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], nucs_needed)
s_shift += len(sbjct_rf_indel) - len(pre_sbjct_indel)
mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
if "Frameshift" in mut_name:
mut_name = mut_name.split("-")[0] + "- Frame restored"
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
# Check if the next mutation in the indels list is in the current codon
# Find the number of individul gaps in the evaluated sequence
no_of_indels = len(re.findall("\-\w", sbjct_rf_indel)) + len(re.findall("\-\w", qry_rf_indel))
if no_of_indels > 1:
for j in range(indel_no, indel_no + no_of_indels - 1):
try:
indel_data = indels[j]
except IndexError:
sys.exit("indel_data list is out of range, bug!")
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no +=1
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
# Set codon number, and save nucleotides from out of frame mutations
if mut == "del":
codon_no += int((len(sbjct_rf_indel) - 3)/3)
# If evaluated insert is only gaps codon_no should not increment
elif sbjct_rf_indel.count("-") == len(sbjct_rf_indel):
codon_no -= 1
# Check of point mutations
else:
mut = "sub"
aa_ref = aa(sbjct_codon)
aa_alt = aa(qry_codon)
if aa_ref != aa_alt:
# End search for mutation if a premature stop codon is found
mut_name = "p." + aa_ref + str(codon_no) + aa_alt
mis_matches += [[mut, codon_no, codon_no, aa_alt, mut_name, sbjct_codon, qry_codon, aa_ref, aa_alt]]
# If a Premature stop codon occur report it an stop the loop
try:
if mis_matches[-1][-1] == "*":
mut_name += " - Premature stop codon"
mis_matches[-1][4] = mis_matches[-1][4].split("-")[0] + " - Premature stop codon"
break
except IndexError:
pass
# Sort mutations on position
mis_matches = sorted(mis_matches, key = lambda x:x[1])
return mis_matches |
def create_generic(numeric_values, textual_values, target, algorithm = util_functions.AlgorithmTypes.regression):
"""
Creates a model from a generic list numeric values and text values
numeric_values - A list of lists that are the predictors
textual_values - A list of lists that are the predictors
(each item in textual_values corresponds to the similarly indexed counterpart in numeric_values)
target - The variable that we are trying to predict. A list of integers.
algorithm - the type of algorithm that will be used
"""
algorithm = select_algorithm(target)
#Initialize a result dictionary to return.
results = {'errors': [],'success' : False, 'cv_kappa' : 0, 'cv_mean_absolute_error': 0,
'feature_ext' : "", 'classifier' : "", 'algorithm' : algorithm}
if len(numeric_values)!=len(textual_values) or len(numeric_values)!=len(target):
msg = "Target, numeric features, and text features must all be the same length."
results['errors'].append(msg)
log.exception(msg)
return results
try:
#Initialize a predictor set object that encapsulates all of the text and numeric predictors
pset = predictor_set.PredictorSet(essaytype="train")
for i in xrange(0, len(numeric_values)):
pset.add_row(numeric_values[i], textual_values[i], target[i])
except:
msg = "predictor set creation failed."
results['errors'].append(msg)
log.exception(msg)
try:
#Extract all features and then train a classifier with the features
feature_ext, classifier, cv_error_results = model_creator.extract_features_and_generate_model_predictors(pset, algorithm)
results['cv_kappa']=cv_error_results['kappa']
results['cv_mean_absolute_error']=cv_error_results['mae']
results['feature_ext']=feature_ext
results['classifier']=classifier
results['success']=True
except:
msg = "feature extraction and model creation failed."
results['errors'].append(msg)
log.exception(msg)
return results | Creates a model from a generic list numeric values and text values
numeric_values - A list of lists that are the predictors
textual_values - A list of lists that are the predictors
(each item in textual_values corresponds to the similarly indexed counterpart in numeric_values)
target - The variable that we are trying to predict. A list of integers.
algorithm - the type of algorithm that will be used | Below is the instruction that describes the task:
### Input:
Creates a model from a generic list numeric values and text values
numeric_values - A list of lists that are the predictors
textual_values - A list of lists that are the predictors
(each item in textual_values corresponds to the similarly indexed counterpart in numeric_values)
target - The variable that we are trying to predict. A list of integers.
algorithm - the type of algorithm that will be used
### Response:
def create_generic(numeric_values, textual_values, target, algorithm = util_functions.AlgorithmTypes.regression):
"""
Creates a model from a generic list numeric values and text values
numeric_values - A list of lists that are the predictors
textual_values - A list of lists that are the predictors
(each item in textual_values corresponds to the similarly indexed counterpart in numeric_values)
target - The variable that we are trying to predict. A list of integers.
algorithm - the type of algorithm that will be used
"""
algorithm = select_algorithm(target)
#Initialize a result dictionary to return.
results = {'errors': [],'success' : False, 'cv_kappa' : 0, 'cv_mean_absolute_error': 0,
'feature_ext' : "", 'classifier' : "", 'algorithm' : algorithm}
if len(numeric_values)!=len(textual_values) or len(numeric_values)!=len(target):
msg = "Target, numeric features, and text features must all be the same length."
results['errors'].append(msg)
log.exception(msg)
return results
try:
#Initialize a predictor set object that encapsulates all of the text and numeric predictors
pset = predictor_set.PredictorSet(essaytype="train")
for i in xrange(0, len(numeric_values)):
pset.add_row(numeric_values[i], textual_values[i], target[i])
except:
msg = "predictor set creation failed."
results['errors'].append(msg)
log.exception(msg)
try:
#Extract all features and then train a classifier with the features
feature_ext, classifier, cv_error_results = model_creator.extract_features_and_generate_model_predictors(pset, algorithm)
results['cv_kappa']=cv_error_results['kappa']
results['cv_mean_absolute_error']=cv_error_results['mae']
results['feature_ext']=feature_ext
results['classifier']=classifier
results['success']=True
except:
msg = "feature extraction and model creation failed."
results['errors'].append(msg)
log.exception(msg)
return results |
def export_kml_file(self):
"""Generate KML element tree from ``Placemarks``.
Returns:
etree.ElementTree: KML element tree depicting ``Placemarks``
"""
kml = create_elem('kml')
kml.Document = create_elem('Document')
for place in sorted(self.values(), key=lambda x: x.name):
kml.Document.append(place.tokml())
return etree.ElementTree(kml) | Generate KML element tree from ``Placemarks``.
Returns:
etree.ElementTree: KML element tree depicting ``Placemarks`` | Below is the instruction that describes the task:
### Input:
Generate KML element tree from ``Placemarks``.
Returns:
etree.ElementTree: KML element tree depicting ``Placemarks``
### Response:
def export_kml_file(self):
"""Generate KML element tree from ``Placemarks``.
Returns:
etree.ElementTree: KML element tree depicting ``Placemarks``
"""
kml = create_elem('kml')
kml.Document = create_elem('Document')
for place in sorted(self.values(), key=lambda x: x.name):
kml.Document.append(place.tokml())
return etree.ElementTree(kml) |
def authentication_required(meth):
"""Simple class method decorator.
Checks if the client is currently connected.
:param meth: the original called method
"""
def check(cls, *args, **kwargs):
if cls.authenticated:
return meth(cls, *args, **kwargs)
raise Error("Authentication required")
return check | Simple class method decorator.
Checks if the client is currently connected.
:param meth: the original called method | Below is the instruction that describes the task:
### Input:
Simple class method decorator.
Checks if the client is currently connected.
:param meth: the original called method
### Response:
def authentication_required(meth):
"""Simple class method decorator.
Checks if the client is currently connected.
:param meth: the original called method
"""
def check(cls, *args, **kwargs):
if cls.authenticated:
return meth(cls, *args, **kwargs)
raise Error("Authentication required")
return check |
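Typical usage on a toy client class (Error is assumed to be an Exception subclass in the surrounding module; a stand-in is defined here so the sketch runs on its own):

# Usage sketch; Client and this Error stand-in are invented for illustration.
class Error(Exception):
    pass

class Client(object):
    authenticated = False

    @authentication_required
    def fetch(self):
        return "data"

c = Client()
try:
    c.fetch()
except Error as exc:
    print(exc)           # Authentication required
c.authenticated = True
print(c.fetch())         # data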
def delete_repo(self, repo_name=None, envs=[], query='/repositories/'):
"""
`repo_name` - Name of repository to delete
Delete repo in specified environments
"""
orphan_query = '/content/orphans/rpm/'
juicer.utils.Log.log_debug("Delete Repo: %s", repo_name)
for env in self.args.envs:
if not juicer.utils.repo_exists_p(repo_name, self.connectors[env], env):
juicer.utils.Log.log_info("repo `%s` doesn't exist in %s... skipping!",
(repo_name, env))
continue
else:
url = "%s%s-%s/" % (query, repo_name, env)
_r = self.connectors[env].delete(url)
if _r.status_code == Constants.PULP_DELETE_ACCEPTED:
juicer.utils.Log.log_info("deleted repo `%s` in %s",
(repo_name, env))
# if delete was successful, delete orphaned rpms
_r = self.connectors[env].get(orphan_query)
if _r.status_code is Constants.PULP_GET_OK:
if len(juicer.utils.load_json_str(_r.content)) > 0:
__r = self.connectors[env].delete(orphan_query)
if __r.status_code is Constants.PULP_DELETE_ACCEPTED:
juicer.utils.Log.log_debug("deleted orphaned rpms in %s." % env)
else:
juicer.utils.Log.log_error("unable to delete orphaned rpms in %s. a %s error was returned", (env, __r.status_code))
else:
juicer.utils.Log.log_error("unable to get a list of orphaned rpms. encountered a %s error." % _r.status_code)
else:
_r.raise_for_status()
return True | `repo_name` - Name of repository to delete
Delete repo in specified environments | Below is the instruction that describes the task:
### Input:
`repo_name` - Name of repository to delete
Delete repo in specified environments
### Response:
def delete_repo(self, repo_name=None, envs=[], query='/repositories/'):
"""
`repo_name` - Name of repository to delete
Delete repo in specified environments
"""
orphan_query = '/content/orphans/rpm/'
juicer.utils.Log.log_debug("Delete Repo: %s", repo_name)
for env in self.args.envs:
if not juicer.utils.repo_exists_p(repo_name, self.connectors[env], env):
juicer.utils.Log.log_info("repo `%s` doesn't exist in %s... skipping!",
(repo_name, env))
continue
else:
url = "%s%s-%s/" % (query, repo_name, env)
_r = self.connectors[env].delete(url)
if _r.status_code == Constants.PULP_DELETE_ACCEPTED:
juicer.utils.Log.log_info("deleted repo `%s` in %s",
(repo_name, env))
# if delete was successful, delete orphaned rpms
_r = self.connectors[env].get(orphan_query)
if _r.status_code is Constants.PULP_GET_OK:
if len(juicer.utils.load_json_str(_r.content)) > 0:
__r = self.connectors[env].delete(orphan_query)
if __r.status_code is Constants.PULP_DELETE_ACCEPTED:
juicer.utils.Log.log_debug("deleted orphaned rpms in %s." % env)
else:
juicer.utils.Log.log_error("unable to delete orphaned rpms in %s. a %s error was returned", (env, __r.status_code))
else:
juicer.utils.Log.log_error("unable to get a list of orphaned rpms. encountered a %s error." % _r.status_code)
else:
_r.raise_for_status()
return True |
def Akers_Deans_Crosser(m, rhog, rhol, kl, mul, Cpl, D, x):
r'''Calculates heat transfer coefficient for condensation
    of a pure chemical inside a horizontal tube or tube bundle, as presented in
[2]_ according to [1]_.
.. math::
Nu = \frac{hD_i}{k_l} = C Re_e^n Pr_l^{1/3}
C = 0.0265, n=0.8 \text{ for } Re_e > 5\times10^4
C = 5.03, n=\frac{1}{3} \text{ for } Re_e < 5\times10^4
Re_e = \frac{D_i G_e}{\mu_l}
G_e = G\left[(1-x)+x(\rho_l/\rho_g)^{0.5}\right]
Parameters
----------
m : float
Mass flow rate [kg/s]
rhog : float
Density of the gas [kg/m^3]
rhol : float
Density of the liquid [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
D : float
Diameter of the tubing [m]
x : float
Quality at the specific interval [-]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Examples
--------
>>> Akers_Deans_Crosser(m=0.35, rhog=6.36, rhol=582.9, kl=0.098,
... mul=159E-6, Cpl=2520., D=0.03, x=0.85)
7117.24177265201
References
----------
.. [1] Akers, W. W., H. A. Deans, and O. K. Crosser. "Condensing Heat
Transfer Within Horizontal Tubes." Chem. Eng. Progr. Vol: 55, Symposium
Ser. No. 29 (January 1, 1959).
.. [2] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1st.
Wiley-Interscience, 1991.
'''
G = m/(pi/4*D**2)
Ge = G*((1-x) + x*(rhol/rhog)**0.5)
Ree = D*Ge/mul
Prl = mul*Cpl/kl
if Ree > 5E4:
C, n = 0.0265, 0.8
else:
C, n = 5.03, 1/3.
Nu = C*Ree**n*Prl**(1/3.)
return Nu*kl/D | r'''Calculates heat transfer coefficient for condensation
of a pure chemical inside a horizontal tube or tube bundle, as presented in
[2]_ according to [1]_.
.. math::
Nu = \frac{hD_i}{k_l} = C Re_e^n Pr_l^{1/3}
C = 0.0265, n=0.8 \text{ for } Re_e > 5\times10^4
C = 5.03, n=\frac{1}{3} \text{ for } Re_e < 5\times10^4
Re_e = \frac{D_i G_e}{\mu_l}
G_e = G\left[(1-x)+x(\rho_l/\rho_g)^{0.5}\right]
Parameters
----------
m : float
Mass flow rate [kg/s]
rhog : float
Density of the gas [kg/m^3]
rhol : float
Density of the liquid [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
D : float
Diameter of the tubing [m]
x : float
Quality at the specific interval [-]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Examples
--------
>>> Akers_Deans_Crosser(m=0.35, rhog=6.36, rhol=582.9, kl=0.098,
... mul=159E-6, Cpl=2520., D=0.03, x=0.85)
7117.24177265201
References
----------
.. [1] Akers, W. W., H. A. Deans, and O. K. Crosser. "Condensing Heat
Transfer Within Horizontal Tubes." Chem. Eng. Progr. Vol: 55, Symposium
Ser. No. 29 (January 1, 1959).
.. [2] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1st.
Wiley-Interscience, 1991. | Below is the instruction that describes the task:
### Input:
r'''Calculates heat transfer coefficient for condensation
of a pure chemical inside a horizontal tube or tube bundle, as presented in
[2]_ according to [1]_.
.. math::
Nu = \frac{hD_i}{k_l} = C Re_e^n Pr_l^{1/3}
C = 0.0265, n=0.8 \text{ for } Re_e > 5\times10^4
C = 5.03, n=\frac{1}{3} \text{ for } Re_e < 5\times10^4
Re_e = \frac{D_i G_e}{\mu_l}
G_e = G\left[(1-x)+x(\rho_l/\rho_g)^{0.5}\right]
Parameters
----------
m : float
Mass flow rate [kg/s]
rhog : float
Density of the gas [kg/m^3]
rhol : float
Density of the liquid [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
D : float
Diameter of the tubing [m]
x : float
Quality at the specific interval [-]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Examples
--------
>>> Akers_Deans_Crosser(m=0.35, rhog=6.36, rhol=582.9, kl=0.098,
... mul=159E-6, Cpl=2520., D=0.03, x=0.85)
7117.24177265201
References
----------
.. [1] Akers, W. W., H. A. Deans, and O. K. Crosser. "Condensing Heat
Transfer Within Horizontal Tubes." Chem. Eng. Progr. Vol: 55, Symposium
Ser. No. 29 (January 1, 1959).
.. [2] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1st.
Wiley-Interscience, 1991.
### Response:
def Akers_Deans_Crosser(m, rhog, rhol, kl, mul, Cpl, D, x):
r'''Calculates heat transfer coefficient for condensation
    of a pure chemical inside a horizontal tube or tube bundle, as presented in
[2]_ according to [1]_.
.. math::
Nu = \frac{hD_i}{k_l} = C Re_e^n Pr_l^{1/3}
C = 0.0265, n=0.8 \text{ for } Re_e > 5\times10^4
C = 5.03, n=\frac{1}{3} \text{ for } Re_e < 5\times10^4
Re_e = \frac{D_i G_e}{\mu_l}
G_e = G\left[(1-x)+x(\rho_l/\rho_g)^{0.5}\right]
Parameters
----------
m : float
Mass flow rate [kg/s]
rhog : float
Density of the gas [kg/m^3]
rhol : float
Density of the liquid [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
D : float
Diameter of the tubing [m]
x : float
Quality at the specific interval [-]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Examples
--------
>>> Akers_Deans_Crosser(m=0.35, rhog=6.36, rhol=582.9, kl=0.098,
... mul=159E-6, Cpl=2520., D=0.03, x=0.85)
7117.24177265201
References
----------
.. [1] Akers, W. W., H. A. Deans, and O. K. Crosser. "Condensing Heat
Transfer Within Horizontal Tubes." Chem. Eng. Progr. Vol: 55, Symposium
Ser. No. 29 (January 1, 1959).
.. [2] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1st.
Wiley-Interscience, 1991.
'''
G = m/(pi/4*D**2)
Ge = G*((1-x) + x*(rhol/rhog)**0.5)
Ree = D*Ge/mul
Prl = mul*Cpl/kl
if Ree > 5E4:
C, n = 0.0265, 0.8
else:
C, n = 5.03, 1/3.
Nu = C*Ree**n*Prl**(1/3.)
return Nu*kl/D |
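A quick hand check of the docstring example, retracing the intermediate quantities (values rounded):

from math import pi
G = 0.35 / (pi / 4 * 0.03**2)                       # ~495 kg/m^2/s
Ge = G * ((1 - 0.85) + 0.85 * (582.9 / 6.36)**0.5)  # ~4100 kg/m^2/s
Ree = 0.03 * Ge / 159E-6                            # ~7.7e5 -> C=0.0265, n=0.8
Prl = 159E-6 * 2520. / 0.098                        # ~4.09
Nu = 0.0265 * Ree**0.8 * Prl**(1 / 3.)              # ~2180
print(Nu * 0.098 / 0.03)                            # ~7117 W/m^2/K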
def add_subsegment(self, subsegment):
"""
Add input subsegment as a child subsegment and increment
reference counter and total subsegments counter of the
parent segment.
"""
super(Subsegment, self).add_subsegment(subsegment)
self.parent_segment.increment() | Add input subsegment as a child subsegment and increment
reference counter and total subsegments counter of the
parent segment. | Below is the instruction that describes the task:
### Input:
Add input subsegment as a child subsegment and increment
reference counter and total subsegments counter of the
parent segment.
### Response:
def add_subsegment(self, subsegment):
"""
Add input subsegment as a child subsegment and increment
reference counter and total subsegments counter of the
parent segment.
"""
super(Subsegment, self).add_subsegment(subsegment)
self.parent_segment.increment() |
def next(self):
"""
Returns the next data value.
:return: (float|int) the next data value
"""
out = self.peek()[self._headers.index(self._field)]
self._cursor += 1
if out is not None:
self._lastValue = out
return out | Returns the next data value.
:return: (float|int) the next data value | Below is the instruction that describes the task:
### Input:
Returns the next data value.
:return: (float|int) the next data value
### Response:
def next(self):
"""
Returns the next data value.
:return: (float|int) the next data value
"""
out = self.peek()[self._headers.index(self._field)]
self._cursor += 1
if out is not None:
self._lastValue = out
return out |
def _encode_multipart_formdata(fields, files):
"""
Create a multipart encoded form for use in PUTing and POSTing.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------A_vEry_UnlikelY_bouNdary_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append(str('Content-Disposition: form-data; name="%s"' % key))
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append(str('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)))
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body | Create a multipart encoded form for use in PUTing and POSTing.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance | Below is the instruction that describes the task:
### Input:
Create a multipart encoded form for use in PUTing and POSTing.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
### Response:
def _encode_multipart_formdata(fields, files):
"""
Create a multipart encoded form for use in PUTing and POSTing.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------A_vEry_UnlikelY_bouNdary_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append(str('Content-Disposition: form-data; name="%s"' % key))
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append(str('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)))
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body |
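Example call; the helper relies on a module-level get_content_type, so a trivial stand-in is supplied here purely to make the sketch self-contained:

# Usage sketch with one regular field and one file part.
def get_content_type(filename):     # stand-in, not the module's real helper
    return "text/plain"

fields = [("summary", "bug report")]
files = [("attachment", "log.txt", "line one\nline two")]
content_type, body = _encode_multipart_formdata(fields, files)
print(content_type)   # multipart/form-data; boundary=----------A_vEry_UnlikelY_bouNdary_$
print(body[:60])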
def run_migrations_online(config):
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine and associate a
connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
alembic.context.configure(connection=connection)
with alembic.context.begin_transaction():
alembic.context.run_migrations() | Run migrations in 'online' mode.
In this scenario we need to create an Engine and associate a
connection with the context. | Below is the instruction that describes the task:
### Input:
Run migrations in 'online' mode.
In this scenario we need to create an Engine and associate a
connection with the context.
### Response:
def run_migrations_online(config):
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine and associate a
connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
alembic.context.configure(connection=connection)
with alembic.context.begin_transaction():
alembic.context.run_migrations() |
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
#root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles | Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. | Below is the the instruction that describes the task:
### Input:
Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly.
### Response:
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
#root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles |
def disconnect(self):
"""Disconnect from the server"""
logger.info(u'Disconnecting')
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
self.state = DISCONNECTED | Disconnect from the server | Below is the the instruction that describes the task:
### Input:
Disconnect from the server
### Response:
def disconnect(self):
"""Disconnect from the server"""
logger.info(u'Disconnecting')
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
self.state = DISCONNECTED |
def _truncnorm_sf(truncation_level, values):
"""
Survival function for truncated normal distribution.
Assumes zero mean, standard deviation equal to one and symmetric
truncation.
:param truncation_level:
Positive float number representing the truncation on both sides
around the mean, in units of sigma.
:param values:
Numpy array of values as input to a survival function for the given
distribution.
:returns:
Numpy array of survival function results in a range between 0 and 1.
>>> from scipy.stats import truncnorm
>>> truncnorm(-3, 3).sf(0.12345) == _truncnorm_sf(3, 0.12345)
True
"""
# notation from http://en.wikipedia.org/wiki/Truncated_normal_distribution.
# given that mu = 0 and sigma = 1, we have alpha = a and beta = b.
# "CDF" in comments refers to cumulative distribution function
# of non-truncated distribution with that mu and sigma values.
# assume symmetric truncation, that is ``a = - truncation_level``
# and ``b = + truncation_level``.
# calculate CDF of b
phi_b = ndtr(truncation_level)
# calculate Z as ``Z = CDF(b) - CDF(a)``, here we assume that
# ``CDF(a) == CDF(- truncation_level) == 1 - CDF(b)``
z = phi_b * 2 - 1
# calculate the result of survival function of ``values``,
# and restrict it to the interval where probability is defined --
# 0..1. here we use some transformations of the original formula
# that is ``SF(x) = 1 - (CDF(x) - CDF(a)) / Z`` in order to minimize
# number of arithmetic operations and function calls:
# ``SF(x) = (Z - CDF(x) + CDF(a)) / Z``,
# ``SF(x) = (CDF(b) - CDF(a) - CDF(x) + CDF(a)) / Z``,
# ``SF(x) = (CDF(b) - CDF(x)) / Z``.
return ((phi_b - ndtr(values)) / z).clip(0.0, 1.0) | Survival function for truncated normal distribution.
Assumes zero mean, standard deviation equal to one and symmetric
truncation.
:param truncation_level:
Positive float number representing the truncation on both sides
around the mean, in units of sigma.
:param values:
Numpy array of values as input to a survival function for the given
distribution.
:returns:
Numpy array of survival function results in a range between 0 and 1.
>>> from scipy.stats import truncnorm
>>> truncnorm(-3, 3).sf(0.12345) == _truncnorm_sf(3, 0.12345)
True | Below is the the instruction that describes the task:
### Input:
Survival function for truncated normal distribution.
Assumes zero mean, standard deviation equal to one and symmetric
truncation.
:param truncation_level:
Positive float number representing the truncation on both sides
around the mean, in units of sigma.
:param values:
Numpy array of values as input to a survival function for the given
distribution.
:returns:
Numpy array of survival function results in a range between 0 and 1.
>>> from scipy.stats import truncnorm
>>> truncnorm(-3, 3).sf(0.12345) == _truncnorm_sf(3, 0.12345)
True
### Response:
def _truncnorm_sf(truncation_level, values):
"""
Survival function for truncated normal distribution.
Assumes zero mean, standard deviation equal to one and symmetric
truncation.
:param truncation_level:
Positive float number representing the truncation on both sides
around the mean, in units of sigma.
:param values:
Numpy array of values as input to a survival function for the given
distribution.
:returns:
Numpy array of survival function results in a range between 0 and 1.
>>> from scipy.stats import truncnorm
>>> truncnorm(-3, 3).sf(0.12345) == _truncnorm_sf(3, 0.12345)
True
"""
# notation from http://en.wikipedia.org/wiki/Truncated_normal_distribution.
# given that mu = 0 and sigma = 1, we have alpha = a and beta = b.
# "CDF" in comments refers to cumulative distribution function
# of non-truncated distribution with that mu and sigma values.
# assume symmetric truncation, that is ``a = - truncation_level``
# and ``b = + truncation_level``.
# calculate CDF of b
phi_b = ndtr(truncation_level)
# calculate Z as ``Z = CDF(b) - CDF(a)``, here we assume that
# ``CDF(a) == CDF(- truncation_level) == 1 - CDF(b)``
z = phi_b * 2 - 1
# calculate the result of survival function of ``values``,
# and restrict it to the interval where probability is defined --
# 0..1. here we use some transformations of the original formula
# that is ``SF(x) = 1 - (CDF(x) - CDF(a)) / Z`` in order to minimize
# number of arithmetic operations and function calls:
# ``SF(x) = (Z - CDF(x) + CDF(a)) / Z``,
# ``SF(x) = (CDF(b) - CDF(a) - CDF(x) + CDF(a)) / Z``,
# ``SF(x) = (CDF(b) - CDF(x)) / Z``.
return ((phi_b - ndtr(values)) / z).clip(0.0, 1.0) |
def _read_value(self, file):
"""Read a single value from the given file"""
if self.data_type.nptype is not None:
dtype = (np.dtype(self.data_type.nptype).newbyteorder(
self.endianness))
return fromfile(file, dtype=dtype, count=1)
return self.data_type.read(file, self.endianness) | Read a single value from the given file | Below is the the instruction that describes the task:
### Input:
Read a single value from the given file
### Response:
def _read_value(self, file):
"""Read a single value from the given file"""
if self.data_type.nptype is not None:
dtype = (np.dtype(self.data_type.nptype).newbyteorder(
self.endianness))
return fromfile(file, dtype=dtype, count=1)
return self.data_type.read(file, self.endianness) |
def _read_cache_from_file(self):
"""Read the contents of the cache from a file on disk."""
cache = {}
try:
with(open(self._cache_file_name, 'r')) as fp:
contents = fp.read()
cache = simplejson.loads(contents)
except (IOError, JSONDecodeError):
# The file could not be read. This is not a problem if the file does not exist.
pass
return cache | Read the contents of the cache from a file on disk. | Below is the the instruction that describes the task:
### Input:
Read the contents of the cache from a file on disk.
### Response:
def _read_cache_from_file(self):
"""Read the contents of the cache from a file on disk."""
cache = {}
try:
with(open(self._cache_file_name, 'r')) as fp:
contents = fp.read()
cache = simplejson.loads(contents)
except (IOError, JSONDecodeError):
# The file could not be read. This is not a problem if the file does not exist.
pass
return cache |
def get_diagonalisation(frequencies, rate_matrix=None):
"""
Normalises and diagonalises the rate matrix.
:param frequencies: character state frequencies.
:type frequencies: numpy.array
:param rate_matrix: (optional) rate matrix (by default an all-equal-rate matrix is used)
:type rate_matrix: numpy.ndarray
:return: matrix diagonalisation (d, A, A^{-1})
such that A.dot(np.diag(d))).dot(A^{-1}) = 1/mu Q (normalised generator)
:rtype: tuple
"""
Q = get_normalised_generator(frequencies, rate_matrix)
d, A = np.linalg.eig(Q)
return d, A, np.linalg.inv(A) | Normalises and diagonalises the rate matrix.
:param frequencies: character state frequencies.
:type frequencies: numpy.array
:param rate_matrix: (optional) rate matrix (by default an all-equal-rate matrix is used)
:type rate_matrix: numpy.ndarray
:return: matrix diagonalisation (d, A, A^{-1})
such that A.dot(np.diag(d))).dot(A^{-1}) = 1/mu Q (normalised generator)
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Normalises and diagonalises the rate matrix.
:param frequencies: character state frequencies.
:type frequencies: numpy.array
:param rate_matrix: (optional) rate matrix (by default an all-equal-rate matrix is used)
:type rate_matrix: numpy.ndarray
:return: matrix diagonalisation (d, A, A^{-1})
such that A.dot(np.diag(d))).dot(A^{-1}) = 1/mu Q (normalised generator)
:rtype: tuple
### Response:
def get_diagonalisation(frequencies, rate_matrix=None):
"""
Normalises and diagonalises the rate matrix.
:param frequencies: character state frequencies.
:type frequencies: numpy.array
:param rate_matrix: (optional) rate matrix (by default an all-equal-rate matrix is used)
:type rate_matrix: numpy.ndarray
:return: matrix diagonalisation (d, A, A^{-1})
such that A.dot(np.diag(d))).dot(A^{-1}) = 1/mu Q (normalised generator)
:rtype: tuple
"""
Q = get_normalised_generator(frequencies, rate_matrix)
d, A = np.linalg.eig(Q)
return d, A, np.linalg.inv(A) |
def _delete_vdev_info(self, vdev):
"""handle vdev related info."""
vdev = vdev.lower()
network_config_file_name = self._get_network_file()
device = self._get_device_name(vdev)
cmd = '\n'.join(("num=$(sed -n '/auto %s/=' %s)" % (device,
network_config_file_name),
"dns=$(awk 'NR==(\"\'$num\'\"+6)&&"
"/dns-nameservers/' %s)" %
network_config_file_name,
"if [[ -n $dns ]]; then",
" sed -i '/auto %s/,+6d' %s" % (device,
network_config_file_name),
"else",
" sed -i '/auto %s/,+5d' %s" % (device,
network_config_file_name),
"fi"))
return cmd | handle vdev related info. | Below is the the instruction that describes the task:
### Input:
handle vdev related info.
### Response:
def _delete_vdev_info(self, vdev):
"""handle vdev related info."""
vdev = vdev.lower()
network_config_file_name = self._get_network_file()
device = self._get_device_name(vdev)
cmd = '\n'.join(("num=$(sed -n '/auto %s/=' %s)" % (device,
network_config_file_name),
"dns=$(awk 'NR==(\"\'$num\'\"+6)&&"
"/dns-nameservers/' %s)" %
network_config_file_name,
"if [[ -n $dns ]]; then",
" sed -i '/auto %s/,+6d' %s" % (device,
network_config_file_name),
"else",
" sed -i '/auto %s/,+5d' %s" % (device,
network_config_file_name),
"fi"))
return cmd |
def calculate_wave(i, lst_bin, wpm=WPM, frequency=FREQUENCY, framerate=FRAMERATE, amplitude=AMPLITUDE, seconds_per_dot=SECONDS_PER_DOT):
"""
Returns product of a sin wave and morse code (dit, dah, silent)
"""
bit = morse_bin(i=i, lst_bin=lst_bin, wpm=wpm, framerate=framerate,
default_value=0.0, seconds_per_dot=seconds_per_dot)
sine = sine_wave(i=i, frequency=frequency, framerate=framerate, amplitude=amplitude)
return bit * sine | Returns product of a sin wave and morse code (dit, dah, silent) | Below is the the instruction that describes the task:
### Input:
Returns product of a sin wave and morse code (dit, dah, silent)
### Response:
def calculate_wave(i, lst_bin, wpm=WPM, frequency=FREQUENCY, framerate=FRAMERATE, amplitude=AMPLITUDE, seconds_per_dot=SECONDS_PER_DOT):
"""
Returns product of a sin wave and morse code (dit, dah, silent)
"""
bit = morse_bin(i=i, lst_bin=lst_bin, wpm=wpm, framerate=framerate,
default_value=0.0, seconds_per_dot=seconds_per_dot)
sine = sine_wave(i=i, frequency=frequency, framerate=framerate, amplitude=amplitude)
return bit * sine |
def to_json(self):
""" Writes the complete Morse complex merge hierarchy to a
string object.
@ Out, a string object storing the entire merge hierarchy of
all maxima.
"""
capsule = {}
capsule["Hierarchy"] = []
for (
dying,
(persistence, surviving, saddle),
) in self.merge_sequence.items():
capsule["Hierarchy"].append(
{
"Persistence": persistence,
"Dying": dying,
"Surviving": surviving,
"Saddle": saddle,
}
)
capsule["Partitions"] = []
base = np.array([None] * len(self.Y))
for label, items in self.base_partitions.items():
base[items] = label
capsule["Partitions"] = base.tolist()
return json.dumps(capsule, separators=(",", ":")) | Writes the complete Morse complex merge hierarchy to a
string object.
@ Out, a string object storing the entire merge hierarchy of
all maxima. | Below is the the instruction that describes the task:
### Input:
Writes the complete Morse complex merge hierarchy to a
string object.
@ Out, a string object storing the entire merge hierarchy of
all maxima.
### Response:
def to_json(self):
""" Writes the complete Morse complex merge hierarchy to a
string object.
@ Out, a string object storing the entire merge hierarchy of
all maxima.
"""
capsule = {}
capsule["Hierarchy"] = []
for (
dying,
(persistence, surviving, saddle),
) in self.merge_sequence.items():
capsule["Hierarchy"].append(
{
"Persistence": persistence,
"Dying": dying,
"Surviving": surviving,
"Saddle": saddle,
}
)
capsule["Partitions"] = []
base = np.array([None] * len(self.Y))
for label, items in self.base_partitions.items():
base[items] = label
capsule["Partitions"] = base.tolist()
return json.dumps(capsule, separators=(",", ":")) |
def check_arguments(args, parser):
"""Check arguments passed by user that are not checked by argparse itself."""
if args.asm_block not in ['auto', 'manual']:
try:
args.asm_block = int(args.asm_block)
except ValueError:
parser.error('--asm-block can only be "auto", "manual" or an integer')
# Set default unit depending on performance model requested
if not args.unit:
if 'Roofline' in args.pmodel or 'RooflineIACA' in args.pmodel:
args.unit = 'FLOP/s'
else:
args.unit = 'cy/CL' | Check arguments passed by user that are not checked by argparse itself. | Below is the the instruction that describes the task:
### Input:
Check arguments passed by user that are not checked by argparse itself.
### Response:
def check_arguments(args, parser):
"""Check arguments passed by user that are not checked by argparse itself."""
if args.asm_block not in ['auto', 'manual']:
try:
args.asm_block = int(args.asm_block)
except ValueError:
parser.error('--asm-block can only be "auto", "manual" or an integer')
# Set default unit depending on performance model requested
if not args.unit:
if 'Roofline' in args.pmodel or 'RooflineIACA' in args.pmodel:
args.unit = 'FLOP/s'
else:
args.unit = 'cy/CL' |
def nmb_neurons(self) -> Tuple[int, ...]:
"""Number of neurons of the hidden layers.
>>> from hydpy import ANN
>>> ann = ANN(None)
>>> ann(nmb_inputs=2, nmb_neurons=(2, 1), nmb_outputs=3)
>>> ann.nmb_neurons
(2, 1)
>>> ann.nmb_neurons = (3,)
>>> ann.nmb_neurons
(3,)
>>> del ann.nmb_neurons
>>> ann.nmb_neurons
Traceback (most recent call last):
...
hydpy.core.exceptiontools.AttributeNotReady: Attribute `nmb_neurons` \
of object `ann` has not been prepared so far.
"""
return tuple(numpy.asarray(self._cann.nmb_neurons)) | Number of neurons of the hidden layers.
>>> from hydpy import ANN
>>> ann = ANN(None)
>>> ann(nmb_inputs=2, nmb_neurons=(2, 1), nmb_outputs=3)
>>> ann.nmb_neurons
(2, 1)
>>> ann.nmb_neurons = (3,)
>>> ann.nmb_neurons
(3,)
>>> del ann.nmb_neurons
>>> ann.nmb_neurons
Traceback (most recent call last):
...
hydpy.core.exceptiontools.AttributeNotReady: Attribute `nmb_neurons` \
of object `ann` has not been prepared so far. | Below is the the instruction that describes the task:
### Input:
Number of neurons of the hidden layers.
>>> from hydpy import ANN
>>> ann = ANN(None)
>>> ann(nmb_inputs=2, nmb_neurons=(2, 1), nmb_outputs=3)
>>> ann.nmb_neurons
(2, 1)
>>> ann.nmb_neurons = (3,)
>>> ann.nmb_neurons
(3,)
>>> del ann.nmb_neurons
>>> ann.nmb_neurons
Traceback (most recent call last):
...
hydpy.core.exceptiontools.AttributeNotReady: Attribute `nmb_neurons` \
of object `ann` has not been prepared so far.
### Response:
def nmb_neurons(self) -> Tuple[int, ...]:
"""Number of neurons of the hidden layers.
>>> from hydpy import ANN
>>> ann = ANN(None)
>>> ann(nmb_inputs=2, nmb_neurons=(2, 1), nmb_outputs=3)
>>> ann.nmb_neurons
(2, 1)
>>> ann.nmb_neurons = (3,)
>>> ann.nmb_neurons
(3,)
>>> del ann.nmb_neurons
>>> ann.nmb_neurons
Traceback (most recent call last):
...
hydpy.core.exceptiontools.AttributeNotReady: Attribute `nmb_neurons` \
of object `ann` has not been prepared so far.
"""
return tuple(numpy.asarray(self._cann.nmb_neurons)) |
def unregister_service(self, info):
"""Unregister a service."""
try:
del(self.services[info.name.lower()])
except:
pass
now = current_time_millis()
next_time = now
i = 0
while i < 3:
if now < next_time:
self.wait(next_time - now)
now = current_time_millis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.add_answer_at_time(
DNSPointer(info.type,
_TYPE_PTR, _CLASS_IN, 0, info.name), 0)
out.add_answer_at_time(
DNSService(info.name,
_TYPE_SRV, _CLASS_IN, 0, info.priority,
info.weight, info.port, info.name), 0)
out.add_answer_at_time(
DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
for k in info.address:
out.add_answer_at_time(
DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, k), 0)
self.send(out)
i += 1
next_time += _UNREGISTER_TIME | Unregister a service. | Below is the the instruction that describes the task:
### Input:
Unregister a service.
### Response:
def unregister_service(self, info):
"""Unregister a service."""
try:
del(self.services[info.name.lower()])
except:
pass
now = current_time_millis()
next_time = now
i = 0
while i < 3:
if now < next_time:
self.wait(next_time - now)
now = current_time_millis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.add_answer_at_time(
DNSPointer(info.type,
_TYPE_PTR, _CLASS_IN, 0, info.name), 0)
out.add_answer_at_time(
DNSService(info.name,
_TYPE_SRV, _CLASS_IN, 0, info.priority,
info.weight, info.port, info.name), 0)
out.add_answer_at_time(
DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
for k in info.address:
out.add_answer_at_time(
DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, k), 0)
self.send(out)
i += 1
next_time += _UNREGISTER_TIME |
def exists(self, value=None):
"""
Return True if the given pk value exists for the given class.
If no value is given, we use the value of the current field, which
is the value of the "_pk" attribute of its instance.
"""
try:
if not value:
value = self.get()
except (AttributeError, DoesNotExist):
# If the instance is deleted, the _pk attribute doesn't exist
# anymore. So we catch the AttributeError to return False (this pk
# field doesn't exist anymore) in this specific case
return False
else:
return self.connection.sismember(self.collection_key, value) | Return True if the given pk value exists for the given class.
If no value is given, we use the value of the current field, which
is the value of the "_pk" attribute of its instance. | Below is the the instruction that describes the task:
### Input:
Return True if the given pk value exists for the given class.
If no value is given, we use the value of the current field, which
is the value of the "_pk" attribute of its instance.
### Response:
def exists(self, value=None):
"""
Return True if the given pk value exists for the given class.
If no value is given, we use the value of the current field, which
is the value of the "_pk" attribute of its instance.
"""
try:
if not value:
value = self.get()
except (AttributeError, DoesNotExist):
# If the instance is deleted, the _pk attribute doesn't exist
# anymore. So we catch the AttributeError to return False (this pk
# field doesn't exist anymore) in this specific case
return False
else:
return self.connection.sismember(self.collection_key, value) |
def get(method, hmc, uri, uri_parms, logon_required):
"""Operation: List Storage Groups (always global but with filters)."""
query_str = uri_parms[0]
filter_args = parse_query_parms(method, uri, query_str)
result_storage_groups = []
for sg in hmc.consoles.console.storage_groups.list(filter_args):
result_sg = {}
for prop in sg.properties:
if prop in ('object-uri', 'cpc-uri', 'name', 'status',
'fulfillment-state', 'type'):
result_sg[prop] = sg.properties[prop]
result_storage_groups.append(result_sg)
return {'storage-groups': result_storage_groups} | Operation: List Storage Groups (always global but with filters). | Below is the the instruction that describes the task:
### Input:
Operation: List Storage Groups (always global but with filters).
### Response:
def get(method, hmc, uri, uri_parms, logon_required):
"""Operation: List Storage Groups (always global but with filters)."""
query_str = uri_parms[0]
filter_args = parse_query_parms(method, uri, query_str)
result_storage_groups = []
for sg in hmc.consoles.console.storage_groups.list(filter_args):
result_sg = {}
for prop in sg.properties:
if prop in ('object-uri', 'cpc-uri', 'name', 'status',
'fulfillment-state', 'type'):
result_sg[prop] = sg.properties[prop]
result_storage_groups.append(result_sg)
return {'storage-groups': result_storage_groups} |
def conv_defs(self):
""" Reads through the JSON object and converts them to Dataset """
log.setLevel(self.log_level)
start = datetime.datetime.now()
log.debug(" Converting to a Dataset: %s Triples", len(self.results))
self.defs = RdfDataset(self.results,
def_load=True,
bnode_only=True)
# self.cfg.__setattr__('rdf_prop_defs', self.defs, True)
log.debug(" conv complete in: %s" % (datetime.datetime.now() - start)) | Reads through the JSON object and converts them to Dataset | Below is the the instruction that describes the task:
### Input:
Reads through the JSON object and converts them to Dataset
### Response:
def conv_defs(self):
""" Reads through the JSON object and converts them to Dataset """
log.setLevel(self.log_level)
start = datetime.datetime.now()
log.debug(" Converting to a Dataset: %s Triples", len(self.results))
self.defs = RdfDataset(self.results,
def_load=True,
bnode_only=True)
# self.cfg.__setattr__('rdf_prop_defs', self.defs, True)
log.debug(" conv complete in: %s" % (datetime.datetime.now() - start)) |
def color(self, color=None):
"""Returns or sets (if a value is provided) the series' colour.
:param str color: If given, the series' colour will be set to this.
:rtype: ``str``"""
if color is None:
return self._color
else:
if not isinstance(color, str):
raise TypeError("color must be str, not '%s'" % str(color))
self._color = color | Returns or sets (if a value is provided) the series' colour.
:param str color: If given, the series' colour will be set to this.
:rtype: ``str`` | Below is the the instruction that describes the task:
### Input:
Returns or sets (if a value is provided) the series' colour.
:param str color: If given, the series' colour will be set to this.
:rtype: ``str``
### Response:
def color(self, color=None):
"""Returns or sets (if a value is provided) the series' colour.
:param str color: If given, the series' colour will be set to this.
:rtype: ``str``"""
if color is None:
return self._color
else:
if not isinstance(color, str):
raise TypeError("color must be str, not '%s'" % str(color))
self._color = color |
def enforce_type(self, attr, val):
"""converts a value to the attribute's type"""
if not attr in self.types:
return utfstr(val)
elif self.types[attr] == 'int':
return int(float(val))
elif self.types[attr] == 'float':
return float(val)
else:
return utfstr(val) | converts a value to the attribute's type | Below is the the instruction that describes the task:
### Input:
converts a value to the attribute's type
### Response:
def enforce_type(self, attr, val):
"""converts a value to the attribute's type"""
if not attr in self.types:
return utfstr(val)
elif self.types[attr] == 'int':
return int(float(val))
elif self.types[attr] == 'float':
return float(val)
else:
return utfstr(val) |
def dump_collection(cfg, f, indent=0):
'''Save a collection of attributes'''
for i, value in enumerate(cfg):
dump_value(None, value, f, indent)
if i < len(cfg) - 1:
f.write(u',\n') | Save a collection of attributes | Below is the the instruction that describes the task:
### Input:
Save a collection of attributes
### Response:
def dump_collection(cfg, f, indent=0):
'''Save a collection of attributes'''
for i, value in enumerate(cfg):
dump_value(None, value, f, indent)
if i < len(cfg) - 1:
f.write(u',\n') |
def feed(self, weights, data):
"""
Evaluate the network with alternative weights on the input data and
return the output activation.
"""
assert len(data) == self.layers[0].size
self.layers[0].apply(data)
# Propagate trough the remaining layers.
connections = zip(self.layers[:-1], weights, self.layers[1:])
for previous, weight, current in connections:
incoming = self.forward(weight, previous.outgoing)
current.apply(incoming)
# Return the activations of the output layer.
return self.layers[-1].outgoing | Evaluate the network with alternative weights on the input data and
return the output activation. | Below is the the instruction that describes the task:
### Input:
Evaluate the network with alternative weights on the input data and
return the output activation.
### Response:
def feed(self, weights, data):
"""
Evaluate the network with alternative weights on the input data and
return the output activation.
"""
assert len(data) == self.layers[0].size
self.layers[0].apply(data)
# Propagate trough the remaining layers.
connections = zip(self.layers[:-1], weights, self.layers[1:])
for previous, weight, current in connections:
incoming = self.forward(weight, previous.outgoing)
current.apply(incoming)
# Return the activations of the output layer.
return self.layers[-1].outgoing |
def load_image(file) -> DataAndMetadata.DataAndMetadata:
"""
Loads the image from the file-like object or string file.
If file is a string, the file is opened and then read.
Returns a numpy ndarray of our best guess for the most important image
in the file.
"""
if isinstance(file, str) or isinstance(file, str):
with open(file, "rb") as f:
return load_image(f)
dmtag = parse_dm3.parse_dm_header(file)
dmtag = fix_strings(dmtag)
# display_keys(dmtag)
img_index = -1
image_tags = dmtag['ImageList'][img_index]
data = imagedatadict_to_ndarray(image_tags['ImageData'])
calibrations = []
calibration_tags = image_tags['ImageData'].get('Calibrations', dict())
for dimension in calibration_tags.get('Dimension', list()):
origin, scale, units = dimension.get('Origin', 0.0), dimension.get('Scale', 1.0), dimension.get('Units', str())
calibrations.append((-origin * scale, scale, units))
calibrations = tuple(reversed(calibrations))
if len(data.shape) == 3 and data.dtype != numpy.uint8:
if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"):
if data.shape[1] == 1:
data = numpy.squeeze(data, 1)
data = numpy.moveaxis(data, 0, 1)
data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 1)
calibrations = (calibrations[2], calibrations[0])
else:
data = numpy.moveaxis(data, 0, 2)
data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 1)
calibrations = tuple(calibrations[1:]) + (calibrations[0],)
else:
data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 2)
elif len(data.shape) == 4 and data.dtype != numpy.uint8:
# data = numpy.moveaxis(data, 0, 2)
data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 2)
elif data.dtype == numpy.uint8:
data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape[:-1]))
else:
data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape))
brightness = calibration_tags.get('Brightness', dict())
origin, scale, units = brightness.get('Origin', 0.0), brightness.get('Scale', 1.0), brightness.get('Units', str())
intensity = -origin * scale, scale, units
timestamp = None
timezone = None
timezone_offset = None
title = image_tags.get('Name')
properties = dict()
if 'ImageTags' in image_tags:
voltage = image_tags['ImageTags'].get('ImageScanned', dict()).get('EHT', dict())
if voltage:
properties.setdefault("hardware_source", dict())["autostem"] = { "high_tension_v": float(voltage) }
dm_metadata_signal = image_tags['ImageTags'].get('Meta Data', dict()).get('Signal')
if dm_metadata_signal and dm_metadata_signal.lower() == "eels":
properties.setdefault("hardware_source", dict())["signal_type"] = dm_metadata_signal
if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"):
data_descriptor.collection_dimension_count += data_descriptor.datum_dimension_count - 1
data_descriptor.datum_dimension_count = 1
if image_tags['ImageTags'].get('Meta Data', dict()).get("IsSequence", False) and data_descriptor.collection_dimension_count > 0:
data_descriptor.is_sequence = True
data_descriptor.collection_dimension_count -= 1
timestamp_str = image_tags['ImageTags'].get("Timestamp")
if timestamp_str:
timestamp = get_datetime_from_timestamp_str(timestamp_str)
timezone = image_tags['ImageTags'].get("Timezone")
timezone_offset = image_tags['ImageTags'].get("TimezoneOffset")
# to avoid having duplicate copies in Swift, get rid of these tags
image_tags['ImageTags'].pop("Timestamp", None)
image_tags['ImageTags'].pop("Timezone", None)
image_tags['ImageTags'].pop("TimezoneOffset", None)
# put the image tags into properties
properties.update(image_tags['ImageTags'])
dimensional_calibrations = [Calibration.Calibration(c[0], c[1], c[2]) for c in calibrations]
while len(dimensional_calibrations) < data_descriptor.expected_dimension_count:
dimensional_calibrations.append(Calibration.Calibration())
intensity_calibration = Calibration.Calibration(intensity[0], intensity[1], intensity[2])
return DataAndMetadata.new_data_and_metadata(data,
data_descriptor=data_descriptor,
dimensional_calibrations=dimensional_calibrations,
intensity_calibration=intensity_calibration,
metadata=properties,
timestamp=timestamp,
timezone=timezone,
timezone_offset=timezone_offset) | Loads the image from the file-like object or string file.
If file is a string, the file is opened and then read.
Returns a numpy ndarray of our best guess for the most important image
in the file. | Below is the the instruction that describes the task:
### Input:
Loads the image from the file-like object or string file.
If file is a string, the file is opened and then read.
Returns a numpy ndarray of our best guess for the most important image
in the file.
### Response:
def load_image(file) -> DataAndMetadata.DataAndMetadata:
"""
Loads the image from the file-like object or string file.
If file is a string, the file is opened and then read.
Returns a numpy ndarray of our best guess for the most important image
in the file.
"""
if isinstance(file, str) or isinstance(file, str):
with open(file, "rb") as f:
return load_image(f)
dmtag = parse_dm3.parse_dm_header(file)
dmtag = fix_strings(dmtag)
# display_keys(dmtag)
img_index = -1
image_tags = dmtag['ImageList'][img_index]
data = imagedatadict_to_ndarray(image_tags['ImageData'])
calibrations = []
calibration_tags = image_tags['ImageData'].get('Calibrations', dict())
for dimension in calibration_tags.get('Dimension', list()):
origin, scale, units = dimension.get('Origin', 0.0), dimension.get('Scale', 1.0), dimension.get('Units', str())
calibrations.append((-origin * scale, scale, units))
calibrations = tuple(reversed(calibrations))
if len(data.shape) == 3 and data.dtype != numpy.uint8:
if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"):
if data.shape[1] == 1:
data = numpy.squeeze(data, 1)
data = numpy.moveaxis(data, 0, 1)
data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 1)
calibrations = (calibrations[2], calibrations[0])
else:
data = numpy.moveaxis(data, 0, 2)
data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 1)
calibrations = tuple(calibrations[1:]) + (calibrations[0],)
else:
data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 2)
elif len(data.shape) == 4 and data.dtype != numpy.uint8:
# data = numpy.moveaxis(data, 0, 2)
data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 2)
elif data.dtype == numpy.uint8:
data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape[:-1]))
else:
data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape))
brightness = calibration_tags.get('Brightness', dict())
origin, scale, units = brightness.get('Origin', 0.0), brightness.get('Scale', 1.0), brightness.get('Units', str())
intensity = -origin * scale, scale, units
timestamp = None
timezone = None
timezone_offset = None
title = image_tags.get('Name')
properties = dict()
if 'ImageTags' in image_tags:
voltage = image_tags['ImageTags'].get('ImageScanned', dict()).get('EHT', dict())
if voltage:
properties.setdefault("hardware_source", dict())["autostem"] = { "high_tension_v": float(voltage) }
dm_metadata_signal = image_tags['ImageTags'].get('Meta Data', dict()).get('Signal')
if dm_metadata_signal and dm_metadata_signal.lower() == "eels":
properties.setdefault("hardware_source", dict())["signal_type"] = dm_metadata_signal
if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"):
data_descriptor.collection_dimension_count += data_descriptor.datum_dimension_count - 1
data_descriptor.datum_dimension_count = 1
if image_tags['ImageTags'].get('Meta Data', dict()).get("IsSequence", False) and data_descriptor.collection_dimension_count > 0:
data_descriptor.is_sequence = True
data_descriptor.collection_dimension_count -= 1
timestamp_str = image_tags['ImageTags'].get("Timestamp")
if timestamp_str:
timestamp = get_datetime_from_timestamp_str(timestamp_str)
timezone = image_tags['ImageTags'].get("Timezone")
timezone_offset = image_tags['ImageTags'].get("TimezoneOffset")
# to avoid having duplicate copies in Swift, get rid of these tags
image_tags['ImageTags'].pop("Timestamp", None)
image_tags['ImageTags'].pop("Timezone", None)
image_tags['ImageTags'].pop("TimezoneOffset", None)
# put the image tags into properties
properties.update(image_tags['ImageTags'])
dimensional_calibrations = [Calibration.Calibration(c[0], c[1], c[2]) for c in calibrations]
while len(dimensional_calibrations) < data_descriptor.expected_dimension_count:
dimensional_calibrations.append(Calibration.Calibration())
intensity_calibration = Calibration.Calibration(intensity[0], intensity[1], intensity[2])
return DataAndMetadata.new_data_and_metadata(data,
data_descriptor=data_descriptor,
dimensional_calibrations=dimensional_calibrations,
intensity_calibration=intensity_calibration,
metadata=properties,
timestamp=timestamp,
timezone=timezone,
timezone_offset=timezone_offset) |
def group(self, *args):
"""Returns one or more subgroups of the match. Each argument is either a
group index or a group name."""
if len(args) == 0:
args = (0,)
grouplist = []
for group in args:
grouplist.append(self._get_slice(self._get_index(group), None))
if len(grouplist) == 1:
return grouplist[0]
else:
return tuple(grouplist) | Returns one or more subgroups of the match. Each argument is either a
group index or a group name. | Below is the the instruction that describes the task:
### Input:
Returns one or more subgroups of the match. Each argument is either a
group index or a group name.
### Response:
def group(self, *args):
"""Returns one or more subgroups of the match. Each argument is either a
group index or a group name."""
if len(args) == 0:
args = (0,)
grouplist = []
for group in args:
grouplist.append(self._get_slice(self._get_index(group), None))
if len(grouplist) == 1:
return grouplist[0]
else:
return tuple(grouplist) |
def create_host_call(model_dir):
"""Construct a host_call writing scalar summaries.
Args:
model_dir: String containing path to train
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
"""
graph = tf.get_default_graph()
summaries = graph.get_collection(tf.GraphKeys.SUMMARIES)
gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
summary_kwargs = collections.OrderedDict()
for t in summaries:
# TODO(aidangomez): enable ImageSummary support when we have a faster method
# see @shibow's comment in cl/202344570
if t.op.type not in ["ScalarSummary"]:
tf.logging.warn("Ignoring unsupported tf.Summary type %s" % t.op.type)
continue
name = t.op.name
tensor = t.op.inputs[1]
if t.op.type == "ScalarSummary":
assert tensor.shape.is_compatible_with([])
if tensor.dtype == tf.int64:
tensor = tf.to_int32(tensor)
summary_kwargs["ScalarSummary" + name] = tf.reshape(tensor, [1])
elif t.op.type == "ImageSummary":
# TODO(aidangomez): as we move to support more types, update
# common_layers.tpu_safe_image_summary
if tensor.dtype != tf.float32:
tf.logging.warn(
"Currently T2T on TPU only supports ImageSummary of "
"tf.float32-type Tensors. Skipping Tensor "
"%s with dtype %s..." % (tensor.name, tensor.dtype))
continue
# tensor = tf.to_float(tensor)
summary_kwargs["ImageSummary" + name] = tensor
# When no supported summaries are found, don't create host_call. Otherwise,
# TPU outfeed queue would enqueue global_step while host_call doesn't dequeue
# it, eventually causing hang.
if not summary_kwargs:
return None
summary_kwargs["global_step"] = gs_t
log_info("summary_kwargs %s" % str(summary_kwargs))
def host_call_fn(**kwargs):
"""Training host call. Creates summaries for training metrics.
Args:
**kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must
contain key "global_step" with value of current global_step Tensor.
Returns:
List of summary ops to run on the CPU host.
"""
gs = tf.to_int64(kwargs.pop("global_step")[0])
with tf.contrib.summary.create_file_writer(model_dir).as_default():
with tf.contrib.summary.always_record_summaries():
# We need to use tf.contrib.summary in order to feed the `step`.
for name, value in sorted(six.iteritems(kwargs)):
if name.startswith("ScalarSummary"):
name = name[len("ScalarSummary"):]
tf.contrib.summary.scalar(
name, tf.reduce_mean(tf.to_float(value)), step=gs)
elif name.startswith("ImageSummary"):
name = name[len("ImageSummary"):]
tf.contrib.summary.image(name, value, step=gs)
return tf.contrib.summary.all_summary_ops()
return (host_call_fn, summary_kwargs) | Construct a host_call writing scalar summaries.
Args:
model_dir: String containing path to train
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call. | Below is the the instruction that describes the task:
### Input:
Construct a host_call writing scalar summaries.
Args:
model_dir: String containing path to train
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
### Response:
def create_host_call(model_dir):
"""Construct a host_call writing scalar summaries.
Args:
model_dir: String containing path to train
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
"""
graph = tf.get_default_graph()
summaries = graph.get_collection(tf.GraphKeys.SUMMARIES)
gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
summary_kwargs = collections.OrderedDict()
for t in summaries:
# TODO(aidangomez): enable ImageSummary support when we have a faster method
# see @shibow's comment in cl/202344570
if t.op.type not in ["ScalarSummary"]:
tf.logging.warn("Ignoring unsupported tf.Summary type %s" % t.op.type)
continue
name = t.op.name
tensor = t.op.inputs[1]
if t.op.type == "ScalarSummary":
assert tensor.shape.is_compatible_with([])
if tensor.dtype == tf.int64:
tensor = tf.to_int32(tensor)
summary_kwargs["ScalarSummary" + name] = tf.reshape(tensor, [1])
elif t.op.type == "ImageSummary":
# TODO(aidangomez): as we move to support more types, update
# common_layers.tpu_safe_image_summary
if tensor.dtype != tf.float32:
tf.logging.warn(
"Currently T2T on TPU only supports ImageSummary of "
"tf.float32-type Tensors. Skipping Tensor "
"%s with dtype %s..." % (tensor.name, tensor.dtype))
continue
# tensor = tf.to_float(tensor)
summary_kwargs["ImageSummary" + name] = tensor
# When no supported summaries are found, don't create host_call. Otherwise,
# TPU outfeed queue would enqueue global_step while host_call doesn't dequeue
# it, eventually causing hang.
if not summary_kwargs:
return None
summary_kwargs["global_step"] = gs_t
log_info("summary_kwargs %s" % str(summary_kwargs))
def host_call_fn(**kwargs):
"""Training host call. Creates summaries for training metrics.
Args:
**kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must
contain key "global_step" with value of current global_step Tensor.
Returns:
List of summary ops to run on the CPU host.
"""
gs = tf.to_int64(kwargs.pop("global_step")[0])
with tf.contrib.summary.create_file_writer(model_dir).as_default():
with tf.contrib.summary.always_record_summaries():
# We need to use tf.contrib.summary in order to feed the `step`.
for name, value in sorted(six.iteritems(kwargs)):
if name.startswith("ScalarSummary"):
name = name[len("ScalarSummary"):]
tf.contrib.summary.scalar(
name, tf.reduce_mean(tf.to_float(value)), step=gs)
elif name.startswith("ImageSummary"):
name = name[len("ImageSummary"):]
tf.contrib.summary.image(name, value, step=gs)
return tf.contrib.summary.all_summary_ops()
return (host_call_fn, summary_kwargs) |
def build_wigner_circuits(circuit, phis, thetas, qubits,
qreg, creg):
"""Create the circuits to rotate to points in phase space
Args:
circuit (QuantumCircuit): The circuit to be appended with tomography
state preparation and/or measurements.
phis (np.matrix[[complex]]): phis
thetas (np.matrix[[complex]]): thetas
qubits (list[int]): a list of the qubit indexes of qreg to be measured.
qreg (QuantumRegister): the quantum register containing qubits to be
measured.
creg (ClassicalRegister): the classical register containing bits to
store measurement outcomes.
Returns:
list: A list of names of the added wigner function circuits.
Raises:
QiskitError: if circuit is not a valid QuantumCircuit.
"""
if not isinstance(circuit, QuantumCircuit):
raise QiskitError('Input circuit must be a QuantumCircuit object')
tomography_circuits = []
points = len(phis[0])
for point in range(points):
label = '_wigner_phase_point'
label += str(point)
tmp_circ = QuantumCircuit(qreg, creg, name=label)
for qubit, _ in enumerate(qubits):
tmp_circ.u3(thetas[qubit][point], 0,
phis[qubit][point], qreg[qubits[qubit]])
tmp_circ.measure(qreg[qubits[qubit]], creg[qubits[qubit]])
# Add to original circuit
tmp_circ = circuit + tmp_circ
tmp_circ.name = circuit.name + label
tomography_circuits.append(tmp_circ)
logger.info('>> Created Wigner function circuits for "%s"', circuit.name)
return tomography_circuits | Create the circuits to rotate to points in phase space
Args:
circuit (QuantumCircuit): The circuit to be appended with tomography
state preparation and/or measurements.
phis (np.matrix[[complex]]): phis
thetas (np.matrix[[complex]]): thetas
qubits (list[int]): a list of the qubit indexes of qreg to be measured.
qreg (QuantumRegister): the quantum register containing qubits to be
measured.
creg (ClassicalRegister): the classical register containing bits to
store measurement outcomes.
Returns:
list: A list of names of the added wigner function circuits.
Raises:
QiskitError: if circuit is not a valid QuantumCircuit. | Below is the the instruction that describes the task:
### Input:
Create the circuits to rotate to points in phase space
Args:
circuit (QuantumCircuit): The circuit to be appended with tomography
state preparation and/or measurements.
phis (np.matrix[[complex]]): phis
thetas (np.matrix[[complex]]): thetas
qubits (list[int]): a list of the qubit indexes of qreg to be measured.
qreg (QuantumRegister): the quantum register containing qubits to be
measured.
creg (ClassicalRegister): the classical register containing bits to
store measurement outcomes.
Returns:
list: A list of names of the added wigner function circuits.
Raises:
QiskitError: if circuit is not a valid QuantumCircuit.
### Response:
def build_wigner_circuits(circuit, phis, thetas, qubits,
qreg, creg):
"""Create the circuits to rotate to points in phase space
Args:
circuit (QuantumCircuit): The circuit to be appended with tomography
state preparation and/or measurements.
phis (np.matrix[[complex]]): phis
thetas (np.matrix[[complex]]): thetas
qubits (list[int]): a list of the qubit indexes of qreg to be measured.
qreg (QuantumRegister): the quantum register containing qubits to be
measured.
creg (ClassicalRegister): the classical register containing bits to
store measurement outcomes.
Returns:
list: A list of names of the added wigner function circuits.
Raises:
QiskitError: if circuit is not a valid QuantumCircuit.
"""
if not isinstance(circuit, QuantumCircuit):
raise QiskitError('Input circuit must be a QuantumCircuit object')
tomography_circuits = []
points = len(phis[0])
for point in range(points):
label = '_wigner_phase_point'
label += str(point)
tmp_circ = QuantumCircuit(qreg, creg, name=label)
for qubit, _ in enumerate(qubits):
tmp_circ.u3(thetas[qubit][point], 0,
phis[qubit][point], qreg[qubits[qubit]])
tmp_circ.measure(qreg[qubits[qubit]], creg[qubits[qubit]])
# Add to original circuit
tmp_circ = circuit + tmp_circ
tmp_circ.name = circuit.name + label
tomography_circuits.append(tmp_circ)
logger.info('>> Created Wigner function circuits for "%s"', circuit.name)
return tomography_circuits |
def interact(self,
msg='SHUTIT PAUSE POINT',
shutit_pexpect_child=None,
print_input=True,
level=1,
resize=True,
color='32',
default_msg=None,
wait=-1):
"""Same as pause_point, but sets up the terminal ready for unmediated
interaction."""
shutit_global.shutit_global_object.yield_to_draw()
self.pause_point(msg=msg,
shutit_pexpect_child=shutit_pexpect_child,
print_input=print_input,
level=level,
resize=resize,
color=color,
default_msg=default_msg,
interact=True,
wait=wait) | Same as pause_point, but sets up the terminal ready for unmediated
interaction. | Below is the the instruction that describes the task:
### Input:
Same as pause_point, but sets up the terminal ready for unmediated
interaction.
### Response:
def interact(self,
msg='SHUTIT PAUSE POINT',
shutit_pexpect_child=None,
print_input=True,
level=1,
resize=True,
color='32',
default_msg=None,
wait=-1):
"""Same as pause_point, but sets up the terminal ready for unmediated
interaction."""
shutit_global.shutit_global_object.yield_to_draw()
self.pause_point(msg=msg,
shutit_pexpect_child=shutit_pexpect_child,
print_input=print_input,
level=level,
resize=resize,
color=color,
default_msg=default_msg,
interact=True,
wait=wait) |