code stringlengths 75-104k | docstring stringlengths 1-46.9k | text stringlengths 164-112k
---|---|---
def normalize(self, body):
""" Invoke the JSON API normalizer
Perform the following:
* add the type as a rtype property
* flatten the payload
* add the id as a rid property ONLY if present
We don't need to vet the inputs much because the Parser
has already done all the work.
:param body:
the already vetted & parsed payload
:return:
normalized dict
"""
resource = body['data']
data = {'rtype': resource['type']}
if 'attributes' in resource:
attributes = resource['attributes']
attributes = self._normalize_attributes(attributes)
data.update(attributes)
if 'relationships' in resource:
relationships = resource['relationships']
relationships = self._normalize_relationships(relationships)
data.update(relationships)
if resource.get('id'):
data['rid'] = resource['id']
return data | Invoke the JSON API normalizer
Perform the following:
* add the type as a rtype property
* flatten the payload
* add the id as a rid property ONLY if present
We don't need to vet the inputs much because the Parser
has already done all the work.
:param body:
the already vetted & parsed payload
:return:
normalized dict | Below is the instruction that describes the task:
### Input:
Invoke the JSON API normalizer
Perform the following:
* add the type as a rtype property
* flatten the payload
* add the id as a rid property ONLY if present
We don't need to vet the inputs much because the Parser
has already done all the work.
:param body:
the already vetted & parsed payload
:return:
normalized dict
### Response:
def normalize(self, body):
""" Invoke the JSON API normalizer
Perform the following:
* add the type as a rtype property
* flatten the payload
* add the id as a rid property ONLY if present
We don't need to vet the inputs much because the Parser
has already done all the work.
:param body:
the already vetted & parsed payload
:return:
normalized dict
"""
resource = body['data']
data = {'rtype': resource['type']}
if 'attributes' in resource:
attributes = resource['attributes']
attributes = self._normalize_attributes(attributes)
data.update(attributes)
if 'relationships' in resource:
relationships = resource['relationships']
relationships = self._normalize_relationships(relationships)
data.update(relationships)
if resource.get('id'):
data['rid'] = resource['id']
return data |
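As a quick illustration of the flattening described in this row, here is a minimal, standalone sketch; the `_normalize_attributes`/`_normalize_relationships` helpers are not shown in the row, so they are assumed to act as identity functions on already-flat dicts.

```python
# Minimal standalone sketch of the flattening performed by normalize() above;
# the _normalize_* helpers from the original class are assumed to be identity
# functions here, which is a simplification.
def normalize_sketch(body):
    resource = body['data']
    data = {'rtype': resource['type']}
    data.update(resource.get('attributes', {}))
    data.update(resource.get('relationships', {}))
    if resource.get('id'):
        data['rid'] = resource['id']
    return data

payload = {'data': {'type': 'articles', 'id': '7',
                    'attributes': {'title': 'JSON API'}}}
print(normalize_sketch(payload))
# {'rtype': 'articles', 'title': 'JSON API', 'rid': '7'}
```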
def load_rules(self, force_reload=False, overwrite=True):
"""Load rules from policy file or cache."""
# double-checked locking
if self.load_once and self._policy_loaded:
return
with self._load_lock:
if self.load_once and self._policy_loaded:
return
reloaded, data = _cache.read_file(
self.policy_file, force_reload=force_reload)
self._policy_loaded = True
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule, self.raise_error)
self._set_rules(rules, overwrite=overwrite)
LOG.debug('Reload policy file: %s', self.policy_file) | Load rules from policy file or cache. | Below is the instruction that describes the task:
### Input:
Load rules from policy file or cache.
### Response:
def load_rules(self, force_reload=False, overwrite=True):
"""Load rules from policy file or cache."""
# double-checked locking
if self.load_once and self._policy_loaded:
return
with self._load_lock:
if self.load_once and self._policy_loaded:
return
reloaded, data = _cache.read_file(
self.policy_file, force_reload=force_reload)
self._policy_loaded = True
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule, self.raise_error)
self._set_rules(rules, overwrite=overwrite)
LOG.debug('Reload policy file: %s', self.policy_file) |
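The `load_rules` body above uses double-checked locking: the `load_once`/`_policy_loaded` flag is tested once without the lock (fast path) and again after acquiring it. A generic, self-contained sketch of that pattern, not tied to the original class:

```python
import threading

# Generic double-checked-locking sketch mirroring the structure of load_rules:
# check the flag cheaply first, then re-check under the lock before doing work.
class OncePerProcess:
    def __init__(self, loader):
        self._loader = loader
        self._loaded = False
        self._lock = threading.Lock()

    def load(self):
        if self._loaded:          # fast path, no lock taken
            return
        with self._lock:
            if self._loaded:      # another thread may have won the race
                return
            self._loader()
            self._loaded = True

once = OncePerProcess(lambda: print("loading policy file"))
once.load()  # prints once
once.load()  # no-op
```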
def getComboValue(combo):
"""
Checks to see if there is a dataType custom property set to determine
whether to return an integer or a string.
:param combo | <QComboBox>
:return <int> || <str>
"""
dataType = unwrapVariant(combo.property('dataType'))
if dataType == 'string':
return combo.currentText()
elif dataType == 'data':
return unwrapVariant(combo.itemData(combo.currentIndex()))
return combo.currentIndex() | Checks to see if there is a dataType custom property set to determine
whether to return an integer or a string.
:param combo | <QComboBox>
:return <int> || <str> | Below is the instruction that describes the task:
### Input:
Checks to see if there is a dataType custom property set to determine
whether to return an integer or a string.
:param combo | <QComboBox>
:return <int> || <str>
### Response:
def getComboValue(combo):
"""
Checks to see if there is a dataType custom property set to determine
whether to return an integer or a string.
:param combo | <QComboBox>
:return <int> || <str>
"""
dataType = unwrapVariant(combo.property('dataType'))
if dataType == 'string':
return combo.currentText()
elif dataType == 'data':
return unwrapVariant(combo.itemData(combo.currentIndex()))
return combo.currentIndex() |
def predict(abg,date,obs=568):
"""Run GB's predict using an ABG file as input."""
import orbfit
import RO.StringUtil
(ra,dec,a,b,ang) = orbfit.predict(abg,date,obs)
obj = {}  # results dictionary
obj['RA']=ra
obj['DEC']=dec
obj['dRA']=a
obj['dDEC']=b
obj['dANG']=ang
return obj | Run GB's predict using an ABG file as input. | Below is the instruction that describes the task:
### Input:
Run GB's predict using an ABG file as input.
### Response:
def predict(abg,date,obs=568):
"""Run GB's predict using an ABG file as input."""
import orbfit
import RO.StringUtil
(ra,dec,a,b,ang) = orbfit.predict(abg,date,obs)
obj = {}  # results dictionary
obj['RA']=ra
obj['DEC']=dec
obj['dRA']=a
obj['dDEC']=b
obj['dANG']=ang
return obj |
def get_objectlist(description, config_key, module):
"""
Take a description and return a list of classes.
Parameters
----------
description : list of dictionaries
Each dictionary has only one entry. The key is the name of a class. The
value of that entry is a list of dictionaries again. Those dictionaries
are parameters.
Returns
-------
List of objects.
"""
object_list = []
for feature in description:
for feat, params in feature.items():
feat = get_class(feat, config_key, module)
if params is None:
object_list.append(feat())
else:
parameters = {}
for dicts in params:
for param_name, param_value in dicts.items():
parameters[param_name] = param_value
object_list.append(feat(**parameters)) # pylint: disable=W0142
return object_list | Take a description and return a list of classes.
Parameters
----------
description : list of dictionaries
Each dictionary has only one entry. The key is the name of a class. The
value of that entry is a list of dictionaries again. Those dictionaries
are parameters.
Returns
-------
List of objects. | Below is the instruction that describes the task:
### Input:
Take a description and return a list of classes.
Parameters
----------
description : list of dictionaries
Each dictionary has only one entry. The key is the name of a class. The
value of that entry is a list of dictionaries again. Those dictionaries
are parameters.
Returns
-------
List of objects.
### Response:
def get_objectlist(description, config_key, module):
"""
Take a description and return a list of classes.
Parameters
----------
description : list of dictionaries
Each dictionary has only one entry. The key is the name of a class. The
value of that entry is a list of dictionaries again. Those dictionaries
are parameters.
Returns
-------
List of objects.
"""
object_list = []
for feature in description:
for feat, params in feature.items():
feat = get_class(feat, config_key, module)
if params is None:
object_list.append(feat())
else:
parameters = {}
for dicts in params:
for param_name, param_value in dicts.items():
parameters[param_name] = param_value
object_list.append(feat(**parameters)) # pylint: disable=W0142
return object_list |
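To make the expected shape of `description` concrete, here is a toy, self-contained version; `ScaleFeature`, `IdentityFeature` and the dict-based resolver are hypothetical stand-ins for whatever `get_class` resolves in the real project.

```python
# Toy resolver and classes standing in for get_class(); the structure of
# `description` (a list of one-key dicts whose values are lists of parameter
# dicts) matches the docstring above.
class ScaleFeature:
    def __init__(self, factor=1.0, offset=0.0):
        self.factor, self.offset = factor, offset

class IdentityFeature:
    pass

REGISTRY = {'ScaleFeature': ScaleFeature, 'IdentityFeature': IdentityFeature}

def get_objectlist_sketch(description):
    objects = []
    for feature in description:
        for name, params in feature.items():
            kwargs = {}
            for d in (params or []):
                kwargs.update(d)
            objects.append(REGISTRY[name](**kwargs))
    return objects

objs = get_objectlist_sketch([
    {'ScaleFeature': [{'factor': 2.0}, {'offset': 1.0}]},
    {'IdentityFeature': None},
])
print([type(o).__name__ for o in objs])  # ['ScaleFeature', 'IdentityFeature']
```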
def get_datatypes(self):
"""
Returns a set of datatypes in this cell.
Returns
-------
out : set
Set of the datatypes used in this cell.
"""
datatypes = set()
for element in self.elements:
if isinstance(element, PolygonSet):
datatypes.update(element.datatypes)
elif isinstance(element, CellReference) or isinstance(
element, CellArray):
datatypes.update(element.ref_cell.get_datatypes())
return datatypes | Returns a set of datatypes in this cell.
Returns
-------
out : set
Set of the datatypes used in this cell. | Below is the instruction that describes the task:
### Input:
Returns a set of datatypes in this cell.
Returns
-------
out : set
Set of the datatypes used in this cell.
### Response:
def get_datatypes(self):
"""
Returns a set of datatypes in this cell.
Returns
-------
out : set
Set of the datatypes used in this cell.
"""
datatypes = set()
for element in self.elements:
if isinstance(element, PolygonSet):
datatypes.update(element.datatypes)
elif isinstance(element, CellReference) or isinstance(
element, CellArray):
datatypes.update(element.ref_cell.get_datatypes())
return datatypes |
def check_port(self, port):
"""
Attempts to bind to the requested communicator port, checking if it is already in use.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("localhost", port))
except socket.error:
raise UnityWorkerInUseException(self.worker_id)
finally:
s.close() | Attempts to bind to the requested communicator port, checking if it is already in use. | Below is the instruction that describes the task:
### Input:
Attempts to bind to the requested communicator port, checking if it is already in use.
### Response:
def check_port(self, port):
"""
Attempts to bind to the requested communicator port, checking if it is already in use.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("localhost", port))
except socket.error:
raise UnityWorkerInUseException(self.worker_id)
finally:
s.close() |
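The same bind-and-release probe can be written as a small boolean helper; this is a sketch of the idea rather than the original class method.

```python
import socket

def port_in_use(port, host="localhost"):
    # Try to bind; failure to bind means something already holds the port.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((host, port))
    except OSError:
        return True
    finally:
        s.close()
    return False

print(port_in_use(50123))  # usually False unless a local service uses that port
```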
def roughness_Farshad(ID=None, D=None, coeffs=None):
r'''Calculates or retrieves the roughness of a pipe based on the work of
[1]_. This function will return an average value for pipes of a given
material, or if diameter is provided, will calculate one specifically for
the pipe inner diameter according to the following expression with
constants `A` and `B`:
.. math::
\epsilon = A\cdot D^{B+1}
Please note that `A` has units of inches, and `B` requires `D` to be in
inches as well.
The list of supported materials is as follows:
* 'Plastic coated'
* 'Carbon steel, honed bare'
* 'Cr13, electropolished bare'
* 'Cement lining'
* 'Carbon steel, bare'
* 'Fiberglass lining'
* 'Cr13, bare'
If `coeffs` and `D` are given, the custom coefficients for the equation as
given by the user will be used and `ID` is not required.
Parameters
----------
ID : str, optional
Name of pipe material from above list
D : float, optional
Actual inner diameter of pipe, [m]
coeffs : tuple, optional
(A, B) Coefficients to use directly, instead of looking them up;
they are actually dimensional, in the forms (inch^-B, -) but only
coefficients with those dimensions are available [-]
Returns
-------
epsilon : float
Roughness of pipe [m]
Notes
-----
The diameter-dependent form provides lower roughness values for larger
diameters.
The measurements were based on DIN 4768/1 (1987), using both a
"Dektak ST Surface Profiler" and a "Hommel Tester T1000". Both instruments
were found to be in agreement. A series of flow tests, in which pressure
drop was directly measured, were performed as well, with nitrogen gas as an
operating fluid. The accuracy of the data from these tests is claimed to be
within 1%.
Using those results, the authors back-calculated what relative roughness
values would be necessary to produce the observed pressure drops. The
average difference between this back-calculated roughness and the measured
roughness was 6.75%.
For microchannels, this model will predict roughness much larger than the
actual channel diameter.
Examples
--------
>>> roughness_Farshad('Cr13, bare', 0.05)
5.3141677781137006e-05
References
----------
.. [1] Farshad, Fred F., and Herman H. Rieke. "Surface Roughness Design
Values for Modern Pipes." SPE Drilling & Completion 21, no. 3 (September
1, 2006): 212-215. doi:10.2118/89040-PA.
'''
# Case 1, coeffs given; only run if ID is not given.
if ID is None and coeffs:
A, B = coeffs
return A*(D/inch)**(B+1)*inch
# Case 2, lookup parameters
try:
dat = _Farshad_roughness[ID]
except KeyError:
raise KeyError('ID was not in _Farshad_roughness.')
if D is None:
return dat[0]
else:
A, B = dat[1], dat[2]
return A*(D/inch)**(B+1)*inch | r'''Calculates or retrieves the roughness of a pipe based on the work of
[1]_. This function will return an average value for pipes of a given
material, or if diameter is provided, will calculate one specifically for
the pipe inner diameter according to the following expression with
constants `A` and `B`:
.. math::
\epsilon = A\cdot D^{B+1}
Please note that `A` has units of inches, and `B` requires `D` to be in
inches as well.
The list of supported materials is as follows:
* 'Plastic coated'
* 'Carbon steel, honed bare'
* 'Cr13, electropolished bare'
* 'Cement lining'
* 'Carbon steel, bare'
* 'Fiberglass lining'
* 'Cr13, bare'
If `coeffs` and `D` are given, the custom coefficients for the equation as
given by the user will be used and `ID` is not required.
Parameters
----------
ID : str, optional
Name of pipe material from above list
D : float, optional
Actual inner diameter of pipe, [m]
coeffs : tuple, optional
(A, B) Coefficients to use directly, instead of looking them up;
they are actually dimensional, in the forms (inch^-B, -) but only
coefficients with those dimensions are available [-]
Returns
-------
epsilon : float
Roughness of pipe [m]
Notes
-----
The diameter-dependent form provides lower roughness values for larger
diameters.
The measurements were based on DIN 4768/1 (1987), using both a
"Dektak ST Surface Profiler" and a "Hommel Tester T1000". Both instruments
were found to be in agreement. A series of flow tests, in which pressure
drop was directly measured, were performed as well, with nitrogen gas as an
operating fluid. The accuracy of the data from these tests is claimed to be
within 1%.
Using those results, the authors back-calculated what relative roughness
values would be necessary to produce the observed pressure drops. The
average difference between this back-calculated roughness and the measured
roughness was 6.75%.
For microchannels, this model will predict roughness much larger than the
actual channel diameter.
Examples
--------
>>> roughness_Farshad('Cr13, bare', 0.05)
5.3141677781137006e-05
References
----------
.. [1] Farshad, Fred F., and Herman H. Rieke. "Surface Roughness Design
Values for Modern Pipes." SPE Drilling & Completion 21, no. 3 (September
1, 2006): 212-215. doi:10.2118/89040-PA. | Below is the instruction that describes the task:
### Input:
r'''Calculates or retrieves the roughness of a pipe based on the work of
[1]_. This function will return an average value for pipes of a given
material, or if diameter is provided, will calculate one specifically for
the pipe inner diameter according to the following expression with
constants `A` and `B`:
.. math::
\epsilon = A\cdot D^{B+1}
Please note that `A` has units of inches, and `B` requires `D` to be in
inches as well.
The list of supported materials is as follows:
* 'Plastic coated'
* 'Carbon steel, honed bare'
* 'Cr13, electropolished bare'
* 'Cement lining'
* 'Carbon steel, bare'
* 'Fiberglass lining'
* 'Cr13, bare'
If `coeffs` and `D` are given, the custom coefficients for the equation as
given by the user will be used and `ID` is not required.
Parameters
----------
ID : str, optional
Name of pipe material from above list
D : float, optional
Actual inner diameter of pipe, [m]
coeffs : tuple, optional
(A, B) Coefficients to use directly, instead of looking them up;
they are actually dimensional, in the forms (inch^-B, -) but only
coefficients with those dimensions are available [-]
Returns
-------
epsilon : float
Roughness of pipe [m]
Notes
-----
The diameter-dependent form provides lower roughness values for larger
diameters.
The measurements were based on DIN 4768/1 (1987), using both a
"Dektak ST Surface Profiler" and a "Hommel Tester T1000". Both instruments
were found to be in agreement. A series of flow tests, in which pressure
drop was directly measured, were performed as well, with nitrogen gas as an
operating fluid. The accuracy of the data from these tests is claimed to be
within 1%.
Using those results, the authors back-calculated what relative roughness
values would be necessary to produce the observed pressure drops. The
average difference between this back-calculated roughness and the measured
roughness was 6.75%.
For microchannels, this model will predict roughness much larger than the
actual channel diameter.
Examples
--------
>>> roughness_Farshad('Cr13, bare', 0.05)
5.3141677781137006e-05
References
----------
.. [1] Farshad, Fred F., and Herman H. Rieke. "Surface Roughness Design
Values for Modern Pipes." SPE Drilling & Completion 21, no. 3 (September
1, 2006): 212-215. doi:10.2118/89040-PA.
### Response:
def roughness_Farshad(ID=None, D=None, coeffs=None):
r'''Calculates or retrieves the roughness of a pipe based on the work of
[1]_. This function will return an average value for pipes of a given
material, or if diameter is provided, will calculate one specifically for
the pipe inner diameter according to the following expression with
constants `A` and `B`:
.. math::
\epsilon = A\cdot D^{B+1}
Please note that `A` has units of inches, and `B` requires `D` to be in
inches as well.
The list of supported materials is as follows:
* 'Plastic coated'
* 'Carbon steel, honed bare'
* 'Cr13, electropolished bare'
* 'Cement lining'
* 'Carbon steel, bare'
* 'Fiberglass lining'
* 'Cr13, bare'
If `coeffs` and `D` are given, the custom coefficients for the equation as
given by the user will be used and `ID` is not required.
Parameters
----------
ID : str, optional
Name of pipe material from above list
D : float, optional
Actual inner diameter of pipe, [m]
coeffs : tuple, optional
(A, B) Coefficients to use directly, instead of looking them up;
they are actually dimensional, in the forms (inch^-B, -) but only
coefficients with those dimensions are available [-]
Returns
-------
epsilon : float
Roughness of pipe [m]
Notes
-----
The diameter-dependent form provides lower roughness values for larger
diameters.
The measurements were based on DIN 4768/1 (1987), using both a
"Dektak ST Surface Profiler" and a "Hommel Tester T1000". Both instruments
were found to be in agreement. A series of flow tests, in which pressure
drop was directly measured, were performed as well, with nitrogen gas as an
operating fluid. The accuracy of the data from these tests is claimed to be
within 1%.
Using those results, the authors back-calculated what relative roughness
values would be necessary to produce the observed pressure drops. The
average difference between this back-calculated roughness and the measured
roughness was 6.75%.
For microchannels, this model will predict roughness much larger than the
actual channel diameter.
Examples
--------
>>> roughness_Farshad('Cr13, bare', 0.05)
5.3141677781137006e-05
References
----------
.. [1] Farshad, Fred F., and Herman H. Rieke. "Surface Roughness Design
Values for Modern Pipes." SPE Drilling & Completion 21, no. 3 (September
1, 2006): 212-215. doi:10.2118/89040-PA.
'''
# Case 1, coeffs given; only run if ID is not given.
if ID is None and coeffs:
A, B = coeffs
return A*(D/inch)**(B+1)*inch
# Case 2, lookup parameters
try:
dat = _Farshad_roughness[ID]
except KeyError:
raise KeyError('ID was not in _Farshad_roughness.')
if D is None:
return dat[0]
else:
A, B = dat[1], dat[2]
return A*(D/inch)**(B+1)*inch |
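The unit handling in the `coeffs` branch is easy to miss: `A` and `B` are fitted with the diameter in inches, so `D` is converted to inches, the correlation is applied, and the result is converted back to metres. A small sketch of just that arithmetic; the coefficient pair below is an illustrative placeholder, not a tabulated value from the paper.

```python
inch = 0.0254  # metres per inch

def roughness_from_coeffs(D, A, B):
    # epsilon = A * D_in_inches**(B + 1), converted back to metres
    return A * (D / inch) ** (B + 1) * inch

# A and B are illustrative placeholders only.
print(roughness_from_coeffs(0.05, A=0.0021, B=-1.0055))
```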
def all_docs_with_tag(self, doc_tag):
"""
Returns all the documents with the specified tag.
"""
docs = []
while True:
try:
doc = self.next_doc_with(doc_tag)
docs.append(doc)
except StopIteration:
break
self.seek(0)
return docs | Returns all the documents with the specified tag. | Below is the instruction that describes the task:
### Input:
Returns all the documents with the specified tag.
### Response:
def all_docs_with_tag(self, doc_tag):
"""
Returns all the documents with the specified tag.
"""
docs = []
while True:
try:
doc = self.next_doc_with(doc_tag)
docs.append(doc)
except StopIteration:
break
self.seek(0)
return docs |
def group_by(self, to_key):
"""
:param to_key:
:type to_key: T -> unicode
:rtype: TDict[TList[T]]
Usage:
>>> TList([1, 2, 3, 4, 5]).group_by(lambda x: x % 2).to_json()
'{"0": [2,4],"1": [1,3,5]}'
"""
ret = TDict()
for v in self:
k = to_key(v)
ret.setdefault(k, TList())
ret[k].append(v)
return ret | :param to_key:
:type to_key: T -> unicode
:rtype: TDict[TList[T]]
Usage:
>>> TList([1, 2, 3, 4, 5]).group_by(lambda x: x % 2).to_json()
'{"0": [2,4],"1": [1,3,5]}' | Below is the the instruction that describes the task:
### Input:
:param to_key:
:type to_key: T -> unicode
:rtype: TDict[TList[T]]
Usage:
>>> TList([1, 2, 3, 4, 5]).group_by(lambda x: x % 2).to_json()
'{"0": [2,4],"1": [1,3,5]}'
### Response:
def group_by(self, to_key):
"""
:param to_key:
:type to_key: T -> unicode
:rtype: TDict[TList[T]]
Usage:
>>> TList([1, 2, 3, 4, 5]).group_by(lambda x: x % 2).to_json()
'{"0": [2,4],"1": [1,3,5]}'
"""
ret = TDict()
for v in self:
k = to_key(v)
ret.setdefault(k, TList())
ret[k].append(v)
return ret |
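A plain-dict equivalent of the grouping above (`TList`/`TDict` belong to the owning library and are not reproduced here):

```python
def group_by(items, to_key):
    # setdefault keeps the first-seen insertion order of keys, like the original
    groups = {}
    for v in items:
        groups.setdefault(to_key(v), []).append(v)
    return groups

print(group_by([1, 2, 3, 4, 5], lambda x: x % 2))  # {1: [1, 3, 5], 0: [2, 4]}
```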
def posthoc_tukey_hsd(x, g, alpha=0.05):
'''Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
'''
result = pairwise_tukeyhsd(x, g, alpha=alpha)
groups = np.array(result.groupsunique, dtype=np.str)
groups_len = len(groups)
vs = np.zeros((groups_len, groups_len), dtype=np.int)
for a in result.summary()[1:]:
a0 = str(a[0])
a1 = str(a[1])
a0i = np.where(groups == a0)[0][0]
a1i = np.where(groups == a1)[0][0]
vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0
vs = np.triu(vs)
np.fill_diagonal(vs, -1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[tri_lower] = vs.T[tri_lower]
return DataFrame(vs, index=groups, columns=groups) | Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g)) | Below is the instruction that describes the task:
### Input:
Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
### Response:
def posthoc_tukey_hsd(x, g, alpha=0.05):
'''Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
'''
result = pairwise_tukeyhsd(x, g, alpha=alpha)
groups = np.array(result.groupsunique, dtype=np.str)
groups_len = len(groups)
vs = np.zeros((groups_len, groups_len), dtype=np.int)
for a in result.summary()[1:]:
a0 = str(a[0])
a1 = str(a[1])
a0i = np.where(groups == a0)[0][0]
a1i = np.where(groups == a1)[0][0]
vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0
vs = np.triu(vs)
np.fill_diagonal(vs, -1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[tri_lower] = vs.T[tri_lower]
return DataFrame(vs, index=groups, columns=groups) |
def predict_y(self, Xnew):
"""
Compute the mean and variance of held-out data at the points Xnew
"""
pred_f_mean, pred_f_var = self._build_predict(Xnew)
return self.likelihood.predict_mean_and_var(pred_f_mean, pred_f_var) | Compute the mean and variance of held-out data at the points Xnew | Below is the the instruction that describes the task:
### Input:
Compute the mean and variance of held-out data at the points Xnew
### Response:
def predict_y(self, Xnew):
"""
Compute the mean and variance of held-out data at the points Xnew
"""
pred_f_mean, pred_f_var = self._build_predict(Xnew)
return self.likelihood.predict_mean_and_var(pred_f_mean, pred_f_var) |
def __publish(topic, message, subject=None):
""" Publish a message to a SNS topic
:type topic: str
:param topic: SNS topic to publish the message to
:type message: str
:param message: Message to send via SNS
:type subject: str
:param subject: Subject to use for e-mail notifications
:returns: None
"""
try:
SNS_CONNECTION.publish(topic=topic, message=message, subject=subject)
logger.info('Sent SNS notification to {0}'.format(topic))
except BotoServerError as error:
logger.error('Problem sending SNS notification: {0}'.format(
error.message))
return | Publish a message to a SNS topic
:type topic: str
:param topic: SNS topic to publish the message to
:type message: str
:param message: Message to send via SNS
:type subject: str
:param subject: Subject to use for e-mail notifications
:returns: None | Below is the instruction that describes the task:
### Input:
Publish a message to a SNS topic
:type topic: str
:param topic: SNS topic to publish the message to
:type message: str
:param message: Message to send via SNS
:type subject: str
:param subject: Subject to use for e-mail notifications
:returns: None
### Response:
def __publish(topic, message, subject=None):
""" Publish a message to a SNS topic
:type topic: str
:param topic: SNS topic to publish the message to
:type message: str
:param message: Message to send via SNS
:type subject: str
:param subject: Subject to use for e-mail notifications
:returns: None
"""
try:
SNS_CONNECTION.publish(topic=topic, message=message, subject=subject)
logger.info('Sent SNS notification to {0}'.format(topic))
except BotoServerError as error:
logger.error('Problem sending SNS notification: {0}'.format(
error.message))
return |
def watchpoint_set(self,
addr,
addr_mask=0x0,
data=0x0,
data_mask=0x0,
access_size=None,
read=False,
write=False,
privileged=False):
"""Sets a watchpoint at the given address.
This method allows for a watchpoint to be set on a given address or
range of addresses. The watchpoint can then be triggered if the data
at the given address matches the specified ``data`` or range of data as
determined by ``data_mask``, on specific access size events, reads,
writes, or privileged accesses.
Both ``addr_mask`` and ``data_mask`` are used to specify ranges. Bits
set to ``1`` are masked out and not taken into consideration when
comparing against an address or data value. E.g. an ``addr_mask``
with a value of ``0x1`` and ``addr`` with value ``0xdeadbeef`` means
that the watchpoint will be set on addresses ``0xdeadbeef`` and
``0xdeadbeee``. If the ``data`` was ``0x11223340`` and the given
``data_mask`` has a value of ``0x0000000F``, then the watchpoint would
trigger for data matching ``0x11223340 - 0x1122334F``.
Note:
If both ``read`` and ``write`` are specified, then the watchpoint
will trigger on both read and write events to the given address.
Args:
self (JLink): the ``JLink`` instance
addr_mask (int): optional mask to use for determining which address
the watchpoint should be set on
data (int): optional data to set the watchpoint on in order to have
the watchpoint triggered when the value at the specified address
matches the given ``data``
data_mask (int): optional mask to use for determining the range of
data on which the watchpoint should be triggered
access_size (int): if specified, this must be one of ``{8, 16, 32}``
and determines the access size for which the watchpoint should
trigger
read (bool): if ``True``, triggers the watchpoint on read events
write (bool): if ``True``, triggers the watchpoint on write events
privileged (bool): if ``True``, triggers the watchpoint on privileged
accesses
Returns:
The handle of the created watchpoint.
Raises:
ValueError: if an invalid access size is given.
JLinkException: if the watchpoint fails to be set.
"""
access_flags = 0x0
access_mask_flags = 0x0
# If an access size is not specified, we must specify that the size of
# the access does not matter by specifying the access mask flags.
if access_size is None:
access_mask_flags = access_mask_flags | enums.JLinkAccessMaskFlags.SIZE
elif access_size == 8:
access_flags = access_flags | enums.JLinkAccessFlags.SIZE_8BIT
elif access_size == 16:
access_flags = access_flags | enums.JLinkAccessFlags.SIZE_16BIT
elif access_size == 32:
access_flags = access_flags | enums.JLinkAccessFlags.SIZE_32BIT
else:
raise ValueError('Invalid access size given: %d' % access_size)
# The read and write access flags cannot be specified together, so if
# the user specifies that they want read and write access, then the
# access mask flag must be set.
if read and write:
access_mask_flags = access_mask_flags | enums.JLinkAccessMaskFlags.DIR
elif read:
access_flags = access_flags | enums.JLinkAccessFlags.READ
elif write:
access_flags = access_flags | enums.JLinkAccessFlags.WRITE
# If privileged is not specified, then there is no specification level
# on which kinds of writes should be accessed, in which case we must
# specify that flag.
if privileged:
access_flags = access_flags | enums.JLinkAccessFlags.PRIV
else:
access_mask_flags = access_mask_flags | enums.JLinkAccessMaskFlags.PRIV
# Populate the Data event to configure how the watchpoint is triggered.
wp = structs.JLinkDataEvent()
wp.Addr = addr
wp.AddrMask = addr_mask
wp.Data = data
wp.DataMask = data_mask
wp.Access = access_flags
wp.AccessMask = access_mask_flags
# Return value of the function is <= 0 in the event of an error,
# otherwise the watchpoint was set successfully.
handle = ctypes.c_uint32()
res = self._dll.JLINKARM_SetDataEvent(ctypes.pointer(wp), ctypes.pointer(handle))
if res < 0:
raise errors.JLinkDataException(res)
return handle.value | Sets a watchpoint at the given address.
This method allows for a watchpoint to be set on a given address or
range of addresses. The watchpoint can then be triggered if the data
at the given address matches the specified ``data`` or range of data as
determined by ``data_mask``, on specific access size events, reads,
writes, or privileged accesses.
Both ``addr_mask`` and ``data_mask`` are used to specify ranges. Bits
set to ``1`` are masked out and not taken into consideration when
comparing against an address or data value. E.g. an ``addr_mask``
with a value of ``0x1`` and ``addr`` with value ``0xdeadbeef`` means
that the watchpoint will be set on addresses ``0xdeadbeef`` and
``0xdeadbeee``. If the ``data`` was ``0x11223340`` and the given
``data_mask`` has a value of ``0x0000000F``, then the watchpoint would
trigger for data matching ``0x11223340 - 0x1122334F``.
Note:
If both ``read`` and ``write`` are specified, then the watchpoint
will trigger on both read and write events to the given address.
Args:
self (JLink): the ``JLink`` instance
addr_mask (int): optional mask to use for determining which address
the watchpoint should be set on
data (int): optional data to set the watchpoint on in order to have
the watchpoint triggered when the value at the specified address
matches the given ``data``
data_mask (int): optional mask to use for determining the range of
data on which the watchpoint should be triggered
access_size (int): if specified, this must be one of ``{8, 16, 32}``
and determines the access size for which the watchpoint should
trigger
read (bool): if ``True``, triggers the watchpoint on read events
write (bool): if ``True``, triggers the watchpoint on write events
privileged (bool): if ``True``, triggers the watchpoint on privileged
accesses
Returns:
The handle of the created watchpoint.
Raises:
ValueError: if an invalid access size is given.
JLinkException: if the watchpoint fails to be set. | Below is the instruction that describes the task:
### Input:
Sets a watchpoint at the given address.
This method allows for a watchpoint to be set on a given address or
range of addresses. The watchpoint can then be triggered if the data
at the given address matches the specified ``data`` or range of data as
determined by ``data_mask``, on specific access size events, reads,
writes, or privileged accesses.
Both ``addr_mask`` and ``data_mask`` are used to specify ranges. Bits
set to ``1`` are masked out and not taken into consideration when
comparing against an address or data value. E.g. an ``addr_mask``
with a value of ``0x1`` and ``addr`` with value ``0xdeadbeef`` means
that the watchpoint will be set on addresses ``0xdeadbeef`` and
``0xdeadbeee``. If the ``data`` was ``0x11223340`` and the given
``data_mask`` has a value of ``0x0000000F``, then the watchpoint would
trigger for data matching ``0x11223340 - 0x1122334F``.
Note:
If both ``read`` and ``write`` are specified, then the watchpoint
will trigger on both read and write events to the given address.
Args:
self (JLink): the ``JLink`` instance
addr_mask (int): optional mask to use for determining which address
the watchpoint should be set on
data (int): optional data to set the watchpoint on in order to have
the watchpoint triggered when the value at the specified address
matches the given ``data``
data_mask (int): optional mask to use for determining the range of
data on which the watchpoint should be triggered
access_size (int): if specified, this must be one of ``{8, 16, 32}``
and determines the access size for which the watchpoint should
trigger
read (bool): if ``True``, triggers the watchpoint on read events
write (bool): if ``True``, triggers the watchpoint on write events
privileged (bool): if ``True``, triggers the watchpoint on privileged
accesses
Returns:
The handle of the created watchpoint.
Raises:
ValueError: if an invalid access size is given.
JLinkException: if the watchpoint fails to be set.
### Response:
def watchpoint_set(self,
addr,
addr_mask=0x0,
data=0x0,
data_mask=0x0,
access_size=None,
read=False,
write=False,
privileged=False):
"""Sets a watchpoint at the given address.
This method allows for a watchpoint to be set on a given address or
range of addresses. The watchpoint can then be triggered if the data
at the given address matches the specified ``data`` or range of data as
determined by ``data_mask``, on specific access size events, reads,
writes, or privileged accesses.
Both ``addr_mask`` and ``data_mask`` are used to specify ranges. Bits
set to ``1`` are masked out and not taken into consideration when
comparing against an address or data value. E.g. an ``addr_mask``
with a value of ``0x1`` and ``addr`` with value ``0xdeadbeef`` means
that the watchpoint will be set on addresses ``0xdeadbeef`` and
``0xdeadbeee``. If the ``data`` was ``0x11223340`` and the given
``data_mask`` has a value of ``0x0000000F``, then the watchpoint would
trigger for data matching ``0x11223340 - 0x1122334F``.
Note:
If both ``read`` and ``write`` are specified, then the watchpoint
will trigger on both read and write events to the given address.
Args:
self (JLink): the ``JLink`` instance
addr_mask (int): optional mask to use for determining which address
the watchpoint should be set on
data (int): optional data to set the watchpoint on in order to have
the watchpoint triggered when the value at the specified address
matches the given ``data``
data_mask (int): optional mask to use for determining the range of
data on which the watchpoint should be triggered
access_size (int): if specified, this must be one of ``{8, 16, 32}``
and determines the access size for which the watchpoint should
trigger
read (bool): if ``True``, triggers the watchpoint on read events
write (bool): if ``True``, triggers the watchpoint on write events
privileged (bool): if ``True``, triggers the watchpoint on privileged
accesses
Returns:
The handle of the created watchpoint.
Raises:
ValueError: if an invalid access size is given.
JLinkException: if the watchpoint fails to be set.
"""
access_flags = 0x0
access_mask_flags = 0x0
# If an access size is not specified, we must specify that the size of
# the access does not matter by specifying the access mask flags.
if access_size is None:
access_mask_flags = access_mask_flags | enums.JLinkAccessMaskFlags.SIZE
elif access_size == 8:
access_flags = access_flags | enums.JLinkAccessFlags.SIZE_8BIT
elif access_size == 16:
access_flags = access_flags | enums.JLinkAccessFlags.SIZE_16BIT
elif access_size == 32:
access_flags = access_flags | enums.JLinkAccessFlags.SIZE_32BIT
else:
raise ValueError('Invalid access size given: %d' % access_size)
# The read and write access flags cannot be specified together, so if
# the user specifies that they want read and write access, then the
# access mask flag must be set.
if read and write:
access_mask_flags = access_mask_flags | enums.JLinkAccessMaskFlags.DIR
elif read:
access_flags = access_flags | enums.JLinkAccessFlags.READ
elif write:
access_flags = access_flags | enums.JLinkAccessFlags.WRITE
# If privileged is not specified, then there is no specification level
# on which kinds of writes should be accessed, in which case we must
# specify that flag.
if privileged:
access_flags = access_flags | enums.JLinkAccessFlags.PRIV
else:
access_mask_flags = access_mask_flags | enums.JLinkAccessMaskFlags.PRIV
# Populate the Data event to configure how the watchpoint is triggered.
wp = structs.JLinkDataEvent()
wp.Addr = addr
wp.AddrMask = addr_mask
wp.Data = data
wp.DataMask = data_mask
wp.Access = access_flags
wp.AccessMask = access_mask_flags
# Return value of the function is <= 0 in the event of an error,
# otherwise the watchpoint was set successfully.
handle = ctypes.c_uint32()
res = self._dll.JLINKARM_SetDataEvent(ctypes.pointer(wp), ctypes.pointer(handle))
if res < 0:
raise errors.JLinkDataException(res)
return handle.value |
def fhp_from_json_dict(
json_dict # type: Dict[str, Any]
):
# type: (...) -> FieldHashingProperties
"""
Make a :class:`FieldHashingProperties` object from a dictionary.
:param dict json_dict:
The dictionary must have an 'ngram' key
and one of k or num_bits. It may have
'positional' key; if missing a default is used.
The encoding is
always set to the default value.
:return: A :class:`FieldHashingProperties` instance.
"""
h = json_dict.get('hash', {'type': 'blakeHash'})
num_bits = json_dict.get('numBits')
k = json_dict.get('k')
if not num_bits and not k:
num_bits = 200 # default for v2 schema
return FieldHashingProperties(
ngram=json_dict['ngram'],
positional=json_dict.get(
'positional', FieldHashingProperties._DEFAULT_POSITIONAL),
hash_type=h['type'],
prevent_singularity=h.get('prevent_singularity'),
num_bits=num_bits,
k=k,
missing_value=MissingValueSpec.from_json_dict(
json_dict[
'missingValue']) if 'missingValue' in json_dict else None
) | Make a :class:`FieldHashingProperties` object from a dictionary.
:param dict json_dict:
The dictionary must have an 'ngram' key
and one of k or num_bits. It may have
'positional' key; if missing a default is used.
The encoding is
always set to the default value.
:return: A :class:`FieldHashingProperties` instance. | Below is the instruction that describes the task:
### Input:
Make a :class:`FieldHashingProperties` object from a dictionary.
:param dict json_dict:
The dictionary must have an 'ngram' key
and one of k or num_bits. It may have
'positional' key; if missing a default is used.
The encoding is
always set to the default value.
:return: A :class:`FieldHashingProperties` instance.
### Response:
def fhp_from_json_dict(
json_dict # type: Dict[str, Any]
):
# type: (...) -> FieldHashingProperties
"""
Make a :class:`FieldHashingProperties` object from a dictionary.
:param dict json_dict:
The dictionary must have an 'ngram' key
and one of k or num_bits. It may have
'positional' key; if missing a default is used.
The encoding is
always set to the default value.
:return: A :class:`FieldHashingProperties` instance.
"""
h = json_dict.get('hash', {'type': 'blakeHash'})
num_bits = json_dict.get('numBits')
k = json_dict.get('k')
if not num_bits and not k:
num_bits = 200 # default for v2 schema
return FieldHashingProperties(
ngram=json_dict['ngram'],
positional=json_dict.get(
'positional', FieldHashingProperties._DEFAULT_POSITIONAL),
hash_type=h['type'],
prevent_singularity=h.get('prevent_singularity'),
num_bits=num_bits,
k=k,
missing_value=MissingValueSpec.from_json_dict(
json_dict[
'missingValue']) if 'missingValue' in json_dict else None
) |
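For orientation, an input dict of the shape the function expects; the 'doubleHash' type and the exact key names are illustrative assumptions about the schema, and `FieldHashingProperties` itself comes from the surrounding module.

```python
# Illustrative input only; pass through fhp_from_json_dict() in the real module.
json_dict = {
    'ngram': 2,
    'numBits': 300,
    'positional': True,
    'hash': {'type': 'doubleHash', 'prevent_singularity': True},
}
# If neither 'numBits' nor 'k' were present, the function above would fall
# back to num_bits = 200, the stated v2-schema default.
```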
def express_route_ports(self):
"""Instance depends on the API version:
* 2018-08-01: :class:`ExpressRoutePortsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRoutePortsOperations>`
"""
api_version = self._get_api_version('express_route_ports')
if api_version == '2018-08-01':
from .v2018_08_01.operations import ExpressRoutePortsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2018-08-01: :class:`ExpressRoutePortsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRoutePortsOperations>` | Below is the instruction that describes the task:
### Input:
Instance depends on the API version:
* 2018-08-01: :class:`ExpressRoutePortsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRoutePortsOperations>`
### Response:
def express_route_ports(self):
"""Instance depends on the API version:
* 2018-08-01: :class:`ExpressRoutePortsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRoutePortsOperations>`
"""
api_version = self._get_api_version('express_route_ports')
if api_version == '2018-08-01':
from .v2018_08_01.operations import ExpressRoutePortsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) |
def get_host(environ):
# type: (Dict[str, str]) -> str
"""Return the host for the given WSGI environment. Yanked from Werkzeug."""
if environ.get("HTTP_HOST"):
rv = environ["HTTP_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("SERVER_NAME"):
rv = environ["SERVER_NAME"]
if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
("https", "443"),
("http", "80"),
):
rv += ":" + environ["SERVER_PORT"]
else:
# In spite of the WSGI spec, SERVER_NAME might not be present.
rv = "unknown"
return rv | Return the host for the given WSGI environment. Yanked from Werkzeug. | Below is the the instruction that describes the task:
### Input:
Return the host for the given WSGI environment. Yanked from Werkzeug.
### Response:
def get_host(environ):
# type: (Dict[str, str]) -> str
"""Return the host for the given WSGI environment. Yanked from Werkzeug."""
if environ.get("HTTP_HOST"):
rv = environ["HTTP_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("SERVER_NAME"):
rv = environ["SERVER_NAME"]
if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
("https", "443"),
("http", "80"),
):
rv += ":" + environ["SERVER_PORT"]
else:
# In spite of the WSGI spec, SERVER_NAME might not be present.
rv = "unknown"
return rv |
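`get_host` is pure Python and self-contained, so its behaviour is easy to check with hand-built environ dicts: default ports are stripped, non-default ones are kept.

```python
# Hand-built WSGI environs exercising the three branches of get_host above.
print(get_host({"wsgi.url_scheme": "http", "HTTP_HOST": "example.com:80"}))
# -> example.com  (default port stripped)
print(get_host({"wsgi.url_scheme": "https", "SERVER_NAME": "example.com",
                "SERVER_PORT": "8443"}))
# -> example.com:8443  (non-default port kept)
print(get_host({"wsgi.url_scheme": "http"}))
# -> unknown  (neither HTTP_HOST nor SERVER_NAME present)
```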
def multi_load_data(Channel, RunNos, RepeatNos, directoryPath='.', calcPSD=True, NPerSegmentPSD=1000000):
"""
Lets you load multiple datasets at once assuming they have a
filename which contains a pattern of the form:
CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo>
Parameters
----------
Channel : int
The channel you want to load
RunNos : sequence
Sequence of run numbers you want to load
RepeatNos : sequence
Sequence of repeat numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data : list
A list containing the DataObjects that were loaded.
"""
matching_files = search_data_std(Channel=Channel, RunNos=RunNos, RepeatNos=RepeatNos, directoryPath=directoryPath)
#data = []
#for filepath in matching_files_:
# data.append(load_data(filepath, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD))
cpu_count = _cpu_count()
workerPool = _Pool(cpu_count)
load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)
data = workerPool.map(load_data_partial, matching_files)
workerPool.close()
workerPool.terminate()
workerPool.join()
#with _Pool(cpu_count) as workerPool:
#load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)
#data = workerPool.map(load_data_partial, files_CorrectRepeatNo)
return data | Lets you load multiple datasets at once assuming they have a
filename which contains a pattern of the form:
CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo>
Parameters
----------
Channel : int
The channel you want to load
RunNos : sequence
Sequence of run numbers you want to load
RepeatNos : sequence
Sequence of repeat numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data : list
A list containing the DataObjects that were loaded. | Below is the instruction that describes the task:
### Input:
Lets you load multiple datasets at once assuming they have a
filename which contains a pattern of the form:
CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo>
Parameters
----------
Channel : int
The channel you want to load
RunNos : sequence
Sequence of run numbers you want to load
RepeatNos : sequence
Sequence of repeat numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data : list
A list containing the DataObjects that were loaded.
### Response:
def multi_load_data(Channel, RunNos, RepeatNos, directoryPath='.', calcPSD=True, NPerSegmentPSD=1000000):
"""
Lets you load multiple datasets at once assuming they have a
filename which contains a pattern of the form:
CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo>
Parameters
----------
Channel : int
The channel you want to load
RunNos : sequence
Sequence of run numbers you want to load
RepeatNos : sequence
Sequence of repeat numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data : list
A list containing the DataObjects that were loaded.
"""
matching_files = search_data_std(Channel=Channel, RunNos=RunNos, RepeatNos=RepeatNos, directoryPath=directoryPath)
#data = []
#for filepath in matching_files_:
# data.append(load_data(filepath, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD))
cpu_count = _cpu_count()
workerPool = _Pool(cpu_count)
load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)
data = workerPool.map(load_data_partial, matching_files)
workerPool.close()
workerPool.terminate()
workerPool.join()
#with _Pool(cpu_count) as workerPool:
#load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)
#data = workerPool.map(load_data_partial, files_CorrectRepeatNo)
return data |
def specific_gains(string):
"""Convert string with gains of individual amplification elements to dict"""
if not string:
return {}
gains = {}
for gain in string.split(','):
amp_name, value = gain.split('=')
gains[amp_name.strip()] = float(value.strip())
return gains | Convert string with gains of individual amplification elements to dict | Below is the the instruction that describes the task:
### Input:
Convert string with gains of individual amplification elements to dict
### Response:
def specific_gains(string):
"""Convert string with gains of individual amplification elements to dict"""
if not string:
return {}
gains = {}
for gain in string.split(','):
amp_name, value = gain.split('=')
gains[amp_name.strip()] = float(value.strip())
return gains |
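`specific_gains` is self-contained, so a direct usage check works; the amplifier names below are arbitrary examples.

```python
print(specific_gains("LNA=32, VGA=20, AMP=0"))
# {'LNA': 32.0, 'VGA': 20.0, 'AMP': 0.0}
print(specific_gains(""))
# {}
```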
def run(app: web.Application, **kwargs):
"""Run an `aiohttp.web.Application` using gunicorn.
:param app: The app to run.
:param str app_uri: Import path to `app`. Takes the form
``$(MODULE_NAME):$(VARIABLE_NAME)``.
The module name can be a full dotted path.
The variable name refers to the `aiohttp.web.Application` instance.
This argument is required if ``reload=True``.
:param str host: Hostname to listen on.
:param int port: Port of the server.
:param bool reload: Whether to reload the server on a code change.
If not set, will take the same value as ``app.debug``.
**EXPERIMENTAL**.
:param \*\*kwargs: Extra configuration options to set on the
``GunicornApp's`` config object.
"""
runner = Runner(app, **kwargs)
runner.run() | Run an `aiohttp.web.Application` using gunicorn.
:param app: The app to run.
:param str app_uri: Import path to `app`. Takes the form
``$(MODULE_NAME):$(VARIABLE_NAME)``.
The module name can be a full dotted path.
The variable name refers to the `aiohttp.web.Application` instance.
This argument is required if ``reload=True``.
:param str host: Hostname to listen on.
:param int port: Port of the server.
:param bool reload: Whether to reload the server on a code change.
If not set, will take the same value as ``app.debug``.
**EXPERIMENTAL**.
:param \*\*kwargs: Extra configuration options to set on the
``GunicornApp's`` config object. | Below is the instruction that describes the task:
### Input:
Run an `aiohttp.web.Application` using gunicorn.
:param app: The app to run.
:param str app_uri: Import path to `app`. Takes the form
``$(MODULE_NAME):$(VARIABLE_NAME)``.
The module name can be a full dotted path.
The variable name refers to the `aiohttp.web.Application` instance.
This argument is required if ``reload=True``.
:param str host: Hostname to listen on.
:param int port: Port of the server.
:param bool reload: Whether to reload the server on a code change.
If not set, will take the same value as ``app.debug``.
**EXPERIMENTAL**.
:param \*\*kwargs: Extra configuration options to set on the
``GunicornApp's`` config object.
### Response:
def run(app: web.Application, **kwargs):
"""Run an `aiohttp.web.Application` using gunicorn.
:param app: The app to run.
:param str app_uri: Import path to `app`. Takes the form
``$(MODULE_NAME):$(VARIABLE_NAME)``.
The module name can be a full dotted path.
The variable name refers to the `aiohttp.web.Application` instance.
This argument is required if ``reload=True``.
:param str host: Hostname to listen on.
:param int port: Port of the server.
:param bool reload: Whether to reload the server on a code change.
If not set, will take the same value as ``app.debug``.
**EXPERIMENTAL**.
:param \*\*kwargs: Extra configuration options to set on the
``GunicornApp's`` config object.
"""
runner = Runner(app, **kwargs)
runner.run() |
def list_keyvaults(access_token, subscription_id, rgname):
'''Lists key vaults in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults',
'?api-version=', KEYVAULT_API])
return do_get_next(endpoint, access_token) | Lists key vaults in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. 200 OK. | Below is the instruction that describes the task:
### Input:
Lists key vaults in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. 200 OK.
### Response:
def list_keyvaults(access_token, subscription_id, rgname):
'''Lists key vaults in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults',
'?api-version=', KEYVAULT_API])
return do_get_next(endpoint, access_token) |
def setup(self):
"""
Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z]
to the output file, and this breaks a lot of code that relies of the use of a unique file extension.
Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already
exists. A few lines of code in python, a lot of problems if you try to implement this trick in Fortran90.
"""
def rename_file(afile):
"""Helper function to rename :class:`File` objects. Return string for logging purpose."""
# Find the index of the last file (if any).
# TODO: Maybe it's better to use run.abo --> run(1).abo
fnames = [f for f in os.listdir(self.workdir) if f.startswith(afile.basename)]
nums = [int(f) for f in [f.split("_")[-1] for f in fnames] if f.isdigit()]
last = max(nums) if nums else 0
new_path = afile.path + "_" + str(last+1)
os.rename(afile.path, new_path)
return "Will rename %s to %s" % (afile.path, new_path)
logs = []
if self.output_file.exists: logs.append(rename_file(self.output_file))
if self.log_file.exists: logs.append(rename_file(self.log_file))
if logs:
self.history.info("\n".join(logs)) | Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z]
to the output file, and this breaks a lot of code that relies of the use of a unique file extension.
Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already
exists. A few lines of code in python, a lot of problems if you try to implement this trick in Fortran90. | Below is the the instruction that describes the task:
### Input:
Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z]
to the output file, and this breaks a lot of code that relies of the use of a unique file extension.
Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already
exists. A few lines of code in python, a lot of problems if you try to implement this trick in Fortran90.
### Response:
def setup(self):
"""
Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z]
to the output file, and this breaks a lot of code that relies of the use of a unique file extension.
Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already
exists. A few lines of code in python, a lot of problems if you try to implement this trick in Fortran90.
"""
def rename_file(afile):
"""Helper function to rename :class:`File` objects. Return string for logging purpose."""
# Find the index of the last file (if any).
# TODO: Maybe it's better to use run.abo --> run(1).abo
fnames = [f for f in os.listdir(self.workdir) if f.startswith(afile.basename)]
nums = [int(f) for f in [f.split("_")[-1] for f in fnames] if f.isdigit()]
last = max(nums) if nums else 0
new_path = afile.path + "_" + str(last+1)
os.rename(afile.path, new_path)
return "Will rename %s to %s" % (afile.path, new_path)
logs = []
if self.output_file.exists: logs.append(rename_file(self.output_file))
if self.log_file.exists: logs.append(rename_file(self.log_file))
if logs:
self.history.info("\n".join(logs)) |
def _product(k, v):
"""
Perform the product between two objects
even if they don't support iteration
"""
if not _can_iterate(k):
k = [k]
if not _can_iterate(v):
v = [v]
return list(product(k, v)) | Perform the product between two objects
even if they don't support iteration | Below is the the instruction that describes the task:
### Input:
Perform the product between two objects
even if they don't support iteration
### Response:
def _product(k, v):
"""
Perform the product between two objects
even if they don't support iteration
"""
if not _can_iterate(k):
k = [k]
if not _can_iterate(v):
v = [v]
return list(product(k, v)) |
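For a quick sense of the behaviour, here is a self-contained sketch; `_can_iterate` is not shown in this entry, so the stand-in below (a simple `__iter__` check) is an assumption:

from itertools import product

def _can_iterate(obj):
    # assumed stand-in for the helper used above
    return hasattr(obj, "__iter__")

def _product(k, v):
    if not _can_iterate(k):
        k = [k]
    if not _can_iterate(v):
        v = [v]
    return list(product(k, v))

print(_product(2, [3, 4]))   # [(2, 3), (2, 4)]
print(_product([1, 2], 5))   # [(1, 5), (2, 5)]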
def gen_repr(cls, template, *args, **kwargs):
"""Generates a string for :func:`repr`."""
buf = io.StringIO()
buf.write(u'<')
buf.write(cls.__module__.decode() if kwargs.pop('full', False) else u'etc')
buf.write(u'.')
buf.write(cls.__name__.decode())
if not kwargs.pop('dense', False):
buf.write(u' ')
buf.write(template.format(*args, **kwargs))
options = kwargs.pop('options', [])
for attr, value in options:
if value is not None:
buf.write(u' %s=%s' % (attr, value))
buf.write(u'>')
return buf.getvalue() | Generates a string for :func:`repr`. | Below is the the instruction that describes the task:
### Input:
Generates a string for :func:`repr`.
### Response:
def gen_repr(cls, template, *args, **kwargs):
"""Generates a string for :func:`repr`."""
buf = io.StringIO()
buf.write(u'<')
buf.write(cls.__module__.decode() if kwargs.pop('full', False) else u'etc')
buf.write(u'.')
buf.write(cls.__name__.decode())
if not kwargs.pop('dense', False):
buf.write(u' ')
buf.write(template.format(*args, **kwargs))
options = kwargs.pop('options', [])
for attr, value in options:
if value is not None:
buf.write(u' %s=%s' % (attr, value))
buf.write(u'>')
return buf.getvalue() |
def list_all_discount_promotions(cls, **kwargs):
"""List DiscountPromotions
Return a list of DiscountPromotions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_discount_promotions(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[DiscountPromotion]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_discount_promotions_with_http_info(**kwargs)
else:
(data) = cls._list_all_discount_promotions_with_http_info(**kwargs)
return data | List DiscountPromotions
Return a list of DiscountPromotions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_discount_promotions(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[DiscountPromotion]
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
List DiscountPromotions
Return a list of DiscountPromotions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_discount_promotions(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[DiscountPromotion]
If the method is called asynchronously,
returns the request thread.
### Response:
def list_all_discount_promotions(cls, **kwargs):
"""List DiscountPromotions
Return a list of DiscountPromotions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_discount_promotions(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[DiscountPromotion]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_discount_promotions_with_http_info(**kwargs)
else:
(data) = cls._list_all_discount_promotions_with_http_info(**kwargs)
return data |
def get_memory_banks_per_run(coreAssignment, cgroups):
"""Get an assignment of memory banks to runs that fits to the given coreAssignment,
i.e., no run is allowed to use memory that is not local (on the same NUMA node)
to one of its CPU cores."""
try:
# read list of available memory banks
allMems = set(cgroups.read_allowed_memory_banks())
result = []
for cores in coreAssignment:
mems = set()
for core in cores:
coreDir = '/sys/devices/system/cpu/cpu{0}/'.format(core)
mems.update(_get_memory_banks_listed_in_dir(coreDir))
allowedMems = sorted(mems.intersection(allMems))
logging.debug("Memory banks for cores %s are %s, of which we can use %s.", cores, list(mems), allowedMems)
result.append(allowedMems)
assert len(result) == len(coreAssignment)
if any(result) and os.path.isdir('/sys/devices/system/node/'):
return result
else:
# All runs get the empty list of memory regions
# because this system has no NUMA support
return None
except ValueError as e:
sys.exit("Could not read memory information from kernel: {0}".format(e)) | Get an assignment of memory banks to runs that fits to the given coreAssignment,
i.e., no run is allowed to use memory that is not local (on the same NUMA node)
to one of its CPU cores. | Below is the the instruction that describes the task:
### Input:
Get an assignment of memory banks to runs that fits to the given coreAssignment,
i.e., no run is allowed to use memory that is not local (on the same NUMA node)
to one of its CPU cores.
### Response:
def get_memory_banks_per_run(coreAssignment, cgroups):
"""Get an assignment of memory banks to runs that fits to the given coreAssignment,
i.e., no run is allowed to use memory that is not local (on the same NUMA node)
to one of its CPU cores."""
try:
# read list of available memory banks
allMems = set(cgroups.read_allowed_memory_banks())
result = []
for cores in coreAssignment:
mems = set()
for core in cores:
coreDir = '/sys/devices/system/cpu/cpu{0}/'.format(core)
mems.update(_get_memory_banks_listed_in_dir(coreDir))
allowedMems = sorted(mems.intersection(allMems))
logging.debug("Memory banks for cores %s are %s, of which we can use %s.", cores, list(mems), allowedMems)
result.append(allowedMems)
assert len(result) == len(coreAssignment)
if any(result) and os.path.isdir('/sys/devices/system/node/'):
return result
else:
# All runs get the empty list of memory regions
# because this system has no NUMA support
return None
except ValueError as e:
sys.exit("Could not read memory information from kernel: {0}".format(e)) |
def from_env(cls, prefix, kms_decrypt=False, aws_profile=None):
"""
Load database credential from env variable.
- host: ENV.{PREFIX}_HOST
- port: ENV.{PREFIX}_PORT
- database: ENV.{PREFIX}_DATABASE
- username: ENV.{PREFIX}_USERNAME
- password: ENV.{PREFIX}_PASSWORD
:param prefix: str
:param kms_decrypt: bool
:param aws_profile: str
"""
if len(prefix) < 1:
raise ValueError("prefix can't be empty")
if len(set(prefix).difference(set(string.ascii_uppercase + "_"))):
raise ValueError("prefix can only use [A-Z] and '_'!")
if not prefix.endswith("_"):
prefix = prefix + "_"
data = dict(
host=os.getenv(prefix + "HOST"),
port=os.getenv(prefix + "PORT"),
database=os.getenv(prefix + "DATABASE"),
username=os.getenv(prefix + "USERNAME"),
password=os.getenv(prefix + "PASSWORD"),
)
if kms_decrypt is True: # pragma: no cover
import boto3
from base64 import b64decode
        if aws_profile is None:
            kms = boto3.client("kms")
        else:
            ses = boto3.Session(profile_name=aws_profile)
            kms = ses.client("kms")
def decrypt(kms, text):
return kms.decrypt(
CiphertextBlob=b64decode(text.encode("utf-8"))
)["Plaintext"].decode("utf-8")
data = {
key: value if value is None else decrypt(kms, str(value))
for key, value in data.items()
}
return cls(**data) | Load database credential from env variable.
- host: ENV.{PREFIX}_HOST
- port: ENV.{PREFIX}_PORT
- database: ENV.{PREFIX}_DATABASE
- username: ENV.{PREFIX}_USERNAME
- password: ENV.{PREFIX}_PASSWORD
:param prefix: str
:param kms_decrypt: bool
:param aws_profile: str | Below is the the instruction that describes the task:
### Input:
Load database credential from env variable.
- host: ENV.{PREFIX}_HOST
- port: ENV.{PREFIX}_PORT
- database: ENV.{PREFIX}_DATABASE
- username: ENV.{PREFIX}_USERNAME
- password: ENV.{PREFIX}_PASSWORD
:param prefix: str
:param kms_decrypt: bool
:param aws_profile: str
### Response:
def from_env(cls, prefix, kms_decrypt=False, aws_profile=None):
"""
Load database credential from env variable.
- host: ENV.{PREFIX}_HOST
- port: ENV.{PREFIX}_PORT
- database: ENV.{PREFIX}_DATABASE
- username: ENV.{PREFIX}_USERNAME
- password: ENV.{PREFIX}_PASSWORD
:param prefix: str
:param kms_decrypt: bool
:param aws_profile: str
"""
if len(prefix) < 1:
raise ValueError("prefix can't be empty")
if len(set(prefix).difference(set(string.ascii_uppercase + "_"))):
raise ValueError("prefix can only use [A-Z] and '_'!")
if not prefix.endswith("_"):
prefix = prefix + "_"
data = dict(
host=os.getenv(prefix + "HOST"),
port=os.getenv(prefix + "PORT"),
database=os.getenv(prefix + "DATABASE"),
username=os.getenv(prefix + "USERNAME"),
password=os.getenv(prefix + "PASSWORD"),
)
if kms_decrypt is True: # pragma: no cover
import boto3
from base64 import b64decode
        if aws_profile is None:
            kms = boto3.client("kms")
        else:
            ses = boto3.Session(profile_name=aws_profile)
            kms = ses.client("kms")
def decrypt(kms, text):
return kms.decrypt(
CiphertextBlob=b64decode(text.encode("utf-8"))
)["Plaintext"].decode("utf-8")
data = {
key: value if value is None else decrypt(kms, str(value))
for key, value in data.items()
}
return cls(**data) |
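A hypothetical usage sketch: the enclosing class is not named in this entry, so `Credential` below is a placeholder, and the environment variable names and values are illustrative only:

import os

os.environ.update({
    "MYDB_HOST": "db.internal",       # illustrative values, not from the original
    "MYDB_PORT": "5432",
    "MYDB_DATABASE": "analytics",
    "MYDB_USERNAME": "reader",
    "MYDB_PASSWORD": "s3cret",
})

cred = Credential.from_env("MYDB")    # "MYDB" is normalized to the "MYDB_" prefix
print(cred.host, cred.port)           # assuming cls(**data) exposes these as attributes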
def discover_yaml(bank=None, **meta):
"""Discovers the YAML format and registers it if available.
Install YAML support via PIP::
pip install PyYAML
:param bank: The format bank to register the format in
:param meta: Extra information associated with the format
"""
try:
import yaml
if bank is None:
bank = default_bank
bank.register('yaml', yaml.load, yaml.dump, **meta)
except ImportError:
pass | Discovers the YAML format and registers it if available.
Install YAML support via PIP::
pip install PyYAML
:param bank: The format bank to register the format in
:param meta: Extra information associated with the format | Below is the the instruction that describes the task:
### Input:
Discovers the YAML format and registers it if available.
Install YAML support via PIP::
pip install PyYAML
:param bank: The format bank to register the format in
:param meta: Extra information associated with the format
### Response:
def discover_yaml(bank=None, **meta):
"""Discovers the YAML format and registers it if available.
Install YAML support via PIP::
pip install PyYAML
:param bank: The format bank to register the format in
:param meta: Extra information associated with the format
"""
try:
import yaml
if bank is None:
bank = default_bank
bank.register('yaml', yaml.load, yaml.dump, **meta)
except ImportError:
pass |
def _parse_planar_geometry_surface(self, node):
"""
Parses a planar geometry surface
"""
nodes = []
for key in ["topLeft", "topRight", "bottomRight", "bottomLeft"]:
nodes.append(geo.Point(getattr(node, key)["lon"],
getattr(node, key)["lat"],
getattr(node, key)["depth"]))
top_left, top_right, bottom_right, bottom_left = tuple(nodes)
return geo.PlanarSurface.from_corner_points(
top_left, top_right, bottom_right, bottom_left) | Parses a planar geometry surface | Below is the the instruction that describes the task:
### Input:
Parses a planar geometry surface
### Response:
def _parse_planar_geometry_surface(self, node):
"""
Parses a planar geometry surface
"""
nodes = []
for key in ["topLeft", "topRight", "bottomRight", "bottomLeft"]:
nodes.append(geo.Point(getattr(node, key)["lon"],
getattr(node, key)["lat"],
getattr(node, key)["depth"]))
top_left, top_right, bottom_right, bottom_left = tuple(nodes)
return geo.PlanarSurface.from_corner_points(
top_left, top_right, bottom_right, bottom_left) |
def create(self, repo_user, repo_name, issue_number, body):
"""
    POST /repos/:owner/:repo/issues/:number/comments
:param issue_number: The issue's (or pull request's) number
:param body: The body of this comment
"""
return self.api.makeRequest(
['repos', repo_user, repo_name,
'issues', issue_number, 'comments'],
method='POST',
        post=dict(body=body)) | POST /repos/:owner/:repo/issues/:number/comments
:param issue_number: The issue's (or pull request's) number
:param body: The body of this comment | Below is the the instruction that describes the task:
### Input:
POST /repos/:owner/:repo/issues/:number/comments
:param issue_number: The issue's (or pull request's) number
:param body: The body of this comment
### Response:
def create(self, repo_user, repo_name, issue_number, body):
"""
    POST /repos/:owner/:repo/issues/:number/comments
:param issue_number: The issue's (or pull request's) number
:param body: The body of this comment
"""
return self.api.makeRequest(
['repos', repo_user, repo_name,
'issues', issue_number, 'comments'],
method='POST',
post=dict(body=body)) |
def cancel(self,order_id):
''' cancel the specified order
:param order_id: order_id to be canceled
'''
url= 'https://coincheck.com/api/exchange/orders/' + order_id
headers = make_header(url,access_key=self.access_key,secret_key=self.secret_key)
r = requests.delete(url,headers=headers)
return json.loads(r.text) | cancel the specified order
:param order_id: order_id to be canceled | Below is the the instruction that describes the task:
### Input:
cancel the specified order
:param order_id: order_id to be canceled
### Response:
def cancel(self,order_id):
''' cancel the specified order
:param order_id: order_id to be canceled
'''
url= 'https://coincheck.com/api/exchange/orders/' + order_id
headers = make_header(url,access_key=self.access_key,secret_key=self.secret_key)
r = requests.delete(url,headers=headers)
return json.loads(r.text) |
def delete(self, resource_group, name):
"""
Delete a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
"""
self.connection.container_groups.delete(resource_group, name) | Delete a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str | Below is the the instruction that describes the task:
### Input:
Delete a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
### Response:
def delete(self, resource_group, name):
"""
Delete a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
"""
self.connection.container_groups.delete(resource_group, name) |
def extract_filestem(data):
"""Extract filestem from Entrez eSummary data.
Function expects esummary['DocumentSummarySet']['DocumentSummary'][0]
Some illegal characters may occur in AssemblyName - for these, a more
robust regex replace/escape may be required. Sadly, NCBI don't just
use standard percent escapes, but instead replace certain
characters with underscores: white space, slash, comma, hash, brackets.
"""
escapes = re.compile(r"[\s/,#\(\)]")
escname = re.sub(escapes, '_', data['AssemblyName'])
return '_'.join([data['AssemblyAccession'], escname]) | Extract filestem from Entrez eSummary data.
Function expects esummary['DocumentSummarySet']['DocumentSummary'][0]
Some illegal characters may occur in AssemblyName - for these, a more
robust regex replace/escape may be required. Sadly, NCBI don't just
use standard percent escapes, but instead replace certain
characters with underscores: white space, slash, comma, hash, brackets. | Below is the the instruction that describes the task:
### Input:
Extract filestem from Entrez eSummary data.
Function expects esummary['DocumentSummarySet']['DocumentSummary'][0]
Some illegal characters may occur in AssemblyName - for these, a more
robust regex replace/escape may be required. Sadly, NCBI don't just
use standard percent escapes, but instead replace certain
characters with underscores: white space, slash, comma, hash, brackets.
### Response:
def extract_filestem(data):
"""Extract filestem from Entrez eSummary data.
Function expects esummary['DocumentSummarySet']['DocumentSummary'][0]
Some illegal characters may occur in AssemblyName - for these, a more
robust regex replace/escape may be required. Sadly, NCBI don't just
use standard percent escapes, but instead replace certain
characters with underscores: white space, slash, comma, hash, brackets.
"""
escapes = re.compile(r"[\s/,#\(\)]")
escname = re.sub(escapes, '_', data['AssemblyName'])
return '_'.join([data['AssemblyAccession'], escname]) |
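A worked example of the escaping, assuming `re` is imported at module level and the function above is in scope; the accession and assembly name are illustrative:

record = {"AssemblyAccession": "GCF_000011545.1",
          "AssemblyName": "ASM1154v1 (strain #1)"}
# space, '(', ')' and '#' are each replaced by '_':
print(extract_filestem(record))
# -> 'GCF_000011545.1_ASM1154v1__strain__1_'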
def _date_val(self, dt: datetime) -> None:
"""
Add a date value
:param dt: datetime to add
"""
self._tval_char = dt.strftime('%Y-%m-%d %H:%M')
self._nval_num = (dt.year * 10000) + (dt.month * 100) + dt.day + \
(((dt.hour / 100.0) + (dt.minute / 10000.0)) if isinstance(dt, datetime) else 0) | Add a date value
:param dt: datetime to add | Below is the the instruction that describes the task:
### Input:
Add a date value
:param dt: datetime to add
### Response:
def _date_val(self, dt: datetime) -> None:
"""
Add a date value
:param dt: datetime to add
"""
self._tval_char = dt.strftime('%Y-%m-%d %H:%M')
self._nval_num = (dt.year * 10000) + (dt.month * 100) + dt.day + \
(((dt.hour / 100.0) + (dt.minute / 10000.0)) if isinstance(dt, datetime) else 0) |
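A worked example of the numeric encoding above, assuming a full datetime is passed in:

from datetime import datetime

dt = datetime(2024, 3, 9, 13, 45)
tval_char = dt.strftime('%Y-%m-%d %H:%M')          # '2024-03-09 13:45'
nval_num = (dt.year * 10000) + (dt.month * 100) + dt.day \
    + (dt.hour / 100.0) + (dt.minute / 10000.0)
print(nval_num)                                    # approximately 20240309.1345 (YYYYMMDD.HHMM)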
def num(value):
"""Convert a value from one of several bases to an int."""
if re_hex_num.match(value):
return int(value, base=16)
else:
return int(value) | Convert a value from one of several bases to an int. | Below is the the instruction that describes the task:
### Input:
Convert a value from one of several bases to an int.
### Response:
def num(value):
"""Convert a value from one of several bases to an int."""
if re_hex_num.match(value):
return int(value, base=16)
else:
return int(value) |
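A minimal sketch of the conversion; `re_hex_num` is defined elsewhere in the source module, so the pattern below is an assumption:

import re

re_hex_num = re.compile(r"^0[xX][0-9a-fA-F]+$")   # assumed pattern, not from the original

def num(value):
    if re_hex_num.match(value):
        return int(value, base=16)
    return int(value)

print(num("0x1a"))   # 26
print(num("42"))     # 42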
def remove_response_property(xml_root):
"""Removes response properties if exist."""
if xml_root.tag == "testsuites":
properties = xml_root.find("properties")
resp_properties = []
for prop in properties:
prop_name = prop.get("name", "")
if "polarion-response-" in prop_name:
resp_properties.append(prop)
for resp_property in resp_properties:
properties.remove(resp_property)
elif xml_root.tag in ("testcases", "requirements"):
resp_properties = xml_root.find("response-properties")
if resp_properties is not None:
xml_root.remove(resp_properties)
else:
raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG) | Removes response properties if exist. | Below is the the instruction that describes the task:
### Input:
Removes response properties if exist.
### Response:
def remove_response_property(xml_root):
"""Removes response properties if exist."""
if xml_root.tag == "testsuites":
properties = xml_root.find("properties")
resp_properties = []
for prop in properties:
prop_name = prop.get("name", "")
if "polarion-response-" in prop_name:
resp_properties.append(prop)
for resp_property in resp_properties:
properties.remove(resp_property)
elif xml_root.tag in ("testcases", "requirements"):
resp_properties = xml_root.find("response-properties")
if resp_properties is not None:
xml_root.remove(resp_properties)
else:
raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG) |
def _create_tautological_expression_for_location(query_metadata_table, location):
"""For a given location, create a BinaryComposition that always evaluates to 'true'."""
location_type = query_metadata_table.get_location_info(location).type
location_exists = BinaryComposition(
u'!=', ContextField(location, location_type), NullLiteral)
location_does_not_exist = BinaryComposition(
u'=', ContextField(location, location_type), NullLiteral)
return BinaryComposition(u'||', location_exists, location_does_not_exist) | For a given location, create a BinaryComposition that always evaluates to 'true'. | Below is the the instruction that describes the task:
### Input:
For a given location, create a BinaryComposition that always evaluates to 'true'.
### Response:
def _create_tautological_expression_for_location(query_metadata_table, location):
"""For a given location, create a BinaryComposition that always evaluates to 'true'."""
location_type = query_metadata_table.get_location_info(location).type
location_exists = BinaryComposition(
u'!=', ContextField(location, location_type), NullLiteral)
location_does_not_exist = BinaryComposition(
u'=', ContextField(location, location_type), NullLiteral)
return BinaryComposition(u'||', location_exists, location_does_not_exist) |
def _get_firewall_rules(firewall_rules):
'''
Construct a list of optional firewall rules from the cloud profile.
'''
ret = []
for key, value in six.iteritems(firewall_rules):
# Verify the required 'protocol' property is present in the cloud
# profile config
if 'protocol' not in firewall_rules[key].keys():
raise SaltCloudConfigError(
'The firewall rule \'{0}\' is missing \'protocol\''.format(key)
)
ret.append(FirewallRule(
name=key,
protocol=firewall_rules[key].get('protocol', None),
source_mac=firewall_rules[key].get('source_mac', None),
source_ip=firewall_rules[key].get('source_ip', None),
target_ip=firewall_rules[key].get('target_ip', None),
port_range_start=firewall_rules[key].get('port_range_start', None),
port_range_end=firewall_rules[key].get('port_range_end', None),
icmp_type=firewall_rules[key].get('icmp_type', None),
icmp_code=firewall_rules[key].get('icmp_code', None)
))
return ret | Construct a list of optional firewall rules from the cloud profile. | Below is the the instruction that describes the task:
### Input:
Construct a list of optional firewall rules from the cloud profile.
### Response:
def _get_firewall_rules(firewall_rules):
'''
Construct a list of optional firewall rules from the cloud profile.
'''
ret = []
for key, value in six.iteritems(firewall_rules):
# Verify the required 'protocol' property is present in the cloud
# profile config
if 'protocol' not in firewall_rules[key].keys():
raise SaltCloudConfigError(
'The firewall rule \'{0}\' is missing \'protocol\''.format(key)
)
ret.append(FirewallRule(
name=key,
protocol=firewall_rules[key].get('protocol', None),
source_mac=firewall_rules[key].get('source_mac', None),
source_ip=firewall_rules[key].get('source_ip', None),
target_ip=firewall_rules[key].get('target_ip', None),
port_range_start=firewall_rules[key].get('port_range_start', None),
port_range_end=firewall_rules[key].get('port_range_end', None),
icmp_type=firewall_rules[key].get('icmp_type', None),
icmp_code=firewall_rules[key].get('icmp_code', None)
))
return ret |
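For clarity, the cloud-profile fragment the loop above expects looks roughly like this; rule names and values are illustrative, and every rule must at least declare a protocol or SaltCloudConfigError is raised:

firewall_rules = {
    'allow-ssh': {
        'protocol': 'TCP',
        'port_range_start': 22,
        'port_range_end': 22,
    },
    'allow-ping': {
        'protocol': 'ICMP',
        'icmp_type': 8,
        'icmp_code': 0,
    },
}
# _get_firewall_rules(firewall_rules) would then build one FirewallRule per entry.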
def _succeed(self, request_id, reply, duration):
"""Publish a CommandSucceededEvent."""
self.listeners.publish_command_success(
duration, reply, self.name,
request_id, self.sock_info.address, self.op_id) | Publish a CommandSucceededEvent. | Below is the the instruction that describes the task:
### Input:
Publish a CommandSucceededEvent.
### Response:
def _succeed(self, request_id, reply, duration):
"""Publish a CommandSucceededEvent."""
self.listeners.publish_command_success(
duration, reply, self.name,
request_id, self.sock_info.address, self.op_id) |
def _fix_example_namespace(self):
"""Attempts to resolve issues where our samples use
'http://example.com/' for our example namespace but python-stix uses
'http://example.com' by removing the former.
"""
example_prefix = 'example' # Example ns prefix
idgen_prefix = idgen.get_id_namespace_prefix()
# If the ID namespace alias doesn't match the example alias, return.
if idgen_prefix != example_prefix:
return
# If the example namespace prefix isn't in the parsed namespace
# prefixes, return.
if example_prefix not in self._input_namespaces:
return
self._input_namespaces[example_prefix] = idgen.EXAMPLE_NAMESPACE.name | Attempts to resolve issues where our samples use
'http://example.com/' for our example namespace but python-stix uses
'http://example.com' by removing the former. | Below is the the instruction that describes the task:
### Input:
Attempts to resolve issues where our samples use
'http://example.com/' for our example namespace but python-stix uses
'http://example.com' by removing the former.
### Response:
def _fix_example_namespace(self):
"""Attempts to resolve issues where our samples use
'http://example.com/' for our example namespace but python-stix uses
'http://example.com' by removing the former.
"""
example_prefix = 'example' # Example ns prefix
idgen_prefix = idgen.get_id_namespace_prefix()
# If the ID namespace alias doesn't match the example alias, return.
if idgen_prefix != example_prefix:
return
# If the example namespace prefix isn't in the parsed namespace
# prefixes, return.
if example_prefix not in self._input_namespaces:
return
self._input_namespaces[example_prefix] = idgen.EXAMPLE_NAMESPACE.name |
def GetHashCode(self):
"""uint32 identifier"""
slice_length = 4 if len(self.Data) >= 4 else len(self.Data)
return int.from_bytes(self.Data[:slice_length], 'little') | uint32 identifier | Below is the the instruction that describes the task:
### Input:
uint32 identifier
### Response:
def GetHashCode(self):
"""uint32 identifier"""
slice_length = 4 if len(self.Data) >= 4 else len(self.Data)
return int.from_bytes(self.Data[:slice_length], 'little') |
def spur(image, mask=None, iterations=1):
'''Remove spur pixels from an image
0 0 0 0 0 0
0 1 0 -> 0 0 0
0 0 1 0 0 ?
'''
global spur_table_1,spur_table_2
if mask is None:
masked_image = image
else:
masked_image = image.astype(bool).copy()
masked_image[~mask] = False
index_i, index_j, masked_image = prepare_for_index_lookup(masked_image,
False)
if iterations is None:
iterations = len(index_i)
for i in range(iterations):
for table in (spur_table_1, spur_table_2):
index_i, index_j = index_lookup(index_i, index_j,
masked_image, table, 1)
masked_image = extract_from_image_lookup(image, index_i, index_j)
if not mask is None:
masked_image[~mask] = image[~mask]
return masked_image | Remove spur pixels from an image
0 0 0 0 0 0
0 1 0 -> 0 0 0
0 0 1 0 0 ? | Below is the the instruction that describes the task:
### Input:
Remove spur pixels from an image
0 0 0 0 0 0
0 1 0 -> 0 0 0
0 0 1 0 0 ?
### Response:
def spur(image, mask=None, iterations=1):
'''Remove spur pixels from an image
0 0 0 0 0 0
0 1 0 -> 0 0 0
0 0 1 0 0 ?
'''
global spur_table_1,spur_table_2
if mask is None:
masked_image = image
else:
masked_image = image.astype(bool).copy()
masked_image[~mask] = False
index_i, index_j, masked_image = prepare_for_index_lookup(masked_image,
False)
if iterations is None:
iterations = len(index_i)
for i in range(iterations):
for table in (spur_table_1, spur_table_2):
index_i, index_j = index_lookup(index_i, index_j,
masked_image, table, 1)
masked_image = extract_from_image_lookup(image, index_i, index_j)
if not mask is None:
masked_image[~mask] = image[~mask]
return masked_image |
def _wrap_results(result, dtype, fill_value=None):
""" wrap our results if needed """
if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
if fill_value is None:
# GH#24293
fill_value = iNaT
if not isinstance(result, np.ndarray):
tz = getattr(dtype, 'tz', None)
assert not isna(fill_value), "Expected non-null fill_value"
if result == fill_value:
result = np.nan
result = tslibs.Timestamp(result, tz=tz)
else:
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
if result == fill_value:
result = np.nan
# raise if we have a timedelta64[ns] which is too large
if np.fabs(result) > _int64_max:
raise ValueError("overflow in timedelta operation")
result = tslibs.Timedelta(result, unit='ns')
else:
result = result.astype('i8').view(dtype)
return result | wrap our results if needed | Below is the the instruction that describes the task:
### Input:
wrap our results if needed
### Response:
def _wrap_results(result, dtype, fill_value=None):
""" wrap our results if needed """
if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
if fill_value is None:
# GH#24293
fill_value = iNaT
if not isinstance(result, np.ndarray):
tz = getattr(dtype, 'tz', None)
assert not isna(fill_value), "Expected non-null fill_value"
if result == fill_value:
result = np.nan
result = tslibs.Timestamp(result, tz=tz)
else:
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
if result == fill_value:
result = np.nan
# raise if we have a timedelta64[ns] which is too large
if np.fabs(result) > _int64_max:
raise ValueError("overflow in timedelta operation")
result = tslibs.Timedelta(result, unit='ns')
else:
result = result.astype('i8').view(dtype)
return result |
def set_velocities(self, velocities):
"""
:param velocities (au): list of list of atom velocities
:return:
"""
assert len(velocities) == len(self.mol)
self.params["velocity"] = velocities | :param velocities (au): list of list of atom velocities
:return: | Below is the the instruction that describes the task:
### Input:
:param velocities (au): list of list of atom velocities
:return:
### Response:
def set_velocities(self, velocities):
"""
:param velocities (au): list of list of atom velocities
:return:
"""
assert len(velocities) == len(self.mol)
self.params["velocity"] = velocities |
def process_tokens(self, tokens):
u"""
        Iterate over tokens to find strings and ensure that they are prefixed.
:param tokens:
:return:
"""
for (tok_type, token, (start_row, _), _, _) in tokens:
if tok_type == tokenize.STRING:
self._check_string(token, start_row) | u"""
        Iterate over tokens to find strings and ensure that they are prefixed.
:param tokens:
:return: | Below is the the instruction that describes the task:
### Input:
u"""
        Iterate over tokens to find strings and ensure that they are prefixed.
:param tokens:
:return:
### Response:
def process_tokens(self, tokens):
u"""
        Iterate over tokens to find strings and ensure that they are prefixed.
:param tokens:
:return:
"""
for (tok_type, token, (start_row, _), _, _) in tokens:
if tok_type == tokenize.STRING:
self._check_string(token, start_row) |
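For context, the token stream this checker walks can be produced with the standard tokenize module; a minimal sketch (the surrounding checker class and _check_string are not shown in this entry):

import io
import tokenize

source = u'greeting = "hello"\n'
readline = io.StringIO(source).readline
for tok_type, token, (start_row, _), _, _ in tokenize.generate_tokens(readline):
    if tok_type == tokenize.STRING:
        print(start_row, token)   # 1 "hello"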
def get_summaries_log_dir(decode_hp, output_dir, dataset_split):
"""Get nested summaries_log_dir based on decode_hp."""
child_dir = decode_hp.summaries_log_dir
level_dir = "".join([str(level) for level in decode_hp.level_interp])
if decode_hp.channel_interp == "all":
rank_dir = "all"
else:
rank_dir = "rank_%d" % decode_hp.rank_interp
child_dir = "%s/%s_%s" % (child_dir, level_dir, rank_dir)
if dataset_split is not None:
child_dir += "_{}".format(dataset_split)
return os.path.join(output_dir, child_dir) | Get nested summaries_log_dir based on decode_hp. | Below is the the instruction that describes the task:
### Input:
Get nested summaries_log_dir based on decode_hp.
### Response:
def get_summaries_log_dir(decode_hp, output_dir, dataset_split):
"""Get nested summaries_log_dir based on decode_hp."""
child_dir = decode_hp.summaries_log_dir
level_dir = "".join([str(level) for level in decode_hp.level_interp])
if decode_hp.channel_interp == "all":
rank_dir = "all"
else:
rank_dir = "rank_%d" % decode_hp.rank_interp
child_dir = "%s/%s_%s" % (child_dir, level_dir, rank_dir)
if dataset_split is not None:
child_dir += "_{}".format(dataset_split)
return os.path.join(output_dir, child_dir) |
def connect_proxy(self, proxy_host='localhost', proxy_port=0, proxy_type=socks.HTTP,
host='localhost', port=0):
"""Connect to a host on a given port via proxy server
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host and proxy server are
specified during instantiation.
:param proxy_host: Hostname of proxy server
:type proxy_host: string
:param proxy_port: Port of proxy server, by default port for specified proxy type is used
:type proxy_port: int
:param proxy_type: Proxy type to use (see socks.PROXY_TYPES for details)
:type proxy_type: int
:param host: Hostname of SMTP server
:type host: string
:param port: Port of SMTP server, by default smtplib.SMTP_PORT is used
:type port: int
:return: Tuple of (code, msg)
:rtype: tuple
"""
if proxy_type not in socks.DEFAULT_PORTS.keys():
raise NotSupportedProxyType
(proxy_host, proxy_port) = self._parse_host(host=proxy_host, port=proxy_port)
if not proxy_port:
proxy_port = socks.DEFAULT_PORTS[proxy_type]
(host, port) = self._parse_host(host=host, port=port)
if self.debuglevel > 0:
self._print_debug('connect: via proxy', proxy_host, proxy_port)
s = socks.socksocket()
s.set_proxy(proxy_type=proxy_type, addr=proxy_host, port=proxy_port)
s.settimeout(self.timeout)
if self.source_address is not None:
s.bind(self.source_address)
s.connect((host, port))
# todo
# Send CRLF in order to get first response from destination server.
# Probably it's needed only for HTTP proxies. Further investigation required.
s.sendall(bCRLF)
self.sock = s
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', repr(msg))
return code, msg | Connect to a host on a given port via proxy server
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host and proxy server are
specified during instantiation.
:param proxy_host: Hostname of proxy server
:type proxy_host: string
:param proxy_port: Port of proxy server, by default port for specified proxy type is used
:type proxy_port: int
:param proxy_type: Proxy type to use (see socks.PROXY_TYPES for details)
:type proxy_type: int
:param host: Hostname of SMTP server
:type host: string
:param port: Port of SMTP server, by default smtplib.SMTP_PORT is used
:type port: int
:return: Tuple of (code, msg)
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Connect to a host on a given port via proxy server
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host and proxy server are
specified during instantiation.
:param proxy_host: Hostname of proxy server
:type proxy_host: string
:param proxy_port: Port of proxy server, by default port for specified proxy type is used
:type proxy_port: int
:param proxy_type: Proxy type to use (see socks.PROXY_TYPES for details)
:type proxy_type: int
:param host: Hostname of SMTP server
:type host: string
:param port: Port of SMTP server, by default smtplib.SMTP_PORT is used
:type port: int
:return: Tuple of (code, msg)
:rtype: tuple
### Response:
def connect_proxy(self, proxy_host='localhost', proxy_port=0, proxy_type=socks.HTTP,
host='localhost', port=0):
"""Connect to a host on a given port via proxy server
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host and proxy server are
specified during instantiation.
:param proxy_host: Hostname of proxy server
:type proxy_host: string
:param proxy_port: Port of proxy server, by default port for specified proxy type is used
:type proxy_port: int
:param proxy_type: Proxy type to use (see socks.PROXY_TYPES for details)
:type proxy_type: int
:param host: Hostname of SMTP server
:type host: string
:param port: Port of SMTP server, by default smtplib.SMTP_PORT is used
:type port: int
:return: Tuple of (code, msg)
:rtype: tuple
"""
if proxy_type not in socks.DEFAULT_PORTS.keys():
raise NotSupportedProxyType
(proxy_host, proxy_port) = self._parse_host(host=proxy_host, port=proxy_port)
if not proxy_port:
proxy_port = socks.DEFAULT_PORTS[proxy_type]
(host, port) = self._parse_host(host=host, port=port)
if self.debuglevel > 0:
self._print_debug('connect: via proxy', proxy_host, proxy_port)
s = socks.socksocket()
s.set_proxy(proxy_type=proxy_type, addr=proxy_host, port=proxy_port)
s.settimeout(self.timeout)
if self.source_address is not None:
s.bind(self.source_address)
s.connect((host, port))
# todo
# Send CRLF in order to get first response from destination server.
# Probably it's needed only for HTTP proxies. Further investigation required.
s.sendall(bCRLF)
self.sock = s
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', repr(msg))
return code, msg |
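An illustrative call, assuming the method above lives on an SMTP subclass (named ProxySMTP here purely as a placeholder) and that a SOCKS5 proxy is listening locally; the hostnames are placeholders too:

import socks

smtp = ProxySMTP()                      # hypothetical subclass exposing connect_proxy()
code, msg = smtp.connect_proxy(
    proxy_host='127.0.0.1', proxy_port=1080, proxy_type=socks.SOCKS5,
    host='smtp.example.com', port=25,
)
print(code, msg)                        # e.g. 220 and the server banner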
def delete(config, group, force):
"""Delete an LDAP group."""
if not force:
if not click.confirm(
'Confirm that you want to delete group {}'.format(group)):
sys.exit("Deletion of {} aborted".format(group))
client = Client()
client.prepare_connection()
group_api = API(client)
group_api.delete(group) | Delete an LDAP group. | Below is the the instruction that describes the task:
### Input:
Delete an LDAP group.
### Response:
def delete(config, group, force):
"""Delete an LDAP group."""
if not force:
if not click.confirm(
'Confirm that you want to delete group {}'.format(group)):
sys.exit("Deletion of {} aborted".format(group))
client = Client()
client.prepare_connection()
group_api = API(client)
group_api.delete(group) |
def to_pandas(self):
"""Return a pandas dataframe representation of the condensed tree.
Each row of the dataframe corresponds to an edge in the tree.
The columns of the dataframe are `parent`, `child`, `lambda_val`
and `child_size`.
The `parent` and `child` are the ids of the
parent and child nodes in the tree. Node ids less than the number
of points in the original dataset represent individual points, while
ids greater than the number of points are clusters.
The `lambda_val` value is the value (1/distance) at which the `child`
node leaves the cluster.
The `child_size` is the number of points in the `child` node.
"""
try:
from pandas import DataFrame, Series
except ImportError:
raise ImportError('You must have pandas installed to export pandas DataFrames')
result = DataFrame(self._raw_tree)
return result | Return a pandas dataframe representation of the condensed tree.
Each row of the dataframe corresponds to an edge in the tree.
The columns of the dataframe are `parent`, `child`, `lambda_val`
and `child_size`.
The `parent` and `child` are the ids of the
parent and child nodes in the tree. Node ids less than the number
of points in the original dataset represent individual points, while
ids greater than the number of points are clusters.
The `lambda_val` value is the value (1/distance) at which the `child`
node leaves the cluster.
The `child_size` is the number of points in the `child` node. | Below is the the instruction that describes the task:
### Input:
Return a pandas dataframe representation of the condensed tree.
Each row of the dataframe corresponds to an edge in the tree.
The columns of the dataframe are `parent`, `child`, `lambda_val`
and `child_size`.
The `parent` and `child` are the ids of the
parent and child nodes in the tree. Node ids less than the number
of points in the original dataset represent individual points, while
ids greater than the number of points are clusters.
The `lambda_val` value is the value (1/distance) at which the `child`
node leaves the cluster.
The `child_size` is the number of points in the `child` node.
### Response:
def to_pandas(self):
"""Return a pandas dataframe representation of the condensed tree.
Each row of the dataframe corresponds to an edge in the tree.
The columns of the dataframe are `parent`, `child`, `lambda_val`
and `child_size`.
The `parent` and `child` are the ids of the
parent and child nodes in the tree. Node ids less than the number
of points in the original dataset represent individual points, while
ids greater than the number of points are clusters.
The `lambda_val` value is the value (1/distance) at which the `child`
node leaves the cluster.
The `child_size` is the number of points in the `child` node.
"""
try:
from pandas import DataFrame, Series
except ImportError:
raise ImportError('You must have pandas installed to export pandas DataFrames')
result = DataFrame(self._raw_tree)
return result |
def fill_subparser(subparser):
"""Sets up a subparser to download audio of YouTube videos.
Adds the compulsory `--youtube-id` flag.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
"""
subparser.add_argument(
'--youtube-id', type=str, required=True,
help=("The YouTube ID of the video from which to extract audio, "
"usually an 11-character string.")
)
return download | Sets up a subparser to download audio of YouTube videos.
Adds the compulsory `--youtube-id` flag.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command. | Below is the the instruction that describes the task:
### Input:
Sets up a subparser to download audio of YouTube videos.
Adds the compulsory `--youtube-id` flag.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
### Response:
def fill_subparser(subparser):
"""Sets up a subparser to download audio of YouTube videos.
Adds the compulsory `--youtube-id` flag.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
"""
subparser.add_argument(
'--youtube-id', type=str, required=True,
help=("The YouTube ID of the video from which to extract audio, "
"usually an 11-character string.")
)
return download |
def generate_graphs(data, name, results_dir):
"""Generate all reports from original dataframe
    :param dict data: dict containing raw and compiled results dataframes
:param str name: name for prefixing graphs output
:param str results_dir: results output directory
"""
graphs.resp_graph_raw(data['raw'], name + '_response_times.svg', results_dir)
graphs.resp_graph(data['compiled'], name + '_response_times_intervals.svg', results_dir)
graphs.tp_graph(data['compiled'], name + '_throughput.svg', results_dir) | Generate all reports from original dataframe
    :param dict data: dict containing raw and compiled results dataframes
:param str name: name for prefixing graphs output
:param str results_dir: results output directory | Below is the the instruction that describes the task:
### Input:
Generate all reports from original dataframe
    :param dict data: dict containing raw and compiled results dataframes
:param str name: name for prefixing graphs output
:param str results_dir: results output directory
### Response:
def generate_graphs(data, name, results_dir):
"""Generate all reports from original dataframe
    :param dict data: dict containing raw and compiled results dataframes
:param str name: name for prefixing graphs output
:param str results_dir: results output directory
"""
graphs.resp_graph_raw(data['raw'], name + '_response_times.svg', results_dir)
graphs.resp_graph(data['compiled'], name + '_response_times_intervals.svg', results_dir)
graphs.tp_graph(data['compiled'], name + '_throughput.svg', results_dir) |
def search(self, term: str, case_sensitive: bool = False) -> 'PrettyDir':
"""Searches for names that match some pattern.
Args:
term: String used to match names. A name is returned if it matches
the whole search term.
case_sensitive: Boolean to match case or not, default is False
(case insensitive).
Return:
A PrettyDir object with matched names.
"""
if case_sensitive:
return PrettyDir(
self.obj, [pattr for pattr in self.pattrs if term in pattr.name]
)
else:
term = term.lower()
return PrettyDir(
self.obj, [pattr for pattr in self.pattrs if term in pattr.name.lower()]
) | Searches for names that match some pattern.
Args:
term: String used to match names. A name is returned if it matches
the whole search term.
case_sensitive: Boolean to match case or not, default is False
(case insensitive).
Return:
A PrettyDir object with matched names. | Below is the the instruction that describes the task:
### Input:
Searches for names that match some pattern.
Args:
term: String used to match names. A name is returned if it matches
the whole search term.
case_sensitive: Boolean to match case or not, default is False
(case insensitive).
Return:
A PrettyDir object with matched names.
### Response:
def search(self, term: str, case_sensitive: bool = False) -> 'PrettyDir':
"""Searches for names that match some pattern.
Args:
term: String used to match names. A name is returned if it matches
the whole search term.
case_sensitive: Boolean to match case or not, default is False
(case insensitive).
Return:
A PrettyDir object with matched names.
"""
if case_sensitive:
return PrettyDir(
self.obj, [pattr for pattr in self.pattrs if term in pattr.name]
)
else:
term = term.lower()
return PrettyDir(
self.obj, [pattr for pattr in self.pattrs if term in pattr.name.lower()]
) |
def _merge_patches(self):
"""Injects object patches into their original object definitions."""
for patched_item, patched_namespace in self._patch_data_by_canonical_name.values():
patched_item_base_name = self._get_base_name(patched_item.name, patched_namespace.name)
if patched_item_base_name not in self._item_by_canonical_name:
raise InvalidSpec('Patch {} must correspond to a pre-existing data_type.'.format(
quote(patched_item.name)), patched_item.lineno, patched_item.path)
existing_item = self._item_by_canonical_name[patched_item_base_name]
self._check_patch_type_mismatch(patched_item, existing_item)
if isinstance(patched_item, (AstStructPatch, AstUnionPatch)):
self._check_field_names_unique(existing_item, patched_item)
existing_item.fields += patched_item.fields
self._inject_patched_examples(existing_item, patched_item)
else:
raise AssertionError('Unknown Patch Object Type {}'.format(
patched_item.__class__.__name__)) | Injects object patches into their original object definitions. | Below is the the instruction that describes the task:
### Input:
Injects object patches into their original object definitions.
### Response:
def _merge_patches(self):
"""Injects object patches into their original object definitions."""
for patched_item, patched_namespace in self._patch_data_by_canonical_name.values():
patched_item_base_name = self._get_base_name(patched_item.name, patched_namespace.name)
if patched_item_base_name not in self._item_by_canonical_name:
raise InvalidSpec('Patch {} must correspond to a pre-existing data_type.'.format(
quote(patched_item.name)), patched_item.lineno, patched_item.path)
existing_item = self._item_by_canonical_name[patched_item_base_name]
self._check_patch_type_mismatch(patched_item, existing_item)
if isinstance(patched_item, (AstStructPatch, AstUnionPatch)):
self._check_field_names_unique(existing_item, patched_item)
existing_item.fields += patched_item.fields
self._inject_patched_examples(existing_item, patched_item)
else:
raise AssertionError('Unknown Patch Object Type {}'.format(
patched_item.__class__.__name__)) |
def words_amount_needed(entropybits: Union[int, float],
entropy_w: Union[int, float],
entropy_n: Union[int, float],
amount_n: int) -> int:
"""Calculate words needed for a passphrase based on entropy."""
# Thanks to @julianor for this tip to calculate default amount of
# entropy: minbitlen/log2(len(wordlist)).
# I set the minimum entropy bits and calculate the amount of words
    # needed, considering the entropy of the wordlist.
# Then: entropy_w * amount_w + entropy_n * amount_n >= ENTROPY_BITS_MIN
if not isinstance(entropybits, (int, float)):
raise TypeError('entropybits can only be int or float')
if not isinstance(entropy_w, (int, float)):
raise TypeError('entropy_w can only be int or float')
if not isinstance(entropy_n, (int, float)):
raise TypeError('entropy_n can only be int or float')
if not isinstance(amount_n, int):
raise TypeError('amount_n can only be int')
if entropybits < 0:
raise ValueError('entropybits should be greater than 0')
if entropy_w <= 0:
raise ValueError('entropy_w should be greater than 0')
if entropy_n < 0:
raise ValueError('entropy_n should be greater than 0')
if amount_n < 0:
raise ValueError('amount_n should be greater than 0')
amount_w = (entropybits - entropy_n * amount_n) / entropy_w
if amount_w > -1.0:
return ceil(fabs(amount_w))
return 0 | Calculate words needed for a passphrase based on entropy. | Below is the the instruction that describes the task:
### Input:
Calculate words needed for a passphrase based on entropy.
### Response:
def words_amount_needed(entropybits: Union[int, float],
entropy_w: Union[int, float],
entropy_n: Union[int, float],
amount_n: int) -> int:
"""Calculate words needed for a passphrase based on entropy."""
# Thanks to @julianor for this tip to calculate default amount of
# entropy: minbitlen/log2(len(wordlist)).
# I set the minimum entropy bits and calculate the amount of words
    # needed, considering the entropy of the wordlist.
# Then: entropy_w * amount_w + entropy_n * amount_n >= ENTROPY_BITS_MIN
if not isinstance(entropybits, (int, float)):
raise TypeError('entropybits can only be int or float')
if not isinstance(entropy_w, (int, float)):
raise TypeError('entropy_w can only be int or float')
if not isinstance(entropy_n, (int, float)):
raise TypeError('entropy_n can only be int or float')
if not isinstance(amount_n, int):
raise TypeError('amount_n can only be int')
if entropybits < 0:
raise ValueError('entropybits should be greater than 0')
if entropy_w <= 0:
raise ValueError('entropy_w should be greater than 0')
if entropy_n < 0:
raise ValueError('entropy_n should be greater than 0')
if amount_n < 0:
raise ValueError('amount_n should be greater than 0')
amount_w = (entropybits - entropy_n * amount_n) / entropy_w
if amount_w > -1.0:
return ceil(fabs(amount_w))
return 0 |
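A worked example, assuming the function above is importable, a 7776-word (EFF-style) wordlist, and digits 0-9 for the numbers, so entropy_w = log2(7776) and entropy_n = log2(10):

from math import log2

entropy_w = log2(7776)   # about 12.925 bits per word (assumed wordlist size)
entropy_n = log2(10)     # about 3.322 bits per digit
# Target 77 bits of entropy with one digit in the passphrase:
print(words_amount_needed(77, entropy_w, entropy_n, 1))
# (77 - 3.322) / 12.925 is about 5.70, which rounds up to 6 words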
def validate_meta_object(meta: Dict[str, Any], allow_extra_meta_fields: bool) -> None:
"""
Validates that every key is one of `META_FIELDS` and has a value of the expected type.
"""
for key, value in meta.items():
if key in META_FIELDS:
if type(value) is not META_FIELDS[key]:
raise ValidationError(
f"Values for {key} are expected to have the type {META_FIELDS[key]}, "
f"instead got {type(value)}."
)
elif allow_extra_meta_fields:
if key[:2] != "x-":
raise ValidationError(
"Undefined meta fields need to begin with 'x-', "
f"{key} is not a valid undefined meta field."
)
else:
raise ValidationError(
f"{key} is not a permitted meta field. To allow undefined fields, "
"set `allow_extra_meta_fields` to True."
) | Validates that every key is one of `META_FIELDS` and has a value of the expected type. | Below is the the instruction that describes the task:
### Input:
Validates that every key is one of `META_FIELDS` and has a value of the expected type.
### Response:
def validate_meta_object(meta: Dict[str, Any], allow_extra_meta_fields: bool) -> None:
"""
Validates that every key is one of `META_FIELDS` and has a value of the expected type.
"""
for key, value in meta.items():
if key in META_FIELDS:
if type(value) is not META_FIELDS[key]:
raise ValidationError(
f"Values for {key} are expected to have the type {META_FIELDS[key]}, "
f"instead got {type(value)}."
)
elif allow_extra_meta_fields:
if key[:2] != "x-":
raise ValidationError(
"Undefined meta fields need to begin with 'x-', "
f"{key} is not a valid undefined meta field."
)
else:
raise ValidationError(
f"{key} is not a permitted meta field. To allow undefined fields, "
"set `allow_extra_meta_fields` to True."
) |
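A hedged usage sketch: META_FIELDS and ValidationError are defined elsewhere in the module, so assume here that META_FIELDS maps, for example, 'license' to str and 'authors' to list:

# Passes: known fields have the right types and the extra field uses the 'x-' prefix.
validate_meta_object(
    {"license": "MIT", "authors": ["alice"], "x-ci": "github-actions"},
    allow_extra_meta_fields=True,
)

# Raises ValidationError: 'ci' is undefined and lacks the required 'x-' prefix.
validate_meta_object({"ci": "github-actions"}, allow_extra_meta_fields=True)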
def return_daily_messages_count(self, sender):
""" Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits """
h24 = now() - timedelta(days=1)
return Message.objects.filter(sender=sender, sent_at__gte=h24).count() | Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits | Below is the the instruction that describes the task:
### Input:
Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits
### Response:
def return_daily_messages_count(self, sender):
""" Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits """
h24 = now() - timedelta(days=1)
return Message.objects.filter(sender=sender, sent_at__gte=h24).count() |
def get_fills(self, order_id=None, symbol=None, side=None, order_type=None,
start=None, end=None, page=None, limit=None):
"""Get a list of recent fills.
https://docs.kucoin.com/#list-fills
:param order_id: (optional) generated order id
:type order_id: string
:param symbol: (optional) Name of symbol e.g. KCS-BTC
:type symbol: string
:param side: (optional) buy or sell
:type side: string
:param order_type: (optional) limit, market, limit_stop or market_stop
:type order_type: string
:param start: Start time as unix timestamp (optional)
:type start: string
:param end: End time as unix timestamp (optional)
:type end: string
:param page: optional - Page to fetch
:type page: int
:param limit: optional - Number of orders
:type limit: int
.. code:: python
fills = client.get_fills()
:returns: ApiResponse
.. code:: python
{
"currentPage":1,
"pageSize":1,
"totalNum":251915,
"totalPage":251915,
"items":[
{
"symbol":"BTC-USDT",
"tradeId":"5c35c02709e4f67d5266954e",
"orderId":"5c35c02703aa673ceec2a168",
"counterOrderId":"5c1ab46003aa676e487fa8e3",
"side":"buy",
"liquidity":"taker",
"forceTaker":true,
"price":"0.083",
"size":"0.8424304",
"funds":"0.0699217232",
"fee":"0",
"feeRate":"0",
"feeCurrency":"USDT",
"stop":"",
"type":"limit",
"createdAt":1547026472000
}
]
}
:raises: KucoinResponseException, KucoinAPIException
"""
data = {}
if order_id:
data['orderId'] = order_id
if symbol:
data['symbol'] = symbol
if side:
data['side'] = side
if order_type:
data['type'] = order_type
if start:
data['startAt'] = start
if end:
data['endAt'] = end
if page:
data['page'] = page
if limit:
data['pageSize'] = limit
return self._get('fills', True, data=data) | Get a list of recent fills.
https://docs.kucoin.com/#list-fills
:param order_id: (optional) generated order id
:type order_id: string
:param symbol: (optional) Name of symbol e.g. KCS-BTC
:type symbol: string
:param side: (optional) buy or sell
:type side: string
:param order_type: (optional) limit, market, limit_stop or market_stop
:type order_type: string
:param start: Start time as unix timestamp (optional)
:type start: string
:param end: End time as unix timestamp (optional)
:type end: string
:param page: optional - Page to fetch
:type page: int
:param limit: optional - Number of orders
:type limit: int
.. code:: python
fills = client.get_fills()
:returns: ApiResponse
.. code:: python
{
"currentPage":1,
"pageSize":1,
"totalNum":251915,
"totalPage":251915,
"items":[
{
"symbol":"BTC-USDT",
"tradeId":"5c35c02709e4f67d5266954e",
"orderId":"5c35c02703aa673ceec2a168",
"counterOrderId":"5c1ab46003aa676e487fa8e3",
"side":"buy",
"liquidity":"taker",
"forceTaker":true,
"price":"0.083",
"size":"0.8424304",
"funds":"0.0699217232",
"fee":"0",
"feeRate":"0",
"feeCurrency":"USDT",
"stop":"",
"type":"limit",
"createdAt":1547026472000
}
]
}
:raises: KucoinResponseException, KucoinAPIException | Below is the the instruction that describes the task:
### Input:
Get a list of recent fills.
https://docs.kucoin.com/#list-fills
:param order_id: (optional) generated order id
:type order_id: string
:param symbol: (optional) Name of symbol e.g. KCS-BTC
:type symbol: string
:param side: (optional) buy or sell
:type side: string
:param order_type: (optional) limit, market, limit_stop or market_stop
:type order_type: string
:param start: Start time as unix timestamp (optional)
:type start: string
:param end: End time as unix timestamp (optional)
:type end: string
:param page: optional - Page to fetch
:type page: int
:param limit: optional - Number of orders
:type limit: int
.. code:: python
fills = client.get_fills()
:returns: ApiResponse
.. code:: python
{
"currentPage":1,
"pageSize":1,
"totalNum":251915,
"totalPage":251915,
"items":[
{
"symbol":"BTC-USDT",
"tradeId":"5c35c02709e4f67d5266954e",
"orderId":"5c35c02703aa673ceec2a168",
"counterOrderId":"5c1ab46003aa676e487fa8e3",
"side":"buy",
"liquidity":"taker",
"forceTaker":true,
"price":"0.083",
"size":"0.8424304",
"funds":"0.0699217232",
"fee":"0",
"feeRate":"0",
"feeCurrency":"USDT",
"stop":"",
"type":"limit",
"createdAt":1547026472000
}
]
}
:raises: KucoinResponseException, KucoinAPIException
### Response:
def get_fills(self, order_id=None, symbol=None, side=None, order_type=None,
start=None, end=None, page=None, limit=None):
"""Get a list of recent fills.
https://docs.kucoin.com/#list-fills
:param order_id: (optional) generated order id
:type order_id: string
:param symbol: (optional) Name of symbol e.g. KCS-BTC
:type symbol: string
:param side: (optional) buy or sell
:type side: string
:param order_type: (optional) limit, market, limit_stop or market_stop
:type order_type: string
:param start: Start time as unix timestamp (optional)
:type start: string
:param end: End time as unix timestamp (optional)
:type end: string
:param page: optional - Page to fetch
:type page: int
:param limit: optional - Number of orders
:type limit: int
.. code:: python
fills = client.get_fills()
:returns: ApiResponse
.. code:: python
{
"currentPage":1,
"pageSize":1,
"totalNum":251915,
"totalPage":251915,
"items":[
{
"symbol":"BTC-USDT",
"tradeId":"5c35c02709e4f67d5266954e",
"orderId":"5c35c02703aa673ceec2a168",
"counterOrderId":"5c1ab46003aa676e487fa8e3",
"side":"buy",
"liquidity":"taker",
"forceTaker":true,
"price":"0.083",
"size":"0.8424304",
"funds":"0.0699217232",
"fee":"0",
"feeRate":"0",
"feeCurrency":"USDT",
"stop":"",
"type":"limit",
"createdAt":1547026472000
}
]
}
:raises: KucoinResponseException, KucoinAPIException
"""
data = {}
if order_id:
data['orderId'] = order_id
if symbol:
data['symbol'] = symbol
if side:
data['side'] = side
if order_type:
data['type'] = order_type
if start:
data['startAt'] = start
if end:
data['endAt'] = end
if page:
data['page'] = page
if limit:
data['pageSize'] = limit
return self._get('fills', True, data=data) |
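A short usage sketch for get_fills, assuming the surrounding python-kucoin style Client class; the import path, credentials, and symbol below are placeholders.

from kucoin.client import Client  # assumed import path for the client shown above

client = Client('api_key', 'api_secret', 'api_passphrase')
# Only buy-side fills for one symbol, 50 per page.
fills = client.get_fills(symbol='KCS-BTC', side='buy', limit=50)
for item in fills['items']:
    print(item['tradeId'], item['price'], item['size'])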
def healpix_to_lonlat(healpix_index, nside, dx=None, dy=None, order='ring'):
"""
Convert HEALPix indices (optionally with offsets) to longitudes/latitudes.
If no offsets (``dx`` and ``dy``) are provided, the coordinates will default
to those at the center of the HEALPix pixels.
Parameters
----------
healpix_index : int or `~numpy.ndarray`
HEALPix indices (as a scalar or array)
nside : int or `~numpy.ndarray`
Number of pixels along the side of each of the 12 top-level HEALPix tiles
dx, dy : float or `~numpy.ndarray`, optional
Offsets inside the HEALPix pixel, which must be in the range [0:1],
where 0.5 is the center of the HEALPix pixels (as scalars or arrays)
order : { 'nested' | 'ring' }, optional
Order of HEALPix pixels
Returns
-------
lon : :class:`~astropy.coordinates.Longitude`
The longitude values
lat : :class:`~astropy.coordinates.Latitude`
The latitude values
"""
_validate_nside(nside)
if _validate_order(order) == 'ring':
func = _core.healpix_ring_to_lonlat
else: # _validate_order(order) == 'nested'
func = _core.healpix_nested_to_lonlat
if dx is None:
dx = 0.5
else:
_validate_offset('x', dx)
if dy is None:
dy = 0.5
else:
_validate_offset('y', dy)
nside = np.asarray(nside, dtype=np.intc)
lon, lat = func(healpix_index, nside, dx, dy)
lon = Longitude(lon, unit=u.rad, copy=False)
lat = Latitude(lat, unit=u.rad, copy=False)
return lon, lat | Convert HEALPix indices (optionally with offsets) to longitudes/latitudes.
If no offsets (``dx`` and ``dy``) are provided, the coordinates will default
to those at the center of the HEALPix pixels.
Parameters
----------
healpix_index : int or `~numpy.ndarray`
HEALPix indices (as a scalar or array)
nside : int or `~numpy.ndarray`
Number of pixels along the side of each of the 12 top-level HEALPix tiles
dx, dy : float or `~numpy.ndarray`, optional
Offsets inside the HEALPix pixel, which must be in the range [0:1],
where 0.5 is the center of the HEALPix pixels (as scalars or arrays)
order : { 'nested' | 'ring' }, optional
Order of HEALPix pixels
Returns
-------
lon : :class:`~astropy.coordinates.Longitude`
The longitude values
lat : :class:`~astropy.coordinates.Latitude`
The latitude values | Below is the the instruction that describes the task:
### Input:
Convert HEALPix indices (optionally with offsets) to longitudes/latitudes.
If no offsets (``dx`` and ``dy``) are provided, the coordinates will default
to those at the center of the HEALPix pixels.
Parameters
----------
healpix_index : int or `~numpy.ndarray`
HEALPix indices (as a scalar or array)
nside : int or `~numpy.ndarray`
Number of pixels along the side of each of the 12 top-level HEALPix tiles
dx, dy : float or `~numpy.ndarray`, optional
Offsets inside the HEALPix pixel, which must be in the range [0:1],
where 0.5 is the center of the HEALPix pixels (as scalars or arrays)
order : { 'nested' | 'ring' }, optional
Order of HEALPix pixels
Returns
-------
lon : :class:`~astropy.coordinates.Longitude`
The longitude values
lat : :class:`~astropy.coordinates.Latitude`
The latitude values
### Response:
def healpix_to_lonlat(healpix_index, nside, dx=None, dy=None, order='ring'):
"""
Convert HEALPix indices (optionally with offsets) to longitudes/latitudes.
If no offsets (``dx`` and ``dy``) are provided, the coordinates will default
to those at the center of the HEALPix pixels.
Parameters
----------
healpix_index : int or `~numpy.ndarray`
HEALPix indices (as a scalar or array)
nside : int or `~numpy.ndarray`
Number of pixels along the side of each of the 12 top-level HEALPix tiles
dx, dy : float or `~numpy.ndarray`, optional
Offsets inside the HEALPix pixel, which must be in the range [0:1],
where 0.5 is the center of the HEALPix pixels (as scalars or arrays)
order : { 'nested' | 'ring' }, optional
Order of HEALPix pixels
Returns
-------
lon : :class:`~astropy.coordinates.Longitude`
The longitude values
lat : :class:`~astropy.coordinates.Latitude`
The latitude values
"""
_validate_nside(nside)
if _validate_order(order) == 'ring':
func = _core.healpix_ring_to_lonlat
else: # _validate_order(order) == 'nested'
func = _core.healpix_nested_to_lonlat
if dx is None:
dx = 0.5
else:
_validate_offset('x', dx)
if dy is None:
dy = 0.5
else:
_validate_offset('y', dy)
nside = np.asarray(nside, dtype=np.intc)
lon, lat = func(healpix_index, nside, dx, dy)
lon = Longitude(lon, unit=u.rad, copy=False)
lat = Latitude(lat, unit=u.rad, copy=False)
return lon, lat |
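A quick sanity check of the conversion, assuming the function is packaged as in astropy-healpix; the pixel indices are arbitrary.

import numpy as np
from astropy_healpix import healpix_to_lonlat  # assumed packaging of the function above

lon, lat = healpix_to_lonlat(np.array([0, 1, 2]), nside=16, order='nested')
print(lon.deg, lat.deg)  # Longitude/Latitude expose the values in degrees via .deg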
def pack(self, value=None):
"""Pack the message into a binary data.
One of the basic operations on a Message is the pack operation. During
the packing process, we convert all message attributes to binary
format.
Since this is usually used before sending the message to a switch,
here we also call :meth:`update_header_length`.
.. seealso:: This method calls its parent's :meth:`GenericStruct.pack`
after :meth:`update_header_length`.
Returns:
bytes: Binary data that represents the Message.
Raises:
Exception: If there are validation errors.
"""
if value is None:
self.update_header_length()
return super().pack()
elif isinstance(value, type(self)):
return value.pack()
else:
msg = "{} is not an instance of {}".format(value,
type(self).__name__)
raise PackException(msg) | Pack the message into binary data.
One of the basic operations on a Message is the pack operation. During
the packing process, we convert all message attributes to binary
format.
Since this is usually used before sending the message to a switch,
here we also call :meth:`update_header_length`.
.. seealso:: This method calls its parent's :meth:`GenericStruct.pack`
after :meth:`update_header_length`.
Returns:
bytes: Binary data that represents the Message.
Raises:
Exception: If there are validation errors. | Below is the the instruction that describes the task:
### Input:
Pack the message into binary data.
One of the basic operations on a Message is the pack operation. During
the packing process, we convert all message attributes to binary
format.
Since this is usually used before sending the message to a switch,
here we also call :meth:`update_header_length`.
.. seealso:: This method calls its parent's :meth:`GenericStruct.pack`
after :meth:`update_header_length`.
Returns:
bytes: Binary data that represents the Message.
Raises:
Exception: If there are validation errors.
### Response:
def pack(self, value=None):
"""Pack the message into a binary data.
One of the basic operations on a Message is the pack operation. During
the packing process, we convert all message attributes to binary
format.
Since this is usually used before sending the message to a switch,
here we also call :meth:`update_header_length`.
.. seealso:: This method calls its parent's :meth:`GenericStruct.pack`
after :meth:`update_header_length`.
Returns:
bytes: Binary data that represents the Message.
Raises:
Exception: If there are validation errors.
"""
if value is None:
self.update_header_length()
return super().pack()
elif isinstance(value, type(self)):
return value.pack()
else:
msg = "{} is not an instance of {}".format(value,
type(self).__name__)
raise PackException(msg) |
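A hedged usage sketch, assuming a python-openflow style message class that inherits this pack method; the import path and class name are illustrative and may differ between versions.

from pyof.v0x01.symmetric.hello import Hello  # illustrative message class

msg = Hello(xid=1)
data = msg.pack()              # header length is refreshed, then bytes are returned
same = msg.pack(Hello(xid=1))  # packing another instance of the same type is also allowed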
def set_tlsext_use_srtp(self, profiles):
"""
Enable support for negotiating SRTP keying material.
:param bytes profiles: A colon delimited list of protection profile
names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``.
:return: None
"""
if not isinstance(profiles, bytes):
raise TypeError("profiles must be a byte string.")
_openssl_assert(
_lib.SSL_CTX_set_tlsext_use_srtp(self._context, profiles) == 0
) | Enable support for negotiating SRTP keying material.
:param bytes profiles: A colon delimited list of protection profile
names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``.
:return: None | Below is the the instruction that describes the task:
### Input:
Enable support for negotiating SRTP keying material.
:param bytes profiles: A colon delimited list of protection profile
names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``.
:return: None
### Response:
def set_tlsext_use_srtp(self, profiles):
"""
Enable support for negotiating SRTP keying material.
:param bytes profiles: A colon delimited list of protection profile
names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``.
:return: None
"""
if not isinstance(profiles, bytes):
raise TypeError("profiles must be a byte string.")
_openssl_assert(
_lib.SSL_CTX_set_tlsext_use_srtp(self._context, profiles) == 0
) |
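A minimal pyOpenSSL-style sketch of calling the method above; note the profile list must be bytes, otherwise the TypeError branch is hit.

from OpenSSL import SSL

ctx = SSL.Context(SSL.TLSv1_2_METHOD)
# Bytes are required; a str such as 'SRTP_AES128_CM_SHA1_80' would raise TypeError.
ctx.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32')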
def get(self, request):
"""View for HTTP GET method.
Returns template and context from generate_page_title and
generate_sections to populate template.
"""
sections = self.generate_sections()
if self.paginated:
p = Paginator(sections, 25)
page = request.GET.get('page')
try:
sections = p.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
sections = p.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), return last page
# of results.
sections = p.page(p.num_pages)
pageUpper = int(p.num_pages) / 2
try:
pageLower = int(page) / 2
except TypeError:
pageLower = -999
else:
pageUpper = None
pageLower = None
context = {
'sections': sections,
'page_title': self.generate_page_title(),
'browse_type': self.browse_type,
'pageUpper': pageUpper,
'pageLower': pageLower
}
return render(
request,
self.template_path,
context
) | View for HTTP GET method.
Returns template and context from generate_page_title and
generate_sections to populate template. | Below is the the instruction that describes the task:
### Input:
View for HTTP GET method.
Returns template and context from generate_page_title and
generate_sections to populate template.
### Response:
def get(self, request):
"""View for HTTP GET method.
Returns template and context from generate_page_title and
generate_sections to populate template.
"""
sections = self.generate_sections()
if self.paginated:
p = Paginator(sections, 25)
page = request.GET.get('page')
try:
sections = p.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
sections = p.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), return last page
# of results.
sections = p.page(p.num_pages)
pageUpper = int(p.num_pages) / 2
try:
pageLower = int(page) / 2
except TypeError:
pageLower = -999
else:
pageUpper = None
pageLower = None
context = {
'sections': sections,
'page_title': self.generate_page_title(),
'browse_type': self.browse_type,
'pageUpper': pageUpper,
'pageLower': pageLower
}
return render(
request,
self.template_path,
context
) |
def splitnport(host, defport=-1):
"""Split host and port, returning numeric port.
Return given default port if no ':' found; defaults to -1.
Return numerical port if a valid number is found after ':'.
Return None if ':' but not a valid number."""
global _nportprog
if _nportprog is None:
import re
_nportprog = re.compile('^(.*):(.*)$')
match = _nportprog.match(host)
if match:
host, port = match.group(1, 2)
try:
if not port: raise ValueError("no digits")
nport = int(port)
except ValueError:
nport = None
return host, nport
return host, defport | Split host and port, returning numeric port.
Return given default port if no ':' found; defaults to -1.
Return numerical port if a valid number is found after ':'.
Return None if ':' but not a valid number. | Below is the the instruction that describes the task:
### Input:
Split host and port, returning numeric port.
Return given default port if no ':' found; defaults to -1.
Return numerical port if a valid number is found after ':'.
Return None if ':' but not a valid number.
### Response:
def splitnport(host, defport=-1):
"""Split host and port, returning numeric port.
Return given default port if no ':' found; defaults to -1.
Return numerical port if a valid number is found after ':'.
Return None if ':' but not a valid number."""
global _nportprog
if _nportprog is None:
import re
_nportprog = re.compile('^(.*):(.*)$')
match = _nportprog.match(host)
if match:
host, port = match.group(1, 2)
try:
if not port: raise ValueError("no digits")
nport = int(port)
except ValueError:
nport = None
return host, nport
return host, defport |
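A few illustrative calls showing the branches of splitnport; the expected results follow directly from the code above.

print(splitnport('example.com:8080'))      # ('example.com', 8080)
print(splitnport('example.com'))           # ('example.com', -1)   no ':' -> default port
print(splitnport('example.com', 80))       # ('example.com', 80)   caller-supplied default
print(splitnport('example.com:notaport'))  # ('example.com', None) non-numeric port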
def is_ccw(points):
"""
Check if connected planar points are counterclockwise.
Parameters
-----------
points: (n,2) float, connected points on a plane
Returns
----------
ccw: bool, True if points are counterclockwise
"""
points = np.asanyarray(points, dtype=np.float64)
if (len(points.shape) != 2 or
points.shape[1] != 2):
raise ValueError('CCW is only defined for 2D')
xd = np.diff(points[:, 0])
yd = np.column_stack((
points[:, 1],
points[:, 1])).reshape(-1)[1:-1].reshape((-1, 2)).sum(axis=1)
area = np.sum(xd * yd) * .5
ccw = area < 0
return ccw | Check if connected planar points are counterclockwise.
Parameters
-----------
points: (n,2) float, connected points on a plane
Returns
----------
ccw: bool, True if points are counterclockwise | Below is the the instruction that describes the task:
### Input:
Check if connected planar points are counterclockwise.
Parameters
-----------
points: (n,2) float, connected points on a plane
Returns
----------
ccw: bool, True if points are counterclockwise
### Response:
def is_ccw(points):
"""
Check if connected planar points are counterclockwise.
Parameters
-----------
points: (n,2) float, connected points on a plane
Returns
----------
ccw: bool, True if points are counterclockwise
"""
points = np.asanyarray(points, dtype=np.float64)
if (len(points.shape) != 2 or
points.shape[1] != 2):
raise ValueError('CCW is only defined for 2D')
xd = np.diff(points[:, 0])
yd = np.column_stack((
points[:, 1],
points[:, 1])).reshape(-1)[1:-1].reshape((-1, 2)).sum(axis=1)
area = np.sum(xd * yd) * .5
ccw = area < 0
return ccw |
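A small check of the helper, assuming numpy is imported as np; a square traversed counter-clockwise should give True and the reversed traversal False.

import numpy as np

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]], dtype=float)
print(is_ccw(square))        # expected True  (counter-clockwise traversal)
print(is_ccw(square[::-1]))  # expected False (clockwise traversal)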
def _validate_instance(self, instance, errors, path_prefix=''):
"""Validates that the given instance of a document conforms to the given schema's
structure and validations. Any validation errors are added to the given errors
collection. The caller should assume the instance is considered valid if the
errors collection is empty when this method returns."""
if not isinstance(instance, dict):
errors[path_prefix] = "Expected instance of dict to validate against schema."
return
# validate against the schema level validators
self._apply_validations(errors, path_prefix, self._validates, instance)
# Loop over each field in the schema and check the instance value conforms
# to its spec
for field, spec in self.doc_spec.iteritems():
path = self._append_path(path_prefix, field)
# If the field is present, validate its value.
if field in instance:
self._validate_value(instance[field], spec, path, errors)
else:
# If not, add an error if it was a required key.
if spec.get('required', False):
errors[path] = "{} is required.".format(path)
# Now loop over each field in the given instance and make sure we don't
# have any fields not declared in the schema, unless strict mode has been
# explicitly disabled.
if self._strict:
for field in instance:
if field not in self.doc_spec:
errors[self._append_path(path_prefix, field)] = "Unexpected document field not present in schema" | Validates that the given instance of a document conforms to the given schema's
structure and validations. Any validation errors are added to the given errors
collection. The caller should assume the instance is considered valid if the
errors collection is empty when this method returns. | Below is the the instruction that describes the task:
### Input:
Validates that the given instance of a document conforms to the given schema's
structure and validations. Any validation errors are added to the given errors
collection. The caller should assume the instance is considered valid if the
errors collection is empty when this method returns.
### Response:
def _validate_instance(self, instance, errors, path_prefix=''):
"""Validates that the given instance of a document conforms to the given schema's
structure and validations. Any validation errors are added to the given errors
collection. The caller should assume the instance is considered valid if the
errors collection is empty when this method returns."""
if not isinstance(instance, dict):
errors[path_prefix] = "Expected instance of dict to validate against schema."
return
# validate against the schema level validators
self._apply_validations(errors, path_prefix, self._validates, instance)
# Loop over each field in the schema and check the instance value conforms
# to its spec
for field, spec in self.doc_spec.iteritems():
path = self._append_path(path_prefix, field)
# If the field is present, validate its value.
if field in instance:
self._validate_value(instance[field], spec, path, errors)
else:
# If not, add an error if it was a required key.
if spec.get('required', False):
errors[path] = "{} is required.".format(path)
# Now loop over each field in the given instance and make sure we don't
# have any fields not declared in the schema, unless strict mode has been
# explicitly disabled.
if self._strict:
for field in instance:
if field not in self.doc_spec:
errors[self._append_path(path_prefix, field)] = "Unexpected document field not present in schema" |
def insert_rows(self, table, rows, target_fields=None):
"""
A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
"""
super().insert_rows(table, rows, target_fields, 0) | A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings | Below is the the instruction that describes the task:
### Input:
A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
### Response:
def insert_rows(self, table, rows, target_fields=None):
"""
A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
"""
super().insert_rows(table, rows, target_fields, 0) |
def deployment(
*,
block_uri: URI,
contract_instance: str,
contract_type: str,
address: HexStr,
transaction: HexStr = None,
block: HexStr = None,
deployment_bytecode: Dict[str, Any] = None,
runtime_bytecode: Dict[str, Any] = None,
compiler: Dict[str, Any] = None,
) -> Manifest:
"""
Returns a manifest, with the newly included deployment. Requires a valid blockchain URI,
however no validation is provided that this URI is unique amongst the other deployment
URIs, so the user must take care that each blockchain URI represents a unique blockchain.
"""
return _deployment(
contract_instance,
contract_type,
deployment_bytecode,
runtime_bytecode,
compiler,
block_uri,
address,
transaction,
block,
) | Returns a manifest, with the newly included deployment. Requires a valid blockchain URI,
however no validation is provided that this URI is unique amongst the other deployment
URIs, so the user must take care that each blockchain URI represents a unique blockchain. | Below is the the instruction that describes the task:
### Input:
Returns a manifest, with the newly included deployment. Requires a valid blockchain URI,
however no validation is provided that this URI is unique amongst the other deployment
URIs, so the user must take care that each blockchain URI represents a unique blockchain.
### Response:
def deployment(
*,
block_uri: URI,
contract_instance: str,
contract_type: str,
address: HexStr,
transaction: HexStr = None,
block: HexStr = None,
deployment_bytecode: Dict[str, Any] = None,
runtime_bytecode: Dict[str, Any] = None,
compiler: Dict[str, Any] = None,
) -> Manifest:
"""
Returns a manifest, with the newly included deployment. Requires a valid blockchain URI,
however no validation is provided that this URI is unique amongst the other deployment
URIs, so the user must take care that each blockchain URI represents a unique blockchain.
"""
return _deployment(
contract_instance,
contract_type,
deployment_bytecode,
runtime_bytecode,
compiler,
block_uri,
address,
transaction,
block,
) |
def hide(self, wid, verbose=False):
"""
Hide an HTML browser in the Results Panel.
:param wid: Window ID
:param verbose: print more
"""
PARAMS={"id":wid}
response=api(url=self.__url+"/hide?",PARAMS=PARAMS, method="GET", verbose=verbose)
return response | Hide an HTML browser in the Results Panel.
:param wid: Window ID
:param verbose: print more | Below is the the instruction that describes the task:
### Input:
Hide an HTML browser in the Results Panel.
:param wid: Window ID
:param verbose: print more
### Response:
def hide(self, wid, verbose=False):
"""
Hide an HTML browser in the Results Panel.
:param wid: Window ID
:param verbose: print more
"""
PARAMS={"id":wid}
response=api(url=self.__url+"/hide?",PARAMS=PARAMS, method="GET", verbose=verbose)
return response |
def _parse_top_level(self, body):
""" Ensure compliance with the spec's top-level section """
link = 'jsonapi.org/format/#document-top-level'
try:
if not isinstance(body['data'], dict):
raise TypeError
except (KeyError, TypeError):
self.fail('JSON API payloads MUST be a hash at the most '
'top-level; rooted at a key named `data` where the '
'value must be a hash. Currently, we only support '
'JSON API payloads that comply with the single '
'Resource Object section.', link)
if 'errors' in body:
self.fail('JSON API payloads MUST not have both `data` & '
'`errors` top-level keys.', link) | Ensure compliance with the spec's top-level section | Below is the the instruction that describes the task:
### Input:
Ensure compliance with the spec's top-level section
### Response:
def _parse_top_level(self, body):
""" Ensure compliance with the spec's top-level section """
link = 'jsonapi.org/format/#document-top-level'
try:
if not isinstance(body['data'], dict):
raise TypeError
except (KeyError, TypeError):
self.fail('JSON API payloads MUST be a hash at the most '
'top-level; rooted at a key named `data` where the '
'value must be a hash. Currently, we only support '
'JSON API payloads that comply with the single '
'Resource Object section.', link)
if 'errors' in body:
self.fail('JSON API payloads MUST not have both `data` & '
'`errors` top-level keys.', link) |
def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='ICM'):
"""
Builds a kernel for an Intrinsic Coregionalization Model
:input_dim: Input dimensionality (does not include dimension of indices)
:num_outputs: Number of outputs
:param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
:type kernel: a GPy kernel
:param W_rank: number of tuples of the coregionalization parameters 'W'
:type W_rank: integer
"""
if kernel.input_dim != input_dim:
kernel.input_dim = input_dim
warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
K = kernel.prod(GPy.kern.Coregionalize(1, num_outputs, active_dims=[input_dim], rank=W_rank,W=W,kappa=kappa,name='B'),name=name)
return K | Builds a kernel for an Intrinsic Coregionalization Model
:input_dim: Input dimensionality (does not include dimension of indices)
:num_outputs: Number of outputs
:param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
:type kernel: a GPy kernel
:param W_rank: number of tuples of the coregionalization parameters 'W'
:type W_rank: integer | Below is the the instruction that describes the task:
### Input:
Builds a kernel for an Intrinsic Coregionalization Model
:input_dim: Input dimensionality (does not include dimension of indices)
:num_outputs: Number of outputs
:param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
:type kernel: a GPy kernel
:param W_rank: number of tuples of the coregionalization parameters 'W'
:type W_rank: integer
### Response:
def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='ICM'):
"""
Builds a kernel for an Intrinsic Coregionalization Model
:input_dim: Input dimensionality (does not include dimension of indices)
:num_outputs: Number of outputs
:param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
:type kernel: a GPy kernel
:param W_rank: number of tuples of the coregionalization parameters 'W'
:type W_rank: integer
"""
if kernel.input_dim != input_dim:
kernel.input_dim = input_dim
warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
K = kernel.prod(GPy.kern.Coregionalize(1, num_outputs, active_dims=[input_dim], rank=W_rank,W=W,kappa=kappa,name='B'),name=name)
return K |
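A short GPy-flavoured sketch of building a coregionalized kernel with the helper above; the dimensions are arbitrary.

import GPy

base = GPy.kern.RBF(input_dim=1)
# Two correlated outputs over a 1-D input; the extra input column holds the output index.
K = ICM(input_dim=1, num_outputs=2, kernel=base, W_rank=1)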
def compare(testsuite, gold, select='i-id i-input mrs'):
"""
Compare two [incr tsdb()] profiles.
Args:
testsuite (str, TestSuite): path to the test [incr tsdb()]
testsuite or a :class:`TestSuite` object
gold (str, TestSuite): path to the gold [incr tsdb()]
testsuite or a :class:`TestSuite` object
select: TSQL query to select (id, input, mrs) triples
(default: `i-id i-input mrs`)
Yields:
dict: Comparison results as::
{"id": "item identifier",
"input": "input sentence",
"test": number_of_unique_results_in_test,
"shared": number_of_shared_results,
"gold": number_of_unique_results_in_gold}
"""
from delphin.mrs import simplemrs, compare as mrs_compare
if not isinstance(testsuite, itsdb.TestSuite):
if isinstance(testsuite, itsdb.ItsdbProfile):
testsuite = testsuite.root
testsuite = itsdb.TestSuite(testsuite)
if not isinstance(gold, itsdb.TestSuite):
if isinstance(gold, itsdb.ItsdbProfile):
gold = gold.root
gold = itsdb.TestSuite(gold)
queryobj = tsql.inspect_query('select ' + select)
if len(queryobj['projection']) != 3:
raise ValueError('select does not return 3 fields: ' + select)
input_select = '{} {}'.format(queryobj['projection'][0],
queryobj['projection'][1])
i_inputs = dict(tsql.select(input_select, testsuite))
matched_rows = itsdb.match_rows(
tsql.select(select, testsuite),
tsql.select(select, gold),
0)
for (key, testrows, goldrows) in matched_rows:
(test_unique, shared, gold_unique) = mrs_compare.compare_bags(
[simplemrs.loads_one(row[2]) for row in testrows],
[simplemrs.loads_one(row[2]) for row in goldrows])
yield {'id': key,
'input': i_inputs[key],
'test': test_unique,
'shared': shared,
'gold': gold_unique} | Compare two [incr tsdb()] profiles.
Args:
testsuite (str, TestSuite): path to the test [incr tsdb()]
testsuite or a :class:`TestSuite` object
gold (str, TestSuite): path to the gold [incr tsdb()]
testsuite or a :class:`TestSuite` object
select: TSQL query to select (id, input, mrs) triples
(default: `i-id i-input mrs`)
Yields:
dict: Comparison results as::
{"id": "item identifier",
"input": "input sentence",
"test": number_of_unique_results_in_test,
"shared": number_of_shared_results,
"gold": number_of_unique_results_in_gold} | Below is the the instruction that describes the task:
### Input:
Compare two [incr tsdb()] profiles.
Args:
testsuite (str, TestSuite): path to the test [incr tsdb()]
testsuite or a :class:`TestSuite` object
gold (str, TestSuite): path to the gold [incr tsdb()]
testsuite or a :class:`TestSuite` object
select: TSQL query to select (id, input, mrs) triples
(default: `i-id i-input mrs`)
Yields:
dict: Comparison results as::
{"id": "item identifier",
"input": "input sentence",
"test": number_of_unique_results_in_test,
"shared": number_of_shared_results,
"gold": number_of_unique_results_in_gold}
### Response:
def compare(testsuite, gold, select='i-id i-input mrs'):
"""
Compare two [incr tsdb()] profiles.
Args:
testsuite (str, TestSuite): path to the test [incr tsdb()]
testsuite or a :class:`TestSuite` object
gold (str, TestSuite): path to the gold [incr tsdb()]
testsuite or a :class:`TestSuite` object
select: TSQL query to select (id, input, mrs) triples
(default: `i-id i-input mrs`)
Yields:
dict: Comparison results as::
{"id": "item identifier",
"input": "input sentence",
"test": number_of_unique_results_in_test,
"shared": number_of_shared_results,
"gold": number_of_unique_results_in_gold}
"""
from delphin.mrs import simplemrs, compare as mrs_compare
if not isinstance(testsuite, itsdb.TestSuite):
if isinstance(testsuite, itsdb.ItsdbProfile):
testsuite = testsuite.root
testsuite = itsdb.TestSuite(testsuite)
if not isinstance(gold, itsdb.TestSuite):
if isinstance(gold, itsdb.ItsdbProfile):
gold = gold.root
gold = itsdb.TestSuite(gold)
queryobj = tsql.inspect_query('select ' + select)
if len(queryobj['projection']) != 3:
raise ValueError('select does not return 3 fields: ' + select)
input_select = '{} {}'.format(queryobj['projection'][0],
queryobj['projection'][1])
i_inputs = dict(tsql.select(input_select, testsuite))
matched_rows = itsdb.match_rows(
tsql.select(select, testsuite),
tsql.select(select, gold),
0)
for (key, testrows, goldrows) in matched_rows:
(test_unique, shared, gold_unique) = mrs_compare.compare_bags(
[simplemrs.loads_one(row[2]) for row in testrows],
[simplemrs.loads_one(row[2]) for row in goldrows])
yield {'id': key,
'input': i_inputs[key],
'test': test_unique,
'shared': shared,
'gold': gold_unique} |
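A sketch of consuming the comparison results, assuming two [incr tsdb()] profile directories on disk; the paths are placeholders.

for row in compare('profiles/test', 'profiles/gold'):
    print('{id}\t{test}\t{shared}\t{gold}\t{input}'.format(**row))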
def connectionMade(self):
"""Send a HTTP POST command with the appropriate CIM over HTTP
headers and payload."""
self.factory.request_xml = str(self.factory.payload)
self.sendCommand('POST', '/cimom')
self.sendHeader('Host', '%s:%d' %
(self.transport.addr[0], self.transport.addr[1]))
self.sendHeader('User-Agent', 'pywbem/twisted')
self.sendHeader('Content-length', len(self.factory.payload))
self.sendHeader('Content-type', 'application/xml')
if self.factory.creds:
auth = base64.b64encode('%s:%s' % (self.factory.creds[0],
self.factory.creds[1]))
self.sendHeader('Authorization', 'Basic %s' % auth)
self.sendHeader('CIMOperation', str(self.factory.operation))
self.sendHeader('CIMMethod', str(self.factory.method))
self.sendHeader('CIMObject', str(self.factory.object))
self.endHeaders()
# TODO: Figure out why twisted doesn't support unicode. An
# exception should be thrown by the str() call if the payload
# can't be converted to the current codepage.
self.transport.write(str(self.factory.payload)) | Send an HTTP POST command with the appropriate CIM over HTTP
headers and payload. | Below is the the instruction that describes the task:
### Input:
Send an HTTP POST command with the appropriate CIM over HTTP
headers and payload.
### Response:
def connectionMade(self):
"""Send a HTTP POST command with the appropriate CIM over HTTP
headers and payload."""
self.factory.request_xml = str(self.factory.payload)
self.sendCommand('POST', '/cimom')
self.sendHeader('Host', '%s:%d' %
(self.transport.addr[0], self.transport.addr[1]))
self.sendHeader('User-Agent', 'pywbem/twisted')
self.sendHeader('Content-length', len(self.factory.payload))
self.sendHeader('Content-type', 'application/xml')
if self.factory.creds:
auth = base64.b64encode('%s:%s' % (self.factory.creds[0],
self.factory.creds[1]))
self.sendHeader('Authorization', 'Basic %s' % auth)
self.sendHeader('CIMOperation', str(self.factory.operation))
self.sendHeader('CIMMethod', str(self.factory.method))
self.sendHeader('CIMObject', str(self.factory.object))
self.endHeaders()
# TODO: Figure out why twisted doesn't support unicode. An
# exception should be thrown by the str() call if the payload
# can't be converted to the current codepage.
self.transport.write(str(self.factory.payload)) |
def gradient_log_joint(self, x):
"""
The gradient of the log joint probability.
For the Gaussian terms, this is
d/dx [-1/2 x^T J x + h^T x] = -Jx + h.
For the likelihood terms, we have for each time t
d/dx log p(yt | xt)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
_, h_init, _ = self.info_init_params
_, _, _, h1, h2, _ = self.info_dynamics_params
H_diag, H_upper_diag = self.sparse_J_prior
# Compute the gradient from the prior
g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x)
g[0] += h_init
g[:-1] += h1
g[1:] += h2
# Compute gradient from the likelihood terms
g += self.grad_local_log_likelihood(x)
return g | The gradient of the log joint probability.
For the Gaussian terms, this is
d/dx [-1/2 x^T J x + h^T x] = -Jx + h.
For the likelihood terms, we have for each time t
d/dx log p(yt | xt) | Below is the the instruction that describes the task:
### Input:
The gradient of the log joint probability.
For the Gaussian terms, this is
d/dx [-1/2 x^T J x + h^T x] = -Jx + h.
For the likelihood terms, we have for each time t
d/dx log p(yt | xt)
### Response:
def gradient_log_joint(self, x):
"""
The gradient of the log joint probability.
For the Gaussian terms, this is
d/dx [-1/2 x^T J x + h^T x] = -Jx + h.
For the likelihood terms, we have for each time t
d/dx log p(yt | xt)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
_, h_init, _ = self.info_init_params
_, _, _, h1, h2, _ = self.info_dynamics_params
H_diag, H_upper_diag = self.sparse_J_prior
# Compute the gradient from the prior
g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x)
g[0] += h_init
g[:-1] += h1
g[1:] += h2
# Compute gradient from the likelihood terms
g += self.grad_local_log_likelihood(x)
return g |
async def SetMeterStatus(self, statues):
'''
statues : typing.Sequence[~MeterStatusParam]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='MetricsDebug',
request='SetMeterStatus',
version=2,
params=_params)
_params['statues'] = statues
reply = await self.rpc(msg)
return reply | statues : typing.Sequence[~MeterStatusParam]
Returns -> typing.Sequence[~ErrorResult] | Below is the the instruction that describes the task:
### Input:
statues : typing.Sequence[~MeterStatusParam]
Returns -> typing.Sequence[~ErrorResult]
### Response:
async def SetMeterStatus(self, statues):
'''
statues : typing.Sequence[~MeterStatusParam]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='MetricsDebug',
request='SetMeterStatus',
version=2,
params=_params)
_params['statues'] = statues
reply = await self.rpc(msg)
return reply |
def get_items(self, paginator, current_page):
"""Get list items for current page"""
fields = self.get_model_config().get_list_fields()
page = paginator.page(current_page)
items = []
for item in page:
items.append({
'id': item.id,
'url': item.get_absolute_url(),
'row_data': [
fields[field]['renderer'](item, field)
for field in self.get_current_fields()
]
})
return items | Get list items for current page | Below is the the instruction that describes the task:
### Input:
Get list items for current page
### Response:
def get_items(self, paginator, current_page):
"""Get list items for current page"""
fields = self.get_model_config().get_list_fields()
page = paginator.page(current_page)
items = []
for item in page:
items.append({
'id': item.id,
'url': item.get_absolute_url(),
'row_data': [
fields[field]['renderer'](item, field)
for field in self.get_current_fields()
]
})
return items |
def _format_finite(negative, digits, dot_pos):
"""Given a (possibly empty) string of digits and an integer
dot_pos indicating the position of the decimal point relative to
the start of that string, output a formatted numeric string with
the same value and same implicit exponent."""
# strip leading zeros
olddigits = digits
digits = digits.lstrip('0')
dot_pos -= len(olddigits) - len(digits)
# value is 0.digits * 10**dot_pos
use_exponent = dot_pos <= -4 or dot_pos > len(digits)
if use_exponent:
exp = dot_pos - 1 if digits else dot_pos
dot_pos -= exp
# left pad with zeros, insert decimal point, and add exponent
if dot_pos <= 0:
digits = '0' * (1 - dot_pos) + digits
dot_pos += 1 - dot_pos
assert 1 <= dot_pos <= len(digits)
if dot_pos < len(digits):
digits = digits[:dot_pos] + '.' + digits[dot_pos:]
if use_exponent:
digits += "e{0:+03d}".format(exp)
return '-' + digits if negative else digits | Given a (possibly empty) string of digits and an integer
dot_pos indicating the position of the decimal point relative to
the start of that string, output a formatted numeric string with
the same value and same implicit exponent. | Below is the the instruction that describes the task:
### Input:
Given a (possibly empty) string of digits and an integer
dot_pos indicating the position of the decimal point relative to
the start of that string, output a formatted numeric string with
the same value and same implicit exponent.
### Response:
def _format_finite(negative, digits, dot_pos):
"""Given a (possibly empty) string of digits and an integer
dot_pos indicating the position of the decimal point relative to
the start of that string, output a formatted numeric string with
the same value and same implicit exponent."""
# strip leading zeros
olddigits = digits
digits = digits.lstrip('0')
dot_pos -= len(olddigits) - len(digits)
# value is 0.digits * 10**dot_pos
use_exponent = dot_pos <= -4 or dot_pos > len(digits)
if use_exponent:
exp = dot_pos - 1 if digits else dot_pos
dot_pos -= exp
# left pad with zeros, insert decimal point, and add exponent
if dot_pos <= 0:
digits = '0' * (1 - dot_pos) + digits
dot_pos += 1 - dot_pos
assert 1 <= dot_pos <= len(digits)
if dot_pos < len(digits):
digits = digits[:dot_pos] + '.' + digits[dot_pos:]
if use_exponent:
digits += "e{0:+03d}".format(exp)
return '-' + digits if negative else digits |
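A few illustrative calls to the private helper above; the expected outputs follow from the digit-stripping and exponent rules in the body.

print(_format_finite(False, '123', 1))   # '1.23'
print(_format_finite(False, '0075', 3))  # '7.5'      leading zeros are stripped
print(_format_finite(False, '25', -4))   # '2.5e-05'  small dot_pos switches to exponent form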
def _decode_next_layer(self, dict_, proto=None, length=None, *, version=4, ipv6_exthdr=None):
"""Decode next layer extractor.
Positional arguments:
* dict_ -- dict, info buffer
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Keyword Arguments:
* version -- int, IP version (4 in default)
<keyword> 4 / 6
* ext_proto -- ProtoChain, ProtoChain of IPv6 extension headers
Returns:
* dict -- current protocol with next layer extracted
"""
if self._onerror:
next_ = beholder(self._import_next_layer)(self, proto, length, version=version)
else:
next_ = self._import_next_layer(proto, length, version=version)
info, chain = next_.info, next_.protochain
# make next layer protocol name
layer = next_.alias.lower()
# proto = next_.__class__.__name__
# write info and protocol chain into dict
dict_[layer] = info
self._next = next_
if ipv6_exthdr is not None:
for proto in reversed(ipv6_exthdr):
chain = ProtoChain(proto.__class__, proto.alias, basis=chain)
self._protos = ProtoChain(self.__class__, self.alias, basis=chain)
return dict_ | Decode next layer extractor.
Positional arguments:
* dict_ -- dict, info buffer
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Keyword Arguments:
* version -- int, IP version (4 in default)
<keyword> 4 / 6
* ext_proto -- ProtoChain, ProtoChain of IPv6 extension headers
Returns:
* dict -- current protocol with next layer extracted | Below is the the instruction that describes the task:
### Input:
Decode next layer extractor.
Positional arguments:
* dict_ -- dict, info buffer
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Keyword Arguments:
* version -- int, IP version (4 in default)
<keyword> 4 / 6
* ext_proto -- ProtoChain, ProtoChain of IPv6 extension headers
Returns:
* dict -- current protocol with next layer extracted
### Response:
def _decode_next_layer(self, dict_, proto=None, length=None, *, version=4, ipv6_exthdr=None):
"""Decode next layer extractor.
Positional arguments:
* dict_ -- dict, info buffer
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Keyword Arguments:
* version -- int, IP version (4 in default)
<keyword> 4 / 6
* ext_proto -- ProtoChain, ProtoChain of IPv6 extension headers
Returns:
* dict -- current protocol with next layer extracted
"""
if self._onerror:
next_ = beholder(self._import_next_layer)(self, proto, length, version=version)
else:
next_ = self._import_next_layer(proto, length, version=version)
info, chain = next_.info, next_.protochain
# make next layer protocol name
layer = next_.alias.lower()
# proto = next_.__class__.__name__
# write info and protocol chain into dict
dict_[layer] = info
self._next = next_
if ipv6_exthdr is not None:
for proto in reversed(ipv6_exthdr):
chain = ProtoChain(proto.__class__, proto.alias, basis=chain)
self._protos = ProtoChain(self.__class__, self.alias, basis=chain)
return dict_ |
def generate_moffat_profile(seeing_fwhm, alpha):
"""Generate a normalized Moffat profile from its FWHM and alpha"""
scale = 2 * math.sqrt(2**(1.0 / alpha) - 1)
gamma = seeing_fwhm / scale
amplitude = 1.0 / math.pi * (alpha - 1) / gamma**2
seeing_model = Moffat2D(amplitude=amplitude,
x_mean=0.0,
y_mean=0.0,
gamma=gamma,
alpha=alpha)
return seeing_model | Generate a normalized Moffat profile from its FWHM and alpha | Below is the the instruction that describes the task:
### Input:
Generate a normalized Moffat profile from its FWHM and alpha
### Response:
def generate_moffat_profile(seeing_fwhm, alpha):
"""Generate a normalized Moffat profile from its FWHM and alpha"""
scale = 2 * math.sqrt(2**(1.0 / alpha) - 1)
gamma = seeing_fwhm / scale
amplitude = 1.0 / math.pi * (alpha - 1) / gamma**2
seeing_model = Moffat2D(amplitude=amplitude,
x_mean=0.0,
y_mean=0.0,
gamma=gamma,
alpha=alpha)
return seeing_model |
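A brief sketch of evaluating the returned profile, assuming the module-level Moffat2D model and math import that the function relies on are available; the seeing values are arbitrary.

profile = generate_moffat_profile(seeing_fwhm=1.2, alpha=3.0)
# For a normalized profile the value at the centre is the peak.
peak = profile(0.0, 0.0)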
def delete(name=None, group_id=None, region=None, key=None, keyid=None,
profile=None, vpc_id=None, vpc_name=None):
'''
Delete a security group.
CLI example::
salt myminion boto_secgroup.delete mysecgroup
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
group_id=group_id, region=region, key=key, keyid=keyid,
profile=profile)
if group:
deleted = conn.delete_security_group(group_id=group.id)
if deleted:
log.info('Deleted security group %s with id %s.', group.name, group.id)
return True
else:
msg = 'Failed to delete security group {0}.'.format(name)
log.error(msg)
return False
else:
log.debug('Security group not found.')
return False | Delete a security group.
CLI example::
salt myminion boto_secgroup.delete mysecgroup | Below is the the instruction that describes the task:
### Input:
Delete a security group.
CLI example::
salt myminion boto_secgroup.delete mysecgroup
### Response:
def delete(name=None, group_id=None, region=None, key=None, keyid=None,
profile=None, vpc_id=None, vpc_name=None):
'''
Delete a security group.
CLI example::
salt myminion boto_secgroup.delete mysecgroup
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
group_id=group_id, region=region, key=key, keyid=keyid,
profile=profile)
if group:
deleted = conn.delete_security_group(group_id=group.id)
if deleted:
log.info('Deleted security group %s with id %s.', group.name, group.id)
return True
else:
msg = 'Failed to delete security group {0}.'.format(name)
log.error(msg)
return False
else:
log.debug('Security group not found.')
return False |
def apply_defaults(self, other_config):
"""
Applies default values from a different ConfigObject or ConfigKey object to this ConfigObject.
If there are any values in this object that are also in the default object, it will use the values from this object.
"""
if isinstance(other_config, self.__class__):
self.config.load_from_dict(other_config.config, overwrite=False)
else:
self.config.load_from_dict(other_config, overwrite=False) | Applies default values from a different ConfigObject or ConfigKey object to this ConfigObject.
If there are any values in this object that are also in the default object, it will use the values from this object. | Below is the the instruction that describes the task:
### Input:
Applies default values from a different ConfigObject or ConfigKey object to this ConfigObject.
If there are any values in this object that are also in the default object, it will use the values from this object.
### Response:
def apply_defaults(self, other_config):
"""
Applies default values from a different ConfigObject or ConfigKey object to this ConfigObject.
If there are any values in this object that are also in the default object, it will use the values from this object.
"""
if isinstance(other_config, self.__class__):
self.config.load_from_dict(other_config.config, overwrite=False)
else:
self.config.load_from_dict(other_config, overwrite=False) |
def call_s3guard_prune(credential_name):
"""
Runs S3Guard prune command on external account associated with the
given credential_name.
""" # Get the AWS credential account associated with the credential
account = get_external_account(api, credential_name)
# Invoke the prune command for the account by its name
cmd = account.external_account_cmd_by_name('S3GuardPrune')
print ("Issued '{0}' command with id '{1}'".format(cmd.name, cmd.id))
print ("Waiting for command {0} to finish...".format(cmd.id))
cmd = cmd.wait()
print ("Command succeeded: {0}".format(cmd.success)) | Runs S3Guard prune command on external account associated with the
given credential_name. | Below is the the instruction that describes the task:
### Input:
Runs S3Guard prune command on external account associated with the
given credential_name.
### Response:
def call_s3guard_prune(credential_name):
"""
Runs S3Guard prune command on external account associated with the
given credential_name.
""" # Get the AWS credential account associated with the credential
account = get_external_account(api, credential_name)
# Invoke the prune command for the account by its name
cmd = account.external_account_cmd_by_name('S3GuardPrune')
print ("Issued '{0}' command with id '{1}'".format(cmd.name, cmd.id))
print ("Waiting for command {0} to finish...".format(cmd.id))
cmd = cmd.wait()
print ("Command succeeded: {0}".format(cmd.success)) |
def declare_local_variable(self, raw_name, type=None, prepend=False):
'''
This function may create a new variable in this scope. If raw_name has been used to create other variables,
the new variable will hide all other variables created using raw_name.
'''
# Get unique ID for the new variable
onnx_name = self.get_unique_variable_name(raw_name)
# Create the variable
variable = Variable(raw_name, onnx_name, self.name, type)
self.variables[onnx_name] = variable
if raw_name in self.variable_name_mapping:
# Hide existing variables with the same raw_name
if not prepend:
self.variable_name_mapping[raw_name].append(onnx_name)
else:
self.variable_name_mapping[raw_name].insert(0, onnx_name)
else:
self.variable_name_mapping[raw_name] = [onnx_name]
return variable | This function may create a new variable in this scope. If raw_name has been used to create other variables,
the new variable will hide all other variables created using raw_name. | Below is the the instruction that describes the task:
### Input:
This function may create a new variable in this scope. If raw_name has been used to create other variables,
the new variable will hide all other variables created using raw_name.
### Response:
def declare_local_variable(self, raw_name, type=None, prepend=False):
'''
This function may create a new variable in this scope. If raw_name has been used to create other variables,
the new variable will hide all other variables created using raw_name.
'''
# Get unique ID for the new variable
onnx_name = self.get_unique_variable_name(raw_name)
# Create the variable
variable = Variable(raw_name, onnx_name, self.name, type)
self.variables[onnx_name] = variable
if raw_name in self.variable_name_mapping:
# Hide existing variables with the same raw_name
if not prepend:
self.variable_name_mapping[raw_name].append(onnx_name)
else:
self.variable_name_mapping[raw_name].insert(0, onnx_name)
else:
self.variable_name_mapping[raw_name] = [onnx_name]
return variable |
def get_field_identifiers(self):
"""
Builds a list of the field identifiers for all tables and joined tables by calling
``get_field_identifiers()`` on each table
:return: list of field identifiers
:rtype: list of str
"""
field_identifiers = []
for table in self.tables:
field_identifiers += table.get_field_identifiers()
for join_item in self.joins:
field_identifiers += join_item.right_table.get_field_identifiers()
return field_identifiers | Builds a list of the field identifiers for all tables and joined tables by calling
``get_field_identifiers()`` on each table
:return: list of field identifiers
:rtype: list of str | Below is the the instruction that describes the task:
### Input:
Builds a list of the field identifiers for all tables and joined tables by calling
``get_field_identifiers()`` on each table
:return: list of field identifiers
:rtype: list of str
### Response:
def get_field_identifiers(self):
"""
Builds a list of the field identifiers for all tables and joined tables by calling
``get_field_identifiers()`` on each table
:return: list of field identifiers
:rtype: list of str
"""
field_identifiers = []
for table in self.tables:
field_identifiers += table.get_field_identifiers()
for join_item in self.joins:
field_identifiers += join_item.right_table.get_field_identifiers()
return field_identifiers |
def graphdata(data):
"""returns ratings and episode number
to be used for making graphs"""
data = jh.get_ratings(data)
num = 1
rating_final = []
episode_final = []
for k,v in data.iteritems():
rating=[]
epinum=[]
for r in v:
if r != None:
rating.append(float(r))
epinum.append(num)
num+=1
rating_final.append(rating)
episode_final.append(epinum)
return rating_final,episode_final | returns ratings and episode number
to be used for making graphs | Below is the the instruction that describes the task:
### Input:
returns ratings and episode number
to be used for making graphs
### Response:
def graphdata(data):
"""returns ratings and episode number
to be used for making graphs"""
data = jh.get_ratings(data)
num = 1
rating_final = []
episode_final = []
for k,v in data.iteritems():
rating=[]
epinum=[]
for r in v:
if r != None:
rating.append(float(r))
epinum.append(num)
num+=1
rating_final.append(rating)
episode_final.append(epinum)
return rating_final,episode_final |
def remove_global_hook(handler):
"""remove a callback from the list of global hooks
:param handler:
the callback function, previously added with global_hook, to remove
from the list of global hooks
:type handler: function
:returns: bool, whether the handler was removed from the global hooks
"""
for i, cb in enumerate(state.global_hooks):
cb = cb()
if cb is not None and cb is handler:
state.global_hooks.pop(i)
log.info("removing a global hook callback")
return True
return False | remove a callback from the list of global hooks
:param handler:
the callback function, previously added with global_hook, to remove
from the list of global hooks
:type handler: function
:returns: bool, whether the handler was removed from the global hooks | Below is the the instruction that describes the task:
### Input:
remove a callback from the list of global hooks
:param handler:
the callback function, previously added with global_hook, to remove
from the list of global hooks
:type handler: function
:returns: bool, whether the handler was removed from the global hooks
### Response:
def remove_global_hook(handler):
"""remove a callback from the list of global hooks
:param handler:
the callback function, previously added with global_hook, to remove
from the list of global hooks
:type handler: function
:returns: bool, whether the handler was removed from the global hooks
"""
for i, cb in enumerate(state.global_hooks):
cb = cb()
if cb is not None and cb is handler:
state.global_hooks.pop(i)
log.info("removing a global hook callback")
return True
return False |
def wrap2cylinder(script, radius=1, pitch=0, taper=0, pitch_func=None,
taper_func=None):
"""Deform mesh around cylinder of radius and axis z
y = 0 will be on the surface of radius "radius"
pitch != 0 will create a helix, with distance "pitch" traveled in z for each rotation
taper = change in r over z. E.g. a value of 0.5 will shrink r by 0.5 for every z length of 1
"""
"""vert_function(s=s, x='(%s+y-taper)*sin(x/(%s+y))' % (radius, radius),
y='(%s+y)*cos(x/(%s+y))' % (radius, radius),
z='z-%s*x/(2*%s*(%s+y))' % (pitch, pi, radius))"""
if pitch_func is None:
pitch_func = '-(pitch)*x/(2*pi*(radius))'
pitch_func = pitch_func.replace(
'pitch', str(pitch)).replace(
'pi', str(math.pi)).replace(
'radius', str(radius))
if taper_func is None:
taper_func = '-(taper)*(pitch_func)'
taper_func = taper_func.replace(
'taper', str(taper)).replace(
'pitch_func', str(pitch_func)).replace(
'pi', str(math.pi))
x_func = '(y+(radius)+(taper_func))*sin(x/(radius))'.replace(
'radius', str(radius)).replace('taper_func', str(taper_func))
y_func = '(y+(radius)+(taper_func))*cos(x/(radius))'.replace(
'radius', str(radius)).replace('taper_func', str(taper_func))
z_func = 'z+(pitch_func)'.replace('pitch_func', str(pitch_func))
vert_function(script, x_func, y_func, z_func)
return None | Deform mesh around cylinder of radius and axis z
y = 0 will be on the surface of radius "radius"
pitch != 0 will create a helix, with distance "pitch" traveled in z for each rotation
taper = change in r over z. E.g. a value of 0.5 will shrink r by 0.5 for every z length of 1 | Below is the the instruction that describes the task:
### Input:
Deform mesh around cylinder of radius and axis z
y = 0 will be on the surface of radius "radius"
pitch != 0 will create a helix, with distance "pitch" traveled in z for each rotation
taper = change in r over z. E.g. a value of 0.5 will shrink r by 0.5 for every z length of 1
### Response:
def wrap2cylinder(script, radius=1, pitch=0, taper=0, pitch_func=None,
taper_func=None):
"""Deform mesh around cylinder of radius and axis z
y = 0 will be on the surface of radius "radius"
pitch != 0 will create a helix, with distance "pitch" traveled in z for each rotation
taper = change in r over z. E.g. a value of 0.5 will shrink r by 0.5 for every z length of 1
"""
"""vert_function(s=s, x='(%s+y-taper)*sin(x/(%s+y))' % (radius, radius),
y='(%s+y)*cos(x/(%s+y))' % (radius, radius),
z='z-%s*x/(2*%s*(%s+y))' % (pitch, pi, radius))"""
if pitch_func is None:
pitch_func = '-(pitch)*x/(2*pi*(radius))'
pitch_func = pitch_func.replace(
'pitch', str(pitch)).replace(
'pi', str(math.pi)).replace(
'radius', str(radius))
if taper_func is None:
taper_func = '-(taper)*(pitch_func)'
taper_func = taper_func.replace(
'taper', str(taper)).replace(
'pitch_func', str(pitch_func)).replace(
'pi', str(math.pi))
x_func = '(y+(radius)+(taper_func))*sin(x/(radius))'.replace(
'radius', str(radius)).replace('taper_func', str(taper_func))
y_func = '(y+(radius)+(taper_func))*cos(x/(radius))'.replace(
'radius', str(radius)).replace('taper_func', str(taper_func))
z_func = 'z+(pitch_func)'.replace('pitch_func', str(pitch_func))
vert_function(script, x_func, y_func, z_func)
return None |
def _format_with_same_year_and_month(format_specifier):
"""
Return a version of `format_specifier` that renders a date
assuming it has the same year and month as another date. Usually this
    means omitting the year and month.
This can be overridden by specifying a format that has
`_SAME_YEAR_SAME_MONTH` appended to the name in the project's `formats`
spec.
"""
test_format_specifier = format_specifier + "_SAME_YEAR_SAME_MONTH"
test_format = get_format(test_format_specifier, use_l10n=True)
if test_format == test_format_specifier:
# this format string didn't resolve to anything and may be a raw format.
# Use a regex to remove year and month markers instead.
no_year = re.sub(YEAR_RE, '', get_format(format_specifier))
return re.sub(MONTH_RE, '', no_year)
else:
return test_format | Return a version of `format_specifier` that renders a date
assuming it has the same year and month as another date. Usually this
    means omitting the year and month.
This can be overridden by specifying a format that has
`_SAME_YEAR_SAME_MONTH` appended to the name in the project's `formats`
spec. | Below is the the instruction that describes the task:
### Input:
Return a version of `format_specifier` that renders a date
assuming it has the same year and month as another date. Usually this
    means omitting the year and month.
This can be overridden by specifying a format that has
`_SAME_YEAR_SAME_MONTH` appended to the name in the project's `formats`
spec.
### Response:
def _format_with_same_year_and_month(format_specifier):
"""
Return a version of `format_specifier` that renders a date
assuming it has the same year and month as another date. Usually this
    means omitting the year and month.
This can be overridden by specifying a format that has
`_SAME_YEAR_SAME_MONTH` appended to the name in the project's `formats`
spec.
"""
test_format_specifier = format_specifier + "_SAME_YEAR_SAME_MONTH"
test_format = get_format(test_format_specifier, use_l10n=True)
if test_format == test_format_specifier:
# this format string didn't resolve to anything and may be a raw format.
# Use a regex to remove year and month markers instead.
no_year = re.sub(YEAR_RE, '', get_format(format_specifier))
return re.sub(MONTH_RE, '', no_year)
else:
return test_format |
def constructTx(self):
""" Construct the actual transaction and store it in the class's dict
store
"""
ops = list()
for op in self.ops:
if isinstance(op, ProposalBuilder):
                # This operation is a proposal and needs to be dealt with
# differently
proposal = op.get_raw()
if proposal:
ops.append(proposal)
elif isinstance(op, self.operation_class):
ops.extend([op])
else:
# otherwise, we simply wrap ops into Operations
ops.extend([self.operation_class(op)])
# We now wrap everything into an actual transaction
ops = self.add_required_fees(ops, asset_id=self.fee_asset_id)
expiration = formatTimeFromNow(
self.expiration
or self.blockchain.expiration
or 30 # defaults to 30 seconds
)
ref_block_num, ref_block_prefix = self.get_block_params()
self.tx = self.signed_transaction_class(
ref_block_num=ref_block_num,
ref_block_prefix=ref_block_prefix,
expiration=expiration,
operations=ops,
)
dict.update(self, self.tx.json())
self._unset_require_reconstruction() | Construct the actual transaction and store it in the class's dict
store | Below is the the instruction that describes the task:
### Input:
Construct the actual transaction and store it in the class's dict
store
### Response:
def constructTx(self):
""" Construct the actual transaction and store it in the class's dict
store
"""
ops = list()
for op in self.ops:
if isinstance(op, ProposalBuilder):
                # This operation is a proposal and needs to be dealt with
# differently
proposal = op.get_raw()
if proposal:
ops.append(proposal)
elif isinstance(op, self.operation_class):
ops.extend([op])
else:
# otherwise, we simply wrap ops into Operations
ops.extend([self.operation_class(op)])
# We now wrap everything into an actual transaction
ops = self.add_required_fees(ops, asset_id=self.fee_asset_id)
expiration = formatTimeFromNow(
self.expiration
or self.blockchain.expiration
or 30 # defaults to 30 seconds
)
ref_block_num, ref_block_prefix = self.get_block_params()
self.tx = self.signed_transaction_class(
ref_block_num=ref_block_num,
ref_block_prefix=ref_block_prefix,
expiration=expiration,
operations=ops,
)
dict.update(self, self.tx.json())
self._unset_require_reconstruction() |
def run(self):
"""Execute the build command."""
module = self.distribution.ext_modules[0]
base_dir = os.path.dirname(__file__)
if base_dir:
os.chdir(base_dir)
exclusions = []
for define in self.define or []:
module.define_macros.append(define)
for library in self.libraries or []:
module.libraries.append(library)
building_for_windows = self.plat_name in ('win32','win-amd64')
building_for_osx = 'macosx' in self.plat_name
building_for_linux = 'linux' in self.plat_name
building_for_freebsd = 'freebsd' in self.plat_name
building_for_openbsd = 'openbsd' in self.plat_name # need testing
if building_for_linux:
module.define_macros.append(('USE_LINUX_PROC', '1'))
elif building_for_windows:
module.define_macros.append(('USE_WINDOWS_PROC', '1'))
module.define_macros.append(('_CRT_SECURE_NO_WARNINGS', '1'))
module.libraries.append('kernel32')
module.libraries.append('advapi32')
module.libraries.append('user32')
module.libraries.append('crypt32')
module.libraries.append('ws2_32')
elif building_for_osx:
module.define_macros.append(('USE_MACH_PROC', '1'))
module.include_dirs.append('/usr/local/opt/openssl/include')
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
elif building_for_freebsd:
module.define_macros.append(('USE_FREEBSD_PROC', '1'))
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
elif building_for_openbsd:
module.define_macros.append(('USE_OPENBSD_PROC', '1'))
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
else:
module.define_macros.append(('USE_NO_PROC', '1'))
if has_function('memmem'):
module.define_macros.append(('HAVE_MEMMEM', '1'))
if has_function('strlcpy'):
module.define_macros.append(('HAVE_STRLCPY', '1'))
if has_function('strlcat'):
module.define_macros.append(('HAVE_STRLCAT', '1'))
if self.enable_profiling:
module.define_macros.append(('PROFILING_ENABLED', '1'))
if self.dynamic_linking:
module.libraries.append('yara')
else:
if not self.define or not ('HASH_MODULE', '1') in self.define:
if (has_function('MD5_Init', libraries=['crypto']) and
has_function('SHA256_Init', libraries=['crypto'])):
module.define_macros.append(('HASH_MODULE', '1'))
module.define_macros.append(('HAVE_LIBCRYPTO', '1'))
module.libraries.append('crypto')
else:
exclusions.append('yara/libyara/modules/hash.c')
if self.enable_magic:
module.define_macros.append(('MAGIC_MODULE', '1'))
module.libraries.append('magic')
else:
exclusions.append('yara/libyara/modules/magic.c')
if self.enable_cuckoo:
module.define_macros.append(('CUCKOO_MODULE', '1'))
module.libraries.append('jansson')
else:
exclusions.append('yara/libyara/modules/cuckoo.c')
if self.enable_dotnet:
module.define_macros.append(('DOTNET_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/dotnet.c')
if self.enable_dex:
module.define_macros.append(('DEX_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/dex.c')
if self.enable_macho:
module.define_macros.append(('MACHO_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/macho.c')
exclusions = [os.path.normpath(x) for x in exclusions]
for directory, _, files in os.walk('yara/libyara/'):
for x in files:
x = os.path.normpath(os.path.join(directory, x))
if x.endswith('.c') and x not in exclusions:
module.sources.append(x)
build_ext.run(self) | Execute the build command. | Below is the the instruction that describes the task:
### Input:
Execute the build command.
### Response:
def run(self):
"""Execute the build command."""
module = self.distribution.ext_modules[0]
base_dir = os.path.dirname(__file__)
if base_dir:
os.chdir(base_dir)
exclusions = []
for define in self.define or []:
module.define_macros.append(define)
for library in self.libraries or []:
module.libraries.append(library)
building_for_windows = self.plat_name in ('win32','win-amd64')
building_for_osx = 'macosx' in self.plat_name
building_for_linux = 'linux' in self.plat_name
building_for_freebsd = 'freebsd' in self.plat_name
building_for_openbsd = 'openbsd' in self.plat_name # need testing
if building_for_linux:
module.define_macros.append(('USE_LINUX_PROC', '1'))
elif building_for_windows:
module.define_macros.append(('USE_WINDOWS_PROC', '1'))
module.define_macros.append(('_CRT_SECURE_NO_WARNINGS', '1'))
module.libraries.append('kernel32')
module.libraries.append('advapi32')
module.libraries.append('user32')
module.libraries.append('crypt32')
module.libraries.append('ws2_32')
elif building_for_osx:
module.define_macros.append(('USE_MACH_PROC', '1'))
module.include_dirs.append('/usr/local/opt/openssl/include')
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
elif building_for_freebsd:
module.define_macros.append(('USE_FREEBSD_PROC', '1'))
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
elif building_for_openbsd:
module.define_macros.append(('USE_OPENBSD_PROC', '1'))
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
else:
module.define_macros.append(('USE_NO_PROC', '1'))
if has_function('memmem'):
module.define_macros.append(('HAVE_MEMMEM', '1'))
if has_function('strlcpy'):
module.define_macros.append(('HAVE_STRLCPY', '1'))
if has_function('strlcat'):
module.define_macros.append(('HAVE_STRLCAT', '1'))
if self.enable_profiling:
module.define_macros.append(('PROFILING_ENABLED', '1'))
if self.dynamic_linking:
module.libraries.append('yara')
else:
if not self.define or not ('HASH_MODULE', '1') in self.define:
if (has_function('MD5_Init', libraries=['crypto']) and
has_function('SHA256_Init', libraries=['crypto'])):
module.define_macros.append(('HASH_MODULE', '1'))
module.define_macros.append(('HAVE_LIBCRYPTO', '1'))
module.libraries.append('crypto')
else:
exclusions.append('yara/libyara/modules/hash.c')
if self.enable_magic:
module.define_macros.append(('MAGIC_MODULE', '1'))
module.libraries.append('magic')
else:
exclusions.append('yara/libyara/modules/magic.c')
if self.enable_cuckoo:
module.define_macros.append(('CUCKOO_MODULE', '1'))
module.libraries.append('jansson')
else:
exclusions.append('yara/libyara/modules/cuckoo.c')
if self.enable_dotnet:
module.define_macros.append(('DOTNET_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/dotnet.c')
if self.enable_dex:
module.define_macros.append(('DEX_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/dex.c')
if self.enable_macho:
module.define_macros.append(('MACHO_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/macho.c')
exclusions = [os.path.normpath(x) for x in exclusions]
for directory, _, files in os.walk('yara/libyara/'):
for x in files:
x = os.path.normpath(os.path.join(directory, x))
if x.endswith('.c') and x not in exclusions:
module.sources.append(x)
build_ext.run(self) |
def get_issuer(self):
"""
Gets the Issuer of the Logout Response Message
:return: The Issuer
:rtype: string
"""
issuer = None
issuer_nodes = self.__query('/samlp:LogoutResponse/saml:Issuer')
if len(issuer_nodes) == 1:
issuer = OneLogin_Saml2_Utils.element_text(issuer_nodes[0])
return issuer | Gets the Issuer of the Logout Response Message
:return: The Issuer
:rtype: string | Below is the the instruction that describes the task:
### Input:
Gets the Issuer of the Logout Response Message
:return: The Issuer
:rtype: string
### Response:
def get_issuer(self):
"""
Gets the Issuer of the Logout Response Message
:return: The Issuer
:rtype: string
"""
issuer = None
issuer_nodes = self.__query('/samlp:LogoutResponse/saml:Issuer')
if len(issuer_nodes) == 1:
issuer = OneLogin_Saml2_Utils.element_text(issuer_nodes[0])
return issuer |
def authorized_signup_handler(resp, remote, *args, **kwargs):
"""Handle sign-in/up functionality.
:param remote: The remote application.
:param resp: The response.
:returns: Redirect response.
"""
# Remove any previously stored auto register session key
session.pop(token_session_key(remote.name) + '_autoregister', None)
# Store token in session
# ----------------------
# Set token in session - token object only returned if
    # current_user.is_authenticated().
token = response_token_setter(remote, resp)
handlers = current_oauthclient.signup_handlers[remote.name]
# Sign-in/up user
# ---------------
if not current_user.is_authenticated:
account_info = handlers['info'](resp)
account_info_received.send(
remote, token=token, response=resp, account_info=account_info
)
user = oauth_get_user(
remote.consumer_key,
account_info=account_info,
access_token=token_getter(remote)[0],
)
if user is None:
# Auto sign-up if user not found
form = create_csrf_disabled_registrationform()
form = fill_form(
form,
account_info['user']
)
user = oauth_register(form)
# if registration fails ...
if user is None:
# requires extra information
session[
token_session_key(remote.name) + '_autoregister'] = True
session[token_session_key(remote.name) +
'_account_info'] = account_info
session[token_session_key(remote.name) +
'_response'] = resp
db.session.commit()
return redirect(url_for(
'.signup',
remote_app=remote.name,
))
# Authenticate user
if not oauth_authenticate(remote.consumer_key, user,
require_existing_link=False):
return current_app.login_manager.unauthorized()
# Link account
# ------------
# Need to store token in database instead of only the session when
# called first time.
token = response_token_setter(remote, resp)
# Setup account
# -------------
if not token.remote_account.extra_data:
account_setup = handlers['setup'](token, resp)
account_setup_received.send(
remote, token=token, response=resp, account_setup=account_setup
)
db.session.commit()
account_setup_committed.send(remote, token=token)
else:
db.session.commit()
# Redirect to next
next_url = get_session_next_url(remote.name)
if next_url:
return redirect(next_url)
return redirect(url_for('invenio_oauthclient_settings.index')) | Handle sign-in/up functionality.
:param remote: The remote application.
:param resp: The response.
:returns: Redirect response. | Below is the the instruction that describes the task:
### Input:
Handle sign-in/up functionality.
:param remote: The remote application.
:param resp: The response.
:returns: Redirect response.
### Response:
def authorized_signup_handler(resp, remote, *args, **kwargs):
"""Handle sign-in/up functionality.
:param remote: The remote application.
:param resp: The response.
:returns: Redirect response.
"""
# Remove any previously stored auto register session key
session.pop(token_session_key(remote.name) + '_autoregister', None)
# Store token in session
# ----------------------
# Set token in session - token object only returned if
    # current_user.is_authenticated().
token = response_token_setter(remote, resp)
handlers = current_oauthclient.signup_handlers[remote.name]
# Sign-in/up user
# ---------------
if not current_user.is_authenticated:
account_info = handlers['info'](resp)
account_info_received.send(
remote, token=token, response=resp, account_info=account_info
)
user = oauth_get_user(
remote.consumer_key,
account_info=account_info,
access_token=token_getter(remote)[0],
)
if user is None:
# Auto sign-up if user not found
form = create_csrf_disabled_registrationform()
form = fill_form(
form,
account_info['user']
)
user = oauth_register(form)
# if registration fails ...
if user is None:
# requires extra information
session[
token_session_key(remote.name) + '_autoregister'] = True
session[token_session_key(remote.name) +
'_account_info'] = account_info
session[token_session_key(remote.name) +
'_response'] = resp
db.session.commit()
return redirect(url_for(
'.signup',
remote_app=remote.name,
))
# Authenticate user
if not oauth_authenticate(remote.consumer_key, user,
require_existing_link=False):
return current_app.login_manager.unauthorized()
# Link account
# ------------
# Need to store token in database instead of only the session when
# called first time.
token = response_token_setter(remote, resp)
# Setup account
# -------------
if not token.remote_account.extra_data:
account_setup = handlers['setup'](token, resp)
account_setup_received.send(
remote, token=token, response=resp, account_setup=account_setup
)
db.session.commit()
account_setup_committed.send(remote, token=token)
else:
db.session.commit()
# Redirect to next
next_url = get_session_next_url(remote.name)
if next_url:
return redirect(next_url)
return redirect(url_for('invenio_oauthclient_settings.index')) |
def get_pourbaix_plot(self, limits=None, title="",
label_domains=True, plt=None):
"""
Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
title (str): Title to display on plot
label_domains (bool): whether to label pourbaix domains
plt (pyplot): Pyplot instance for plotting
Returns:
plt (pyplot) - matplotlib plot object with pourbaix diagram
"""
if limits is None:
limits = [[-2, 16], [-3, 3]]
plt = plt or pretty_plot(16)
xlim = limits[0]
ylim = limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
for entry, vertices in self._pd._stable_domain_vertices.items():
center = np.average(vertices, axis=0)
x, y = np.transpose(np.vstack([vertices, vertices[0]]))
plt.plot(x, y, 'k-', linewidth=lw)
if label_domains:
plt.annotate(generate_entry_label(entry), center, ha='center',
va='center', fontsize=20, color="b")
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight='bold')
return plt | Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
title (str): Title to display on plot
label_domains (bool): whether to label pourbaix domains
plt (pyplot): Pyplot instance for plotting
Returns:
plt (pyplot) - matplotlib plot object with pourbaix diagram | Below is the the instruction that describes the task:
### Input:
Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
title (str): Title to display on plot
label_domains (bool): whether to label pourbaix domains
plt (pyplot): Pyplot instance for plotting
Returns:
plt (pyplot) - matplotlib plot object with pourbaix diagram
### Response:
def get_pourbaix_plot(self, limits=None, title="",
label_domains=True, plt=None):
"""
Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
title (str): Title to display on plot
label_domains (bool): whether to label pourbaix domains
plt (pyplot): Pyplot instance for plotting
Returns:
plt (pyplot) - matplotlib plot object with pourbaix diagram
"""
if limits is None:
limits = [[-2, 16], [-3, 3]]
plt = plt or pretty_plot(16)
xlim = limits[0]
ylim = limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
for entry, vertices in self._pd._stable_domain_vertices.items():
center = np.average(vertices, axis=0)
x, y = np.transpose(np.vstack([vertices, vertices[0]]))
plt.plot(x, y, 'k-', linewidth=lw)
if label_domains:
plt.annotate(generate_entry_label(entry), center, ha='center',
va='center', fontsize=20, color="b")
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight='bold')
return plt |
def send(self, src_file, filename, st_mode=DEFAULT_PUSH_MODE, mtime=None,
timeout=None):
"""Push a file-like object to the device.
Args:
src_file: File-like object for reading from
filename: Filename to push to on the device
st_mode: stat mode for filename on the device
mtime: modification time to set for the file on the device
timeout: Timeout to use for the send operation.
Raises:
AdbProtocolError: If we get an unexpected response.
AdbRemoteError: If there's a remote error (but valid protocol).
"""
transport = DataFilesyncTransport(self.stream)
transport.write_data('SEND', '%s,%s' % (filename, st_mode), timeout)
try:
while True:
data = src_file.read(MAX_PUSH_DATA_BYTES)
if not data:
break
transport.write_data('DATA', data, timeout)
mtime = mtime or int(time.time())
transport.write_message(
FilesyncMessageTypes.DoneMessage('DONE', mtime), timeout)
except usb_exceptions.AdbStreamClosedError:
# Try to do one last read to see if we can get any more information,
# ignoring any errors for this Read attempt. Note that this always
# raises, either a new AdbRemoteError, or the AdbStreamClosedError.
self._check_for_fail_message(transport, sys.exc_info(), timeout)
data_msg = transport.read_message(timeout)
data_msg.assert_command_is('OKAY') | Push a file-like object to the device.
Args:
src_file: File-like object for reading from
filename: Filename to push to on the device
st_mode: stat mode for filename on the device
mtime: modification time to set for the file on the device
timeout: Timeout to use for the send operation.
Raises:
AdbProtocolError: If we get an unexpected response.
AdbRemoteError: If there's a remote error (but valid protocol). | Below is the the instruction that describes the task:
### Input:
Push a file-like object to the device.
Args:
src_file: File-like object for reading from
filename: Filename to push to on the device
st_mode: stat mode for filename on the device
mtime: modification time to set for the file on the device
timeout: Timeout to use for the send operation.
Raises:
AdbProtocolError: If we get an unexpected response.
AdbRemoteError: If there's a remote error (but valid protocol).
### Response:
def send(self, src_file, filename, st_mode=DEFAULT_PUSH_MODE, mtime=None,
timeout=None):
"""Push a file-like object to the device.
Args:
src_file: File-like object for reading from
filename: Filename to push to on the device
st_mode: stat mode for filename on the device
mtime: modification time to set for the file on the device
timeout: Timeout to use for the send operation.
Raises:
AdbProtocolError: If we get an unexpected response.
AdbRemoteError: If there's a remote error (but valid protocol).
"""
transport = DataFilesyncTransport(self.stream)
transport.write_data('SEND', '%s,%s' % (filename, st_mode), timeout)
try:
while True:
data = src_file.read(MAX_PUSH_DATA_BYTES)
if not data:
break
transport.write_data('DATA', data, timeout)
mtime = mtime or int(time.time())
transport.write_message(
FilesyncMessageTypes.DoneMessage('DONE', mtime), timeout)
except usb_exceptions.AdbStreamClosedError:
# Try to do one last read to see if we can get any more information,
# ignoring any errors for this Read attempt. Note that this always
# raises, either a new AdbRemoteError, or the AdbStreamClosedError.
self._check_for_fail_message(transport, sys.exc_info(), timeout)
data_msg = transport.read_message(timeout)
data_msg.assert_command_is('OKAY') |
def fundarb(
self,
jsl_username,
jsl_password,
avolume=100,
bvolume=100,
ptype="price",
):
"""以字典形式返回分级A数据
:param jsl_username: 集思录用户名
:param jsl_password: 集思路登录密码
:param avolume: A成交额,单位百万
:param bvolume: B成交额,单位百万
:param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一
"""
session = requests.session()
headers = {
# pylint: disable=line-too-long
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
}
session.headers.update(headers)
logindata = dict(
return_url="http://www.jisilu.cn/",
user_name=jsl_username,
password=jsl_password,
net_auto_login="1",
_post_type="ajax",
)
rep = session.post(self.__jsl_login_url, data=logindata)
if rep.json()["err"] is not None:
return rep.json()
        # add the current ctime
fundarb_url = self.__fundarb_url.format(ctime=int(time.time()))
pdata = dict(
avolume=avolume,
bvolume=bvolume,
ptype=ptype,
is_search="1",
market=["sh", "sz"],
rp="50",
)
        # request the data
rep = session.post(fundarb_url, data=pdata)
        # get the returned JSON string
fundajson = json.loads(rep.text)
        # format the returned JSON string
data = self.formatfundajson(fundajson)
self.__fundarb = data
        return self.__fundarb | Return structured-fund class-A data as a dict
        :param jsl_username: Jisilu username
        :param jsl_password: Jisilu login password
        :param avolume: class-A turnover, in millions
        :param bvolume: class-B turnover, in millions
        :param ptype: premium calculation method; price=last price, buy=best bid, sell=best ask | Below is the the instruction that describes the task:
### Input:
Return structured-fund class-A data as a dict
        :param jsl_username: Jisilu username
        :param jsl_password: Jisilu login password
        :param avolume: class-A turnover, in millions
        :param bvolume: class-B turnover, in millions
        :param ptype: premium calculation method; price=last price, buy=best bid, sell=best ask
### Response:
def fundarb(
self,
jsl_username,
jsl_password,
avolume=100,
bvolume=100,
ptype="price",
):
"""以字典形式返回分级A数据
:param jsl_username: 集思录用户名
:param jsl_password: 集思路登录密码
:param avolume: A成交额,单位百万
:param bvolume: B成交额,单位百万
:param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一
"""
session = requests.session()
headers = {
# pylint: disable=line-too-long
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
}
session.headers.update(headers)
logindata = dict(
return_url="http://www.jisilu.cn/",
user_name=jsl_username,
password=jsl_password,
net_auto_login="1",
_post_type="ajax",
)
rep = session.post(self.__jsl_login_url, data=logindata)
if rep.json()["err"] is not None:
return rep.json()
        # add the current ctime
fundarb_url = self.__fundarb_url.format(ctime=int(time.time()))
pdata = dict(
avolume=avolume,
bvolume=bvolume,
ptype=ptype,
is_search="1",
market=["sh", "sz"],
rp="50",
)
        # request the data
rep = session.post(fundarb_url, data=pdata)
        # get the returned JSON string
fundajson = json.loads(rep.text)
        # format the returned JSON string
data = self.formatfundajson(fundajson)
self.__fundarb = data
return self.__fundarb |
def line(self, x, label=None, y='bottom', color='grey', **kwargs):
'''
Creates a vertical line in the plot.
:param x:
The x coordinate of the line. Should be in the same units
as the x-axis.
:param string label:
The label to be displayed.
:param y:
May be 'top', 'bottom' or int.
The y coordinate of the text-label.
:param color color:
The color of the line.
'''
super(DiffPlotter, self).line(x, label, y, color, self.ax1, **kwargs)
super(DiffPlotter, self).line(x, '', 0, color, self.ax2, **kwargs) | Creates a vertical line in the plot.
:param x:
The x coordinate of the line. Should be in the same units
as the x-axis.
:param string label:
The label to be displayed.
:param y:
May be 'top', 'bottom' or int.
The y coordinate of the text-label.
:param color color:
The color of the line. | Below is the the instruction that describes the task:
### Input:
Creates a vertical line in the plot.
:param x:
The x coordinate of the line. Should be in the same units
as the x-axis.
:param string label:
The label to be displayed.
:param y:
May be 'top', 'bottom' or int.
The y coordinate of the text-label.
:param color color:
The color of the line.
### Response:
def line(self, x, label=None, y='bottom', color='grey', **kwargs):
'''
Creates a vertical line in the plot.
:param x:
The x coordinate of the line. Should be in the same units
as the x-axis.
:param string label:
The label to be displayed.
:param y:
May be 'top', 'bottom' or int.
The y coordinate of the text-label.
:param color color:
The color of the line.
'''
super(DiffPlotter, self).line(x, label, y, color, self.ax1, **kwargs)
super(DiffPlotter, self).line(x, '', 0, color, self.ax2, **kwargs) |
def deleteLink(self, linkdict):
"""Delete link if PDF"""
CheckParent(self)
val = _fitz.Page_deleteLink(self, linkdict)
if linkdict["xref"] == 0: return
linkid = linkdict["id"]
try:
linkobj = self._annot_refs[linkid]
linkobj._erase()
except:
pass
return val | Delete link if PDF | Below is the the instruction that describes the task:
### Input:
Delete link if PDF
### Response:
def deleteLink(self, linkdict):
"""Delete link if PDF"""
CheckParent(self)
val = _fitz.Page_deleteLink(self, linkdict)
if linkdict["xref"] == 0: return
linkid = linkdict["id"]
try:
linkobj = self._annot_refs[linkid]
linkobj._erase()
except:
pass
return val |
def log(self, message, severity=INFO, tag=u""):
"""
Add a given message to the log, and return its time.
:param string message: the message to be added
:param severity: the severity of the message
:type severity: :class:`~aeneas.logger.Logger`
:param string tag: the tag associated with the message;
usually, the name of the class generating the entry
:rtype: datetime
"""
entry = _LogEntry(
severity=severity,
time=datetime.datetime.now(),
tag=tag,
indentation=self.indentation,
message=self._sanitize(message)
)
self.entries.append(entry)
if self.tee:
gf.safe_print(entry.pretty_print(show_datetime=self.tee_show_datetime))
return entry.time | Add a given message to the log, and return its time.
:param string message: the message to be added
:param severity: the severity of the message
:type severity: :class:`~aeneas.logger.Logger`
:param string tag: the tag associated with the message;
usually, the name of the class generating the entry
:rtype: datetime | Below is the the instruction that describes the task:
### Input:
Add a given message to the log, and return its time.
:param string message: the message to be added
:param severity: the severity of the message
:type severity: :class:`~aeneas.logger.Logger`
:param string tag: the tag associated with the message;
usually, the name of the class generating the entry
:rtype: datetime
### Response:
def log(self, message, severity=INFO, tag=u""):
"""
Add a given message to the log, and return its time.
:param string message: the message to be added
:param severity: the severity of the message
:type severity: :class:`~aeneas.logger.Logger`
:param string tag: the tag associated with the message;
usually, the name of the class generating the entry
:rtype: datetime
"""
entry = _LogEntry(
severity=severity,
time=datetime.datetime.now(),
tag=tag,
indentation=self.indentation,
message=self._sanitize(message)
)
self.entries.append(entry)
if self.tee:
gf.safe_print(entry.pretty_print(show_datetime=self.tee_show_datetime))
return entry.time |
def maintained_selection():
"""Maintain selection during context
Example:
>>> with maintained_selection():
... # Modify selection
... cmds.select('node', replace=True)
>>> # Selection restored
"""
previous_selection = cmds.ls(selection=True)
try:
yield
finally:
if previous_selection:
cmds.select(previous_selection,
replace=True,
noExpand=True)
else:
cmds.select(deselect=True,
noExpand=True) | Maintain selection during context
Example:
>>> with maintained_selection():
... # Modify selection
... cmds.select('node', replace=True)
>>> # Selection restored | Below is the the instruction that describes the task:
### Input:
Maintain selection during context
Example:
>>> with maintained_selection():
... # Modify selection
... cmds.select('node', replace=True)
>>> # Selection restored
### Response:
def maintained_selection():
"""Maintain selection during context
Example:
>>> with maintained_selection():
... # Modify selection
... cmds.select('node', replace=True)
>>> # Selection restored
"""
previous_selection = cmds.ls(selection=True)
try:
yield
finally:
if previous_selection:
cmds.select(previous_selection,
replace=True,
noExpand=True)
else:
cmds.select(deselect=True,
noExpand=True) |
def update_git_repos(clean=False):
'''
Checkout git repos containing :ref:`Windows Software Package Definitions
<windows-package-manager>`.
.. important::
This function requires `Git for Windows`_ to be installed in order to
work. When installing, make sure to select an installation option which
permits the git executable to be run from the Command Prompt.
.. _`Git for Windows`: https://git-for-windows.github.io/
clean : False
Clean repo cachedirs which are not configured under
:conf_minion:`winrepo_remotes`.
.. note::
This option only applies if either pygit2_ or GitPython_ is
installed into Salt's bundled Python.
.. warning::
This argument should not be set to ``True`` if a mix of git and
non-git repo definitions are being used, as it will result in the
non-git repo definitions being removed.
.. versionadded:: 2015.8.0
.. _GitPython: https://github.com/gitpython-developers/GitPython
.. _pygit2: https://github.com/libgit2/pygit2
CLI Example:
.. code-block:: bash
salt-call winrepo.update_git_repos
'''
if not salt.utils.path.which('git'):
raise CommandExecutionError(
'Git for Windows is not installed, or not configured to be '
'accessible from the Command Prompt'
)
return _update_git_repos(opts=__opts__, clean=clean, masterless=True) | Checkout git repos containing :ref:`Windows Software Package Definitions
<windows-package-manager>`.
.. important::
This function requires `Git for Windows`_ to be installed in order to
work. When installing, make sure to select an installation option which
permits the git executable to be run from the Command Prompt.
.. _`Git for Windows`: https://git-for-windows.github.io/
clean : False
Clean repo cachedirs which are not configured under
:conf_minion:`winrepo_remotes`.
.. note::
This option only applies if either pygit2_ or GitPython_ is
installed into Salt's bundled Python.
.. warning::
This argument should not be set to ``True`` if a mix of git and
non-git repo definitions are being used, as it will result in the
non-git repo definitions being removed.
.. versionadded:: 2015.8.0
.. _GitPython: https://github.com/gitpython-developers/GitPython
.. _pygit2: https://github.com/libgit2/pygit2
CLI Example:
.. code-block:: bash
salt-call winrepo.update_git_repos | Below is the the instruction that describes the task:
### Input:
Checkout git repos containing :ref:`Windows Software Package Definitions
<windows-package-manager>`.
.. important::
This function requires `Git for Windows`_ to be installed in order to
work. When installing, make sure to select an installation option which
permits the git executable to be run from the Command Prompt.
.. _`Git for Windows`: https://git-for-windows.github.io/
clean : False
Clean repo cachedirs which are not configured under
:conf_minion:`winrepo_remotes`.
.. note::
This option only applies if either pygit2_ or GitPython_ is
installed into Salt's bundled Python.
.. warning::
This argument should not be set to ``True`` if a mix of git and
non-git repo definitions are being used, as it will result in the
non-git repo definitions being removed.
.. versionadded:: 2015.8.0
.. _GitPython: https://github.com/gitpython-developers/GitPython
.. _pygit2: https://github.com/libgit2/pygit2
CLI Example:
.. code-block:: bash
salt-call winrepo.update_git_repos
### Response:
def update_git_repos(clean=False):
'''
Checkout git repos containing :ref:`Windows Software Package Definitions
<windows-package-manager>`.
.. important::
This function requires `Git for Windows`_ to be installed in order to
work. When installing, make sure to select an installation option which
permits the git executable to be run from the Command Prompt.
.. _`Git for Windows`: https://git-for-windows.github.io/
clean : False
Clean repo cachedirs which are not configured under
:conf_minion:`winrepo_remotes`.
.. note::
This option only applies if either pygit2_ or GitPython_ is
installed into Salt's bundled Python.
.. warning::
This argument should not be set to ``True`` if a mix of git and
non-git repo definitions are being used, as it will result in the
non-git repo definitions being removed.
.. versionadded:: 2015.8.0
.. _GitPython: https://github.com/gitpython-developers/GitPython
.. _pygit2: https://github.com/libgit2/pygit2
CLI Example:
.. code-block:: bash
salt-call winrepo.update_git_repos
'''
if not salt.utils.path.which('git'):
raise CommandExecutionError(
'Git for Windows is not installed, or not configured to be '
'accessible from the Command Prompt'
)
return _update_git_repos(opts=__opts__, clean=clean, masterless=True) |
def _make_complex(self):
"""Convert the real SHCoeffs class to the complex class."""
rcomplex_coeffs = _shtools.SHrtoc(self.coeffs,
convention=1, switchcs=0)
# These coefficients are using real floats, and need to be
# converted to complex form.
complex_coeffs = _np.zeros((2, self.lmax+1, self.lmax+1),
dtype='complex')
complex_coeffs[0, :, :] = (rcomplex_coeffs[0, :, :] + 1j *
rcomplex_coeffs[1, :, :])
complex_coeffs[1, :, :] = complex_coeffs[0, :, :].conjugate()
for m in self.degrees():
if m % 2 == 1:
complex_coeffs[1, :, m] = - complex_coeffs[1, :, m]
# complex_coeffs is initialized in this function and can be
# passed as reference
return SHCoeffs.from_array(complex_coeffs,
normalization=self.normalization,
csphase=self.csphase, copy=False) | Convert the real SHCoeffs class to the complex class. | Below is the the instruction that describes the task:
### Input:
Convert the real SHCoeffs class to the complex class.
### Response:
def _make_complex(self):
"""Convert the real SHCoeffs class to the complex class."""
rcomplex_coeffs = _shtools.SHrtoc(self.coeffs,
convention=1, switchcs=0)
# These coefficients are using real floats, and need to be
# converted to complex form.
complex_coeffs = _np.zeros((2, self.lmax+1, self.lmax+1),
dtype='complex')
complex_coeffs[0, :, :] = (rcomplex_coeffs[0, :, :] + 1j *
rcomplex_coeffs[1, :, :])
complex_coeffs[1, :, :] = complex_coeffs[0, :, :].conjugate()
for m in self.degrees():
if m % 2 == 1:
complex_coeffs[1, :, m] = - complex_coeffs[1, :, m]
# complex_coeffs is initialized in this function and can be
# passed as reference
return SHCoeffs.from_array(complex_coeffs,
normalization=self.normalization,
csphase=self.csphase, copy=False) |
def safe_dump(data, stream=None, **kwds):
"""
Serialize a Python object into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=SafeDumper, **kwds) | Serialize a Python object into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead. | Below is the the instruction that describes the task:
### Input:
Serialize a Python object into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
### Response:
def safe_dump(data, stream=None, **kwds):
"""
Serialize a Python object into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=SafeDumper, **kwds) |