code (string, 75–104k chars) | docstring (string, 1–46.9k chars) | text (string, 164–112k chars)
---|---|---|
def parse_formula(fml_file):
"""
Parse and return MaxSAT formula.
"""
if re.search(r'\.wcnf(\.(gz|bz2|lzma|xz))?$', fml_file):
fml = WCNF(from_file=fml_file)
else: # expecting '*.cnf'
fml = CNF(from_file=fml_file).weighted()
return fml | Parse and return MaxSAT formula. | Below is the instruction that describes the task:
### Input:
Parse and return MaxSAT formula.
### Response:
def parse_formula(fml_file):
"""
Parse and return MaxSAT formula.
"""
if re.search(r'\.wcnf(\.(gz|bz2|lzma|xz))?$', fml_file):
fml = WCNF(from_file=fml_file)
else: # expecting '*.cnf'
fml = CNF(from_file=fml_file).weighted()
return fml |
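A minimal usage sketch for the parser above, assuming the names it relies on (re, plus WCNF/CNF from PySAT's pysat.formula); the file name is hypothetical:

import re
from pysat.formula import CNF, WCNF

fml = parse_formula('instance.wcnf.gz')      # decompressed on the fly, parsed as WCNF
print(fml.nv, len(fml.hard), len(fml.soft))  # variable count, hard/soft clause counts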
def get_default_value_by_type(type_, state=None):
"""
Java specifies default values for primitive and reference types. This
method returns the default value for a given type.
:param str type_: Name of type.
:return: Default value for this type.
"""
if type_ in ['byte', 'char', 'short', 'int', 'boolean']:
return BVS('default_value_{}'.format(type_), 32)
elif type_ == "long":
return BVS('default_value_{}'.format(type_), 64)
elif type_ == 'float':
return FPS('default_value_{}'.format(type_), FSORT_FLOAT)
elif type_ == 'double':
return FPS('default_value_{}'.format(type_), FSORT_DOUBLE)
elif state is not None:
if type_ == 'java.lang.String':
return SimSootValue_StringRef.new_string(state, StringS('default_value_{}'.format(type_), 1000))
if type_.endswith('[][]'):
raise NotImplementedError
# multiarray = SimSootExpr_NewMultiArray.new_array(self.state, element_type, size)
# multiarray.add_default_value_generator(lambda s: SimSootExpr_NewMultiArray._generate_inner_array(s, element_type, sizes))
# return multiarray
elif type_.endswith('[]'):
array = SimSootExpr_NewArray.new_array(state, type_[:-2], BVV(2, 32))
return array
else:
return SimSootValue_ThisRef.new_object(state, type_, symbolic=True, init_object=False)
else:
# not a primitive type
# => treat it as a reference
return SootNullConstant() | Java specifies default values for primitive and reference types. This
method returns the default value for a given type.
:param str type_: Name of type.
:return: Default value for this type. | Below is the instruction that describes the task:
### Input:
Java specifies default values for primitive and reference types. This
method returns the default value for a given type.
:param str type_: Name of type.
:return: Default value for this type.
### Response:
def get_default_value_by_type(type_, state=None):
"""
Java specifies default values for primitive and reference types. This
method returns the default value for a given type.
:param str type_: Name of type.
:return: Default value for this type.
"""
if type_ in ['byte', 'char', 'short', 'int', 'boolean']:
return BVS('default_value_{}'.format(type_), 32)
elif type_ == "long":
return BVS('default_value_{}'.format(type_), 64)
elif type_ == 'float':
return FPS('default_value_{}'.format(type_), FSORT_FLOAT)
elif type_ == 'double':
return FPS('default_value_{}'.format(type_), FSORT_DOUBLE)
elif state is not None:
if type_ == 'java.lang.String':
return SimSootValue_StringRef.new_string(state, StringS('default_value_{}'.format(type_), 1000))
if type_.endswith('[][]'):
raise NotImplementedError
# multiarray = SimSootExpr_NewMultiArray.new_array(self.state, element_type, size)
# multiarray.add_default_value_generator(lambda s: SimSootExpr_NewMultiArray._generate_inner_array(s, element_type, sizes))
# return multiarray
elif type_.endswith('[]'):
array = SimSootExpr_NewArray.new_array(state, type_[:-2], BVV(2, 32))
return array
else:
return SimSootValue_ThisRef.new_object(state, type_, symbolic=True, init_object=False)
else:
# not a primitive type
# => treat it as a reference
return SootNullConstant() |
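A hedged usage sketch for the primitive branches, assuming BVS/FPS and the FSORT_* sorts come from claripy, as in angr's Soot (Java) support:

v = get_default_value_by_type('int')     # 32-bit symbolic bitvector
w = get_default_value_by_type('double')  # symbolic IEEE-754 double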
def get_hacr_channels(db=None, gps=None, connection=None,
**connectkwargs):
"""Return the names of all channels present in the given HACR database
"""
# connect if needed
if connection is None:
if gps is None:
gps = from_gps('now')
if db is None:
db = get_database_names(gps, gps)[0]
connection = connect(db=db, **connectkwargs)
# query
out = query("select channel from job where monitorName = 'chacr'")
return [r[0] for r in out] | Return the names of all channels present in the given HACR database | Below is the instruction that describes the task:
### Input:
Return the names of all channels present in the given HACR database
### Response:
def get_hacr_channels(db=None, gps=None, connection=None,
**connectkwargs):
"""Return the names of all channels present in the given HACR database
"""
# connect if needed
if connection is None:
if gps is None:
gps = from_gps('now')
if db is None:
db = get_database_names(gps, gps)[0]
connection = connect(db=db, **connectkwargs)
# query
out = query("select channel from job where monitorName = 'chacr'")
return [r[0] for r in out] |
def human(self):
"""Emit the address in human-readible format (AA.BB.CC)."""
strout = ''
first = True
for i in range(0, 28, 2):
if first:
first = False
else:
strout = strout + '.'
strout = strout + self.hex[i:i + 2]
return strout | Emit the address in human-readable format (AA.BB.CC). | Below is the instruction that describes the task:
### Input:
Emit the address in human-readable format (AA.BB.CC).
### Response:
def human(self):
"""Emit the address in human-readible format (AA.BB.CC)."""
strout = ''
first = True
for i in range(0, 28, 2):
if first:
first = False
else:
strout = strout + '.'
strout = strout + self.hex[i:i + 2]
return strout |
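A standalone illustration of the same pairing logic; hex_str here is a hypothetical 28-character hex address, not part of the original class:

hex_str = '0123456789ABCDEF0123456789AB'
print('.'.join(hex_str[i:i + 2] for i in range(0, 28, 2)))
# -> 01.23.45.67.89.AB.CD.EF.01.23.45.67.89.AB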
def set_temperature(self, zone, temperature, until=None):
"""Sets the temperature of the given zone."""
if until is None:
data = {"Value": temperature, "Status": "Hold", "NextTime": None}
else:
data = {"Value": temperature,
"Status": "Temporary",
"NextTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
self._set_heat_setpoint(zone, data) | Sets the temperature of the given zone. | Below is the instruction that describes the task:
### Input:
Sets the temperature of the given zone.
### Response:
def set_temperature(self, zone, temperature, until=None):
"""Sets the temperature of the given zone."""
if until is None:
data = {"Value": temperature, "Status": "Hold", "NextTime": None}
else:
data = {"Value": temperature,
"Status": "Temporary",
"NextTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
self._set_heat_setpoint(zone, data) |
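A usage sketch, assuming this method lives on an already-authenticated client object ('client' and the zone name are hypothetical):

from datetime import datetime, timedelta

client.set_temperature('Kitchen', 21.5)                  # hold indefinitely
client.set_temperature('Kitchen', 18.0,
                       until=datetime.utcnow() + timedelta(hours=2))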
def pop(self):
"""
Remove and return the last element from the set.
Raises KeyError if the set is empty.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.pop()
3
"""
if not self.items:
raise KeyError("Set is empty")
elem = self.items[-1]
del self.items[-1]
del self.map[elem]
return elem | Remove and return the last element from the set.
Raises KeyError if the set is empty.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.pop()
3 | Below is the instruction that describes the task:
### Input:
Remove and return the last element from the set.
Raises KeyError if the set is empty.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.pop()
3
### Response:
def pop(self):
"""
Remove and return the last element from the set.
Raises KeyError if the set is empty.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.pop()
3
"""
if not self.items:
raise KeyError("Set is empty")
elem = self.items[-1]
del self.items[-1]
del self.map[elem]
return elem |
def get_logger(name):
"""Return a logger with a file handler."""
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
# File output handler
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s %(name)12s %(levelname)8s %(lineno)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger | Return a logger with a file handler. | Below is the instruction that describes the task:
### Input:
Return a logger with a file handler.
### Response:
def get_logger(name):
"""Return a logger with a file handler."""
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
# File output handler
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s %(name)12s %(levelname)8s %(lineno)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger |
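A usage sketch, assuming log_path (a module-level global in the snippet) points at a writable file:

logger = get_logger('myapp')
logger.info('started')   # appended to log_path in the timestamped format above

Note that calling get_logger twice with the same name attaches a second handler and duplicates every message; guarding the addHandler call with `if not logger.handlers:` is a common fix.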
def fitlin(imgarr,refarr):
""" Compute the least-squares fit between two arrays.
A Python translation of 'FITLIN' from 'drutil.f' (Drizzle V2.9).
"""
# Initialize variables
_mat = np.zeros((3,3),dtype=np.float64)
_xorg = imgarr[0][0]
_yorg = imgarr[0][1]
_xoorg = refarr[0][0]
_yoorg = refarr[0][1]
_sigxox = 0.
_sigxoy = 0.
_sigxo = 0.
_sigyox = 0.
_sigyoy = 0.
_sigyo = 0.
_npos = len(imgarr)
# Populate matrices
for i in range(_npos):
_mat[0][0] += np.power((imgarr[i][0] - _xorg),2)
_mat[0][1] += (imgarr[i][0] - _xorg) * (imgarr[i][1] - _yorg)
_mat[0][2] += (imgarr[i][0] - _xorg)
_mat[1][1] += np.power((imgarr[i][1] - _yorg),2)
_mat[1][2] += imgarr[i][1] - _yorg
_sigxox += (refarr[i][0] - _xoorg)*(imgarr[i][0] - _xorg)
_sigxoy += (refarr[i][0] - _xoorg)*(imgarr[i][1] - _yorg)
_sigxo += refarr[i][0] - _xoorg
_sigyox += (refarr[i][1] - _yoorg)*(imgarr[i][0] - _xorg)
_sigyoy += (refarr[i][1] - _yoorg)*(imgarr[i][1] - _yorg)
_sigyo += refarr[i][1] - _yoorg
_mat[2][2] = _npos
_mat[1][0] = _mat[0][1]
_mat[2][0] = _mat[0][2]
_mat[2][1] = _mat[1][2]
# Now invert this matrix
_mat = linalg.inv(_mat)
_a = _sigxox*_mat[0][0]+_sigxoy*_mat[0][1]+_sigxo*_mat[0][2]
_b = -1*(_sigxox*_mat[1][0]+_sigxoy*_mat[1][1]+_sigxo*_mat[1][2])
#_x0 = _sigxox*_mat[2][0]+_sigxoy*_mat[2][1]+_sigxo*_mat[2][2]
_c = _sigyox*_mat[1][0]+_sigyoy*_mat[1][1]+_sigyo*_mat[1][2]
_d = _sigyox*_mat[0][0]+_sigyoy*_mat[0][1]+_sigyo*_mat[0][2]
#_y0 = _sigyox*_mat[2][0]+_sigyoy*_mat[2][1]+_sigyo*_mat[2][2]
_xt = _xoorg - _a*_xorg+_b*_yorg
_yt = _yoorg - _d*_xorg-_c*_yorg
return [_a,_b,_xt],[_c,_d,_yt] | Compute the least-squares fit between two arrays.
A Python translation of 'FITLIN' from 'drutil.f' (Drizzle V2.9). | Below is the instruction that describes the task:
### Input:
Compute the least-squares fit between two arrays.
A Python translation of 'FITLIN' from 'drutil.f' (Drizzle V2.9).
### Response:
def fitlin(imgarr,refarr):
""" Compute the least-squares fit between two arrays.
A Python translation of 'FITLIN' from 'drutil.f' (Drizzle V2.9).
"""
# Initialize variables
_mat = np.zeros((3,3),dtype=np.float64)
_xorg = imgarr[0][0]
_yorg = imgarr[0][1]
_xoorg = refarr[0][0]
_yoorg = refarr[0][1]
_sigxox = 0.
_sigxoy = 0.
_sigxo = 0.
_sigyox = 0.
_sigyoy = 0.
_sigyo = 0.
_npos = len(imgarr)
# Populate matrices
for i in range(_npos):
_mat[0][0] += np.power((imgarr[i][0] - _xorg),2)
_mat[0][1] += (imgarr[i][0] - _xorg) * (imgarr[i][1] - _yorg)
_mat[0][2] += (imgarr[i][0] - _xorg)
_mat[1][1] += np.power((imgarr[i][1] - _yorg),2)
_mat[1][2] += imgarr[i][1] - _yorg
_sigxox += (refarr[i][0] - _xoorg)*(imgarr[i][0] - _xorg)
_sigxoy += (refarr[i][0] - _xoorg)*(imgarr[i][1] - _yorg)
_sigxo += refarr[i][0] - _xoorg
_sigyox += (refarr[i][1] - _yoorg)*(imgarr[i][0] - _xorg)
_sigyoy += (refarr[i][1] - _yoorg)*(imgarr[i][1] - _yorg)
_sigyo += refarr[i][1] - _yoorg
_mat[2][2] = _npos
_mat[1][0] = _mat[0][1]
_mat[2][0] = _mat[0][2]
_mat[2][1] = _mat[1][2]
# Now invert this matrix
_mat = linalg.inv(_mat)
_a = _sigxox*_mat[0][0]+_sigxoy*_mat[0][1]+_sigxo*_mat[0][2]
_b = -1*(_sigxox*_mat[1][0]+_sigxoy*_mat[1][1]+_sigxo*_mat[1][2])
#_x0 = _sigxox*_mat[2][0]+_sigxoy*_mat[2][1]+_sigxo*_mat[2][2]
_c = _sigyox*_mat[1][0]+_sigyoy*_mat[1][1]+_sigyo*_mat[1][2]
_d = _sigyox*_mat[0][0]+_sigyoy*_mat[0][1]+_sigyo*_mat[0][2]
#_y0 = _sigyox*_mat[2][0]+_sigyoy*_mat[2][1]+_sigyo*_mat[2][2]
_xt = _xoorg - _a*_xorg+_b*_yorg
_yt = _yoorg - _d*_xorg-_c*_yorg
return [_a,_b,_xt],[_c,_d,_yt] |
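A hedged usage sketch: fit the affine transform mapping image positions onto reference positions (at least three non-collinear points are needed for the 3x3 normal matrix to be invertible). np and linalg are assumed to be numpy and scipy.linalg, as the snippet implies:

import numpy as np
from scipy import linalg

img = np.array([[10., 10.], [20., 10.], [10., 20.], [25., 30.]])
ref = img + [1.5, -2.0]               # a pure shift, for illustration
xcoeffs, ycoeffs = fitlin(img, ref)
print(xcoeffs)   # ~[1.0, 0.0, 1.5]   for x' = a*x - b*y + xt
print(ycoeffs)   # ~[1.0, 0.0, -2.0]  for y' = c*y + d*x + yt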
def ref_info(self):
"""Gets a dictionary of ref positions and the ref IDs of the refs for
that game.
:returns: A dictionary of ref positions and IDs.
"""
doc = self.get_doc()
table = doc('table#officials')
return sportsref.utils.parse_info_table(table) | Gets a dictionary of ref positions and the ref IDs of the refs for
that game.
:returns: A dictionary of ref positions and IDs. | Below is the instruction that describes the task:
### Input:
Gets a dictionary of ref positions and the ref IDs of the refs for
that game.
:returns: A dictionary of ref positions and IDs.
### Response:
def ref_info(self):
"""Gets a dictionary of ref positions and the ref IDs of the refs for
that game.
:returns: A dictionary of ref positions and IDs.
"""
doc = self.get_doc()
table = doc('table#officials')
return sportsref.utils.parse_info_table(table) |
def taint(taintedSet, taintedAttribute):
u"""Adds an attribute to a set of attributes.
Related attributes are also included."""
taintedSet.add(taintedAttribute)
if taintedAttribute == 'marker':
taintedSet |= set(['marker-start', 'marker-mid', 'marker-end'])
if taintedAttribute in ['marker-start', 'marker-mid', 'marker-end']:
taintedSet.add('marker')
return taintedSet | u"""Adds an attribute to a set of attributes.
Related attributes are also included. | Below is the the instruction that describes the task:
### Input:
u"""Adds an attribute to a set of attributes.
Related attributes are also included.
### Response:
def taint(taintedSet, taintedAttribute):
u"""Adds an attribute to a set of attributes.
Related attributes are also included."""
taintedSet.add(taintedAttribute)
if taintedAttribute == 'marker':
taintedSet |= set(['marker-start', 'marker-mid', 'marker-end'])
if taintedAttribute in ['marker-start', 'marker-mid', 'marker-end']:
taintedSet.add('marker')
return taintedSet |
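A quick illustration of the marker coupling (set ordering may vary):

print(taint(set(), 'marker'))
# -> {'marker', 'marker-start', 'marker-mid', 'marker-end'}
print(taint(set(), 'marker-mid'))
# -> {'marker-mid', 'marker'}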
def del_export(exports='/etc/exports', path=None):
'''
Remove an export
CLI Example:
.. code-block:: bash
salt '*' nfs.del_export /media/storage
'''
edict = list_exports(exports)
del edict[path]
_write_exports(exports, edict)
return edict | Remove an export
CLI Example:
.. code-block:: bash
salt '*' nfs.del_export /media/storage | Below is the instruction that describes the task:
### Input:
Remove an export
CLI Example:
.. code-block:: bash
salt '*' nfs.del_export /media/storage
### Response:
def del_export(exports='/etc/exports', path=None):
'''
Remove an export
CLI Example:
.. code-block:: bash
salt '*' nfs.del_export /media/storage
'''
edict = list_exports(exports)
del edict[path]
_write_exports(exports, edict)
return edict |
def _log(code, message, level, domain):
"""Call this to add an entry in the journal"""
entry = LogEntry(level, domain, code, message)
Logger.journal.append(entry)
if Logger.silent:
return
if level >= Logger._verbosity:
_print_entry(entry) | Call this to add an entry in the journal | Below is the instruction that describes the task:
### Input:
Call this to add an entry in the journal
### Response:
def _log(code, message, level, domain):
"""Call this to add an entry in the journal"""
entry = LogEntry(level, domain, code, message)
Logger.journal.append(entry)
if Logger.silent:
return
if level >= Logger._verbosity:
_print_entry(entry) |
def process_inline_members_definition(members):
"""
:param members: this can be any of the following:
- a string containing a space and/or comma separated list of names: e.g.:
"item1 item2 item3" OR "item1,item2,item3" OR "item1, item2, item3"
- tuple/list/Set of strings (names)
- Mapping of (name, data) pairs
- any kind of iterable that yields (name, data) pairs
:return: An iterable of (name, data) pairs.
"""
if isinstance(members, str):
members = ((name, UNDEFINED) for name in members.replace(',', ' ').split())
elif isinstance(members, (tuple, list, collections.abc.Set)):
if members and isinstance(next(iter(members)), str):
members = ((name, UNDEFINED) for name in members)
elif isinstance(members, collections.abc.Mapping):
members = members.items()
return members | :param members: this can be any of the following:
- a string containing a space and/or comma separated list of names: e.g.:
"item1 item2 item3" OR "item1,item2,item3" OR "item1, item2, item3"
- tuple/list/Set of strings (names)
- Mapping of (name, data) pairs
- any kind of iterable that yields (name, data) pairs
:return: An iterable of (name, data) pairs. | Below is the instruction that describes the task:
### Input:
:param members: this can be any of the following:
- a string containing a space and/or comma separated list of names: e.g.:
"item1 item2 item3" OR "item1,item2,item3" OR "item1, item2, item3"
- tuple/list/Set of strings (names)
- Mapping of (name, data) pairs
- any kind of iterable that yields (name, data) pairs
:return: An iterable of (name, data) pairs.
### Response:
def process_inline_members_definition(members):
"""
:param members: this can be any of the following:
- a string containing a space and/or comma separated list of names: e.g.:
"item1 item2 item3" OR "item1,item2,item3" OR "item1, item2, item3"
- tuple/list/Set of strings (names)
- Mapping of (name, data) pairs
- any kind of iterable that yields (name, data) pairs
:return: An iterable of (name, data) pairs.
"""
if isinstance(members, str):
members = ((name, UNDEFINED) for name in members.replace(',', ' ').split())
elif isinstance(members, (tuple, list, collections.abc.Set)):
if members and isinstance(next(iter(members)), str):
members = ((name, UNDEFINED) for name in members)
elif isinstance(members, collections.abc.Mapping):
members = members.items()
return members |
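A sketch of the three accepted spellings, assuming UNDEFINED is the module's sentinel for "no data attached":

list(process_inline_members_definition('a, b c'))
# -> [('a', UNDEFINED), ('b', UNDEFINED), ('c', UNDEFINED)]
list(process_inline_members_definition(['a', 'b']))
# -> [('a', UNDEFINED), ('b', UNDEFINED)]
list(process_inline_members_definition({'a': 1}))
# -> [('a', 1)]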
def create(self, status_callback=values.unset, unique_name=values.unset):
"""
Create a new ModelBuildInstance
:param unicode status_callback: The URL we should call using a POST method to send status information to your application
:param unicode unique_name: An application-defined string that uniquely identifies the new resource
:returns: Newly created ModelBuildInstance
:rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildInstance
"""
data = values.of({'StatusCallback': status_callback, 'UniqueName': unique_name, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ModelBuildInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], ) | Create a new ModelBuildInstance
:param unicode status_callback: The URL we should call using a POST method to send status information to your application
:param unicode unique_name: An application-defined string that uniquely identifies the new resource
:returns: Newly created ModelBuildInstance
:rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildInstance | Below is the instruction that describes the task:
### Input:
Create a new ModelBuildInstance
:param unicode status_callback: The URL we should call using a POST method to send status information to your application
:param unicode unique_name: An application-defined string that uniquely identifies the new resource
:returns: Newly created ModelBuildInstance
:rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildInstance
### Response:
def create(self, status_callback=values.unset, unique_name=values.unset):
"""
Create a new ModelBuildInstance
:param unicode status_callback: The URL we should call using a POST method to send status information to your application
:param unicode unique_name: An application-defined string that uniquely identifies the new resource
:returns: Newly created ModelBuildInstance
:rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildInstance
"""
data = values.of({'StatusCallback': status_callback, 'UniqueName': unique_name, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ModelBuildInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], ) |
def aggregateLine(requestContext, seriesList, func='avg'):
"""
Takes a metric or wildcard seriesList and draws a horizontal line
based on the function applied to each series.
Note: By default, the graphite renderer consolidates data points by
averaging data points over time. If you are using the 'min' or 'max'
function for aggregateLine, this can cause an unusual gap in the
line drawn by this function and the data itself. To fix this, you
should use the consolidateBy() function with the same function
argument you are using for aggregateLine. This will ensure that the
proper data points are retained and the graph should line up
correctly.
Example::
&target=aggregateLine(server01.connections.total, 'avg')
&target=aggregateLine(server*.connections.total, 'avg')
"""
t_funcs = {'avg': safeAvg, 'min': safeMin, 'max': safeMax}
if func not in t_funcs:
raise ValueError("Invalid function %s" % func)
results = []
for series in seriesList:
value = t_funcs[func](series)
if value is not None:
name = 'aggregateLine(%s, %g)' % (series.name, value)
else:
name = 'aggregateLine(%s, None)' % (series.name)
[series] = constantLine(requestContext, value)
series.name = name
series.pathExpression = series.name
results.append(series)
return results | Takes a metric or wildcard seriesList and draws a horizontal line
based on the function applied to each series.
Note: By default, the graphite renderer consolidates data points by
averaging data points over time. If you are using the 'min' or 'max'
function for aggregateLine, this can cause an unusual gap in the
line drawn by this function and the data itself. To fix this, you
should use the consolidateBy() function with the same function
argument you are using for aggregateLine. This will ensure that the
proper data points are retained and the graph should line up
correctly.
Example::
&target=aggregateLine(server01.connections.total, 'avg')
&target=aggregateLine(server*.connections.total, 'avg') | Below is the instruction that describes the task:
### Input:
Takes a metric or wildcard seriesList and draws a horizontal line
based on the function applied to each series.
Note: By default, the graphite renderer consolidates data points by
averaging data points over time. If you are using the 'min' or 'max'
function for aggregateLine, this can cause an unusual gap in the
line drawn by this function and the data itself. To fix this, you
should use the consolidateBy() function with the same function
argument you are using for aggregateLine. This will ensure that the
proper data points are retained and the graph should line up
correctly.
Example::
&target=aggregateLine(server01.connections.total, 'avg')
&target=aggregateLine(server*.connections.total, 'avg')
### Response:
def aggregateLine(requestContext, seriesList, func='avg'):
"""
Takes a metric or wildcard seriesList and draws a horizontal line
based on the function applied to each series.
Note: By default, the graphite renderer consolidates data points by
averaging data points over time. If you are using the 'min' or 'max'
function for aggregateLine, this can cause an unusual gap in the
line drawn by this function and the data itself. To fix this, you
should use the consolidateBy() function with the same function
argument you are using for aggregateLine. This will ensure that the
proper data points are retained and the graph should line up
correctly.
Example::
&target=aggregateLine(server01.connections.total, 'avg')
&target=aggregateLine(server*.connections.total, 'avg')
"""
t_funcs = {'avg': safeAvg, 'min': safeMin, 'max': safeMax}
if func not in t_funcs:
raise ValueError("Invalid function %s" % func)
results = []
for series in seriesList:
value = t_funcs[func](series)
if value is not None:
name = 'aggregateLine(%s, %g)' % (series.name, value)
else:
name = 'aggregateLine(%s, None)' % (series.name)
[series] = constantLine(requestContext, value)
series.name = name
series.pathExpression = series.name
results.append(series)
return results |
def count_tags(self):
'''Count tag occurrences by type and update the tag collection'''
for key, model in TAGGED.items():
collection = '{0}_tags'.format(key)
results = (model.objects(tags__exists=True)
.map_reduce(map_tags, reduce_tags, collection))
for result in results:
tag, created = Tag.objects.get_or_create(name=result.key,
auto_save=False)
tag.counts[key] = int(result.value) if result.value else 0
tag.save() | Count tag occurrences by type and update the tag collection | Below is the instruction that describes the task:
### Input:
Count tag occurrences by type and update the tag collection
### Response:
def count_tags(self):
'''Count tag occurrences by type and update the tag collection'''
for key, model in TAGGED.items():
collection = '{0}_tags'.format(key)
results = (model.objects(tags__exists=True)
.map_reduce(map_tags, reduce_tags, collection))
for result in results:
tag, created = Tag.objects.get_or_create(name=result.key,
auto_save=False)
tag.counts[key] = int(result.value) if result.value else 0
tag.save() |
def download_as_json(name):
"""
Download IPList as json. This would allow for easy
manipulation of the IPList, but is generally recommended only for
smaller lists
:param str name: name of IPList
:return: JSON data for the IPList, or None if the list is not found
"""
location = list(IPList.objects.filter(name))
if location:
iplist = location[0]
return iplist.download(as_type='json') | Download IPList as json. This would allow for easy
manipulation of the IPList, but is generally recommended only for
smaller lists
:param str name: name of IPList
:return: JSON data for the IPList, or None if the list is not found | Below is the instruction that describes the task:
### Input:
Download IPList as json. This would allow for easy
manipulation of the IPList, but is generally recommended only for
smaller lists
:param str name: name of IPList
:return: JSON data for the IPList, or None if the list is not found
### Response:
def download_as_json(name):
"""
Download IPList as json. This would allow for easy
manipulation of the IPList, but is generally recommended only for
smaller lists
:param str name: name of IPList
:return: JSON data for the IPList, or None if the list is not found
"""
location = list(IPList.objects.filter(name))
if location:
iplist = location[0]
return iplist.download(as_type='json') |
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec) | Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color | Below is the instruction that describes the task:
### Input:
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
### Response:
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec) |
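The mapping it performs, shown directly against openpyxl (which must be installed):

from openpyxl.styles import Color

Color('FF00FF00')                    # from a 32-bit ARGB hex string
Color(**{'theme': 1, 'tint': 0.5})   # from a dict of Color keyword arguments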
def subtract(self, es):
"""
Subtract the BED elements in es from self.
:param es: a list of BED elements (or anything with chrom, start, end)
:return: a list of BED elements which represent what is left of
self after the subtraction. This might be an empty list.
"""
workingSet = [self]
for e in es:
newWorkingSet = []
for w in workingSet:
newWorkingSet += w.__singleIntersect(e)
workingSet = newWorkingSet
return workingSet | Subtract the BED elements in es from self.
:param es: a list of BED elements (or anything with chrom, start, end)
:return: a list of BED elements which represent what is left of
self after the subtraction. This might be an empty list. | Below is the instruction that describes the task:
### Input:
Subtract the BED elements in es from self.
:param es: a list of BED elements (or anything with chrom, start, end)
:return: a list of BED elements which represent what is left of
self after the subtraction. This might be an empty list.
### Response:
def subtract(self, es):
"""
Subtract the BED elements in es from self.
:param es: a list of BED elements (or anything with chrom, start, end)
:return: a list of BED elements which represent what is left of
self after the subtraction. This might be an empty list.
"""
workingSet = [self]
for e in es:
newWorkingSet = []
for w in workingSet:
newWorkingSet += w.__singleIntersect(e)
workingSet = newWorkingSet
return workingSet |
def get_project(url=None, username=None, password=None, token=None, scope=None, scope_id=None,
env_filename=None, status=ScopeStatus.ACTIVE):
"""
Retrieve and return the KE-chain project to be used throughout an app.
This helper is made to bootstrap a pykechain-enabled Python script or a Jupyter notebook with the correct
project (technically this is a `pykechain.models.Scope` model).
When no parameters are passed to this function, it will try to retrieve `url`, `token`, `scope` (or `scope_id`)
from the environment variables or a neatly placed '.env' file.
When the environment variable KECHAIN_FORCE_ENV_USE is set to true (or ok, on, 1, yes), the use of
environment variables for the retrieval of the scope is enforced. The following environment variables can be set::
KECHAIN_URL - full url of KE-chain where to connect to eg: 'https://<some>.ke-chain.com'
KECHAIN_TOKEN - authentication token for the KE-chain user provided from KE-chain user account control
KECHAIN_USERNAME - the username for the credentials
KECHAIN_PASSWORD - the password for the credentials
KECHAIN_SCOPE - the name of the project / scope. Should be unique, otherwise use scope_id
KECHAIN_SCOPE_ID - the UUID of the project / scope.
KECHAIN_FORCE_ENV_USE - set to 'true', '1', 'ok', or 'yes' to always use the environment variables.
KECHAIN_SCOPE_STATUS - the status of the Scope to retrieve, defaults to None to retrieve all scopes
.. versionadded:: 1.12
:param url: (optional) url of KE-chain
:type url: basestring or None
:param username: (optional) username for authentication (together with password, if not token)
:type username: basestring or None
:param password: (optional) password for username/password authentication (together with username, if not token)
:type password: basestring or None
:param token: (optional) token for authentication (if not username/password)
:type token: basestring or None
:param scope: (optional) name of the scope to retrieve from KE-chain.
:type scope: basestring or None
:param scope_id: (optional) UUID of the scope to retrieve and return from KE-chain
:type scope_id: basestring or None
:param env_filename: (optional) name of the environment filename to bootstrap the Client
:type env_filename: basestring or None
:param status: (optional) status of the scope to retrieve, defaults to :attr:`enums.ScopeStatus.ACTIVE`
:type status: basestring or None
:return: pykechain.models.Scope
:raises NotFoundError: If the scope could not be found
:raises ClientError: If the client connection to KE-chain was unsuccessful
:raises APIError: If other Errors occur to retrieve the scope
Example
-------
An example with parameters provided
>>> from pykechain import get_project
>>> project = get_project(url='http://localhost:8000',
... username='foo', password='bar', scope='1st!')
>>> print(project.name)
1st
An example with a .env file on disk::
# This is an .env file on disk.
KECHAIN_TOKEN=bd9377793f7e74a29dbb11fce969
KECHAIN_URL=http://localhost:8080
KECHAIN_SCOPE_ID=c9f0-228e-4d3a-9dc0-ec5a75d7
>>> project = get_project(env_filename='/path/to/.env')
>>> project.id
c9f0-228e-4d3a-9dc0-ec5a75d7
An example for get_project that will extract all from the environment variables
>>> env_vars = os.environ
>>> env_vars.get('KECHAIN_TOKEN')
bd9377793f7e74a29dbb11fce969
>>> env_vars.get('KECHAIN_URL')
http://localhost:8080
>>> env_vars.get('KECHAIN_SCOPE')
Bike Project
>>> project = get_project()
>>> project.name
Bike Project
"""
if env.bool(kecenv.KECHAIN_FORCE_ENV_USE, default=False):
if not os.getenv(kecenv.KECHAIN_URL):
raise ClientError(
"Error: KECHAIN_URL should be provided as environment variable (use of env vars is enforced)")
if not (os.getenv(kecenv.KECHAIN_TOKEN) or
(os.getenv(kecenv.KECHAIN_USERNAME) and os.getenv(kecenv.KECHAIN_PASSWORD))):
raise ClientError("Error: KECHAIN_TOKEN or KECHAIN_USERNAME and KECHAIN_PASSWORD should be provided as "
"environment variable(s) (use of env vars is enforced)")
if not (os.getenv(kecenv.KECHAIN_SCOPE) or os.getenv(kecenv.KECHAIN_SCOPE_ID)):
raise ClientError("Error: KECHAIN_SCOPE or KECHAIN_SCOPE_ID should be provided as environment variable "
"(use of env vars is enforced)")
if env.bool(kecenv.KECHAIN_FORCE_ENV_USE, default=False) or \
not any((url, username, password, token, scope, scope_id)):
client = Client.from_env(env_filename=env_filename)
scope_id = env(kecenv.KECHAIN_SCOPE_ID, default=None)
scope = env(kecenv.KECHAIN_SCOPE, default=None)
status = env(kecenv.KECHAIN_SCOPE_STATUS, default=None)
elif (url and ((username and password) or (token)) and (scope or scope_id)) and \
not env.bool(kecenv.KECHAIN_FORCE_ENV_USE, default=False):
client = Client(url=url)
client.login(username=username, password=password, token=token)
else:
raise ClientError("Error: insufficient arguments to connect to KE-chain. "
"See documentation of `pykechain.get_project()`")
if scope_id:
return client.scope(pk=scope_id, status=status)
else:
return client.scope(name=scope, status=status) | Retrieve and return the KE-chain project to be used throughout an app.
This helper is made to bootstrap a pykechain-enabled Python script or a Jupyter notebook with the correct
project (technically this is a `pykechain.models.Scope` model).
When no parameters are passed to this function, it will try to retrieve `url`, `token`, `scope` (or `scope_id`)
from the environment variables or a neatly placed '.env' file.
When the environment variable KECHAIN_FORCE_ENV_USE is set to true (or ok, on, 1, yes), the use of
environment variables for the retrieval of the scope is enforced. The following environment variables can be set::
KECHAIN_URL - full url of KE-chain where to connect to eg: 'https://<some>.ke-chain.com'
KECHAIN_TOKEN - authentication token for the KE-chain user provided from KE-chain user account control
KECHAIN_USERNAME - the username for the credentials
KECHAIN_PASSWORD - the password for the credentials
KECHAIN_SCOPE - the name of the project / scope. Should be unique, otherwise use scope_id
KECHAIN_SCOPE_ID - the UUID of the project / scope.
KECHAIN_FORCE_ENV_USE - set to 'true', '1', 'ok', or 'yes' to always use the environment variables.
KECHAIN_SCOPE_STATUS - the status of the Scope to retrieve, defaults to None to retrieve all scopes
.. versionadded:: 1.12
:param url: (optional) url of KE-chain
:type url: basestring or None
:param username: (optional) username for authentication (together with password, if not token)
:type username: basestring or None
:param password: (optional) password for username/password authentication (together with username, if not token)
:type password: basestring or None
:param token: (optional) token for authentication (if not username/password)
:type token: basestring or None
:param scope: (optional) name of the scope to retrieve from KE-chain.
:type scope: basestring or None
:param scope_id: (optional) UUID of the scope to retrieve and return from KE-chain
:type scope_id: basestring or None
:param env_filename: (optional) name of the environment filename to bootstrap the Client
:type env_filename: basestring or None
:param status: (optional) status of the scope to retrieve, defaults to :attr:`enums.ScopeStatus.ACTIVE`
:type status: basestring or None
:return: pykechain.models.Scope
:raises NotFoundError: If the scope could not be found
:raises ClientError: If the client connection to KE-chain was unsuccessful
:raises APIError: If other Errors occur to retrieve the scope
Example
-------
An example with parameters provided
>>> from pykechain import get_project
>>> project = get_project(url='http://localhost:8000',
... username='foo', password='bar', scope='1st!')
>>> print(project.name)
1st
An example with a .env file on disk::
# This is an .env file on disk.
KECHAIN_TOKEN=bd9377793f7e74a29dbb11fce969
KECHAIN_URL=http://localhost:8080
KECHAIN_SCOPE_ID=c9f0-228e-4d3a-9dc0-ec5a75d7
>>> project = get_project(env_filename='/path/to/.env')
>>> project.id
c9f0-228e-4d3a-9dc0-ec5a75d7
An example for get_project that will extract all from the environment variables
>>> env_vars = os.environ
>>> env_vars.get('KECHAIN_TOKEN')
bd9377793f7e74a29dbb11fce969
>>> env_vars.get('KECHAIN_URL')
http://localhost:8080
>>> env_vars.get('KECHAIN_SCOPE')
Bike Project
>>> project = get_project()
>>> project.name
Bike Project | Below is the instruction that describes the task:
### Input:
Retrieve and return the KE-chain project to be used throughout an app.
This helper is made to bootstrap a pykechain-enabled Python script or a Jupyter notebook with the correct
project (technically this is a `pykechain.models.Scope` model).
When no parameters are passed to this function, it will try to retrieve `url`, `token`, `scope` (or `scope_id`)
from the environment variables or a neatly placed '.env' file.
When the environment variable KECHAIN_FORCE_ENV_USE is set to true (or ok, on, 1, yes), the use of
environment variables for the retrieval of the scope is enforced. The following environment variables can be set::
KECHAIN_URL - full url of KE-chain where to connect to eg: 'https://<some>.ke-chain.com'
KECHAIN_TOKEN - authentication token for the KE-chain user provided from KE-chain user account control
KECHAIN_USERNAME - the username for the credentials
KECHAIN_PASSWORD - the password for the credentials
KECHAIN_SCOPE - the name of the project / scope. Should be unique, otherwise use scope_id
KECHAIN_SCOPE_ID - the UUID of the project / scope.
KECHAIN_FORCE_ENV_USE - set to 'true', '1', 'ok', or 'yes' to always use the environment variables.
KECHAIN_SCOPE_STATUS - the status of the Scope to retrieve, defaults to None to retrieve all scopes
.. versionadded:: 1.12
:param url: (optional) url of KE-chain
:type url: basestring or None
:param username: (optional) username for authentication (together with password, if not token)
:type username: basestring or None
:param password: (optional) password for username/password authentication (together with username, if not token)
:type password: basestring or None
:param token: (optional) token for authentication (if not username/password)
:type token: basestring or None
:param scope: (optional) name of the scope to retrieve from KE-chain.
:type scope: basestring or None
:param scope_id: (optional) UUID of the scope to retrieve and return from KE-chain
:type scope_id: basestring or None
:param env_filename: (optional) name of the environment filename to bootstrap the Client
:type env_filename: basestring or None
:param status: (optional) status of the scope to retrieve, defaults to :attr:`enums.ScopeStatus.ACTIVE`
:type status: basestring or None
:return: pykechain.models.Scope
:raises NotFoundError: If the scope could not be found
:raises ClientError: If the client connection to KE-chain was unsuccessful
:raises APIError: If other Errors occur to retrieve the scope
Example
-------
An example with parameters provided
>>> from pykechain import get_project
>>> project = get_project(url='http://localhost:8000',
... username='foo', password='bar', scope='1st!')
>>> print(project.name)
1st
An example with a .env file on disk::
# This is an .env file on disk.
KECHAIN_TOKEN=bd9377793f7e74a29dbb11fce969
KECHAIN_URL=http://localhost:8080
KECHAIN_SCOPE_ID=c9f0-228e-4d3a-9dc0-ec5a75d7
>>> project = get_project(env_filename='/path/to/.env')
>>> project.id
c9f0-228e-4d3a-9dc0-ec5a75d7
An example for get_project that will extract all from the environment variables
>>> env_vars = os.environ
>>> env_vars.get('KECHAIN_TOKEN')
bd9377793f7e74a29dbb11fce969
>>> env_vars.get('KECHAIN_URL')
http://localhost:8080
>>> env_vars.get('KECHAIN_SCOPE')
Bike Project
>>> project = get_project()
>>> project.name
Bike Project
### Response:
def get_project(url=None, username=None, password=None, token=None, scope=None, scope_id=None,
env_filename=None, status=ScopeStatus.ACTIVE):
"""
Retrieve and return the KE-chain project to be used throughout an app.
This helper is made to bootstrap a pykechain-enabled Python script or a Jupyter notebook with the correct
project (technically this is a `pykechain.models.Scope` model).
When no parameters are passed to this function, it will try to retrieve `url`, `token`, `scope` (or `scope_id`)
from the environment variables or a neatly placed '.env' file.
When the environment variable KECHAIN_FORCE_ENV_USE is set to true (or ok, on, 1, yes), the use of
environment variables for the retrieval of the scope is enforced. The following environment variables can be set::
KECHAIN_URL - full url of KE-chain where to connect to eg: 'https://<some>.ke-chain.com'
KECHAIN_TOKEN - authentication token for the KE-chain user provided from KE-chain user account control
KECHAIN_USERNAME - the username for the credentials
KECHAIN_PASSWORD - the password for the credentials
KECHAIN_SCOPE - the name of the project / scope. Should be unique, otherwise use scope_id
KECHAIN_SCOPE_ID - the UUID of the project / scope.
KECHAIN_FORCE_ENV_USE - set to 'true', '1', 'ok', or 'yes' to always use the environment variables.
KECHAIN_SCOPE_STATUS - the status of the Scope to retrieve, defaults to None to retrieve all scopes
.. versionadded:: 1.12
:param url: (optional) url of KE-chain
:type url: basestring or None
:param username: (optional) username for authentication (together with password, if not token)
:type username: basestring or None
:param password: (optional) password for username/password authentication (together with username, if not token)
:type password: basestring or None
:param token: (optional) token for authentication (if not username/password)
:type token: basestring or None
:param scope: (optional) name of the scope to retrieve from KE-chain.
:type scope: basestring or None
:param scope_id: (optional) UUID of the scope to retrieve and return from KE-chain
:type scope_id: basestring or None
:param env_filename: (optional) name of the environment filename to bootstrap the Client
:type env_filename: basestring or None
:param status: (optional) status of the scope to retrieve, defaults to :attr:`enums.ScopeStatus.ACTIVE`
:type status: basestring or None
:return: pykechain.models.Scope
:raises NotFoundError: If the scope could not be found
:raises ClientError: If the client connection to KE-chain was unsuccessful
:raises APIError: If other Errors occur to retrieve the scope
Example
-------
An example with parameters provided
>>> from pykechain import get_project
>>> project = get_project(url='http://localhost:8000',
... username='foo', password='bar', scope='1st!')
>>> print(project.name)
1st
An example with a .env file on disk::
# This is an .env file on disk.
KECHAIN_TOKEN=bd9377793f7e74a29dbb11fce969
KECHAIN_URL=http://localhost:8080
KECHAIN_SCOPE_ID=c9f0-228e-4d3a-9dc0-ec5a75d7
>>> project = get_project(env_filename='/path/to/.env')
>>> project.id
c9f0-228e-4d3a-9dc0-ec5a75d7
An example for get_project that will extract all from the environment variables
>>> env_vars = os.environ
>>> env_vars.get('KECHAIN_TOKEN')
bd9377793f7e74a29dbb11fce969
>>> env_vars.get('KECHAIN_URL')
http://localhost:8080
>>> env_vars.get('KECHAIN_SCOPE')
Bike Project
>>> project = get_project()
>>> project.name
Bike Project
"""
if env.bool(kecenv.KECHAIN_FORCE_ENV_USE, default=False):
if not os.getenv(kecenv.KECHAIN_URL):
raise ClientError(
"Error: KECHAIN_URL should be provided as environment variable (use of env vars is enforced)")
if not (os.getenv(kecenv.KECHAIN_TOKEN) or
(os.getenv(kecenv.KECHAIN_USERNAME) and os.getenv(kecenv.KECHAIN_PASSWORD))):
raise ClientError("Error: KECHAIN_TOKEN or KECHAIN_USERNAME and KECHAIN_PASSWORD should be provided as "
"environment variable(s) (use of env vars is enforced)")
if not (os.getenv(kecenv.KECHAIN_SCOPE) or os.getenv(kecenv.KECHAIN_SCOPE_ID)):
raise ClientError("Error: KECHAIN_SCOPE or KECHAIN_SCOPE_ID should be provided as environment variable "
"(use of env vars is enforced)")
if env.bool(kecenv.KECHAIN_FORCE_ENV_USE, default=False) or \
not any((url, username, password, token, scope, scope_id)):
client = Client.from_env(env_filename=env_filename)
scope_id = env(kecenv.KECHAIN_SCOPE_ID, default=None)
scope = env(kecenv.KECHAIN_SCOPE, default=None)
status = env(kecenv.KECHAIN_SCOPE_STATUS, default=None)
elif (url and ((username and password) or (token)) and (scope or scope_id)) and \
not env.bool(kecenv.KECHAIN_FORCE_ENV_USE, default=False):
client = Client(url=url)
client.login(username=username, password=password, token=token)
else:
raise ClientError("Error: insufficient arguments to connect to KE-chain. "
"See documentation of `pykechain.get_project()`")
if scope_id:
return client.scope(pk=scope_id, status=status)
else:
return client.scope(name=scope, status=status) |
def _create_executor(self, handler, args, cpus_per_worker=1):
"""Return a new :class:`.Executor` instance."""
if self._args.parallel > 0:
workers = self._args.parallel
else:
try:
workers = mp.cpu_count() // cpus_per_worker
except NotImplementedError:
workers = 1
if workers != 1:
logger.info('Using {} parallel worker processes...'.format(
workers))
executor = ProcessPoolExecutor(
processes=workers, handler_init=handler, handler_args=args)
else:
logger.info('Using single worker...')
executor = SequentialExecutor(
handler_init=handler, handler_args=args)
return executor | Return a new :class:`.Executor` instance. | Below is the instruction that describes the task:
### Input:
Return a new :class:`.Executor` instance.
### Response:
def _create_executor(self, handler, args, cpus_per_worker=1):
"""Return a new :class:`.Executor` instance."""
if self._args.parallel > 0:
workers = self._args.parallel
else:
try:
workers = mp.cpu_count() // cpus_per_worker
except NotImplementedError:
workers = 1
if workers != 1:
logger.info('Using {} parallel worker processes...'.format(
workers))
executor = ProcessPoolExecutor(
processes=workers, handler_init=handler, handler_args=args)
else:
logger.info('Using single worker...')
executor = SequentialExecutor(
handler_init=handler, handler_args=args)
return executor |
def clip_extents(self):
"""Computes a bounding box in user coordinates
covering the area inside the current clip.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
"""
extents = ffi.new('double[4]')
cairo.cairo_clip_extents(
self._pointer, extents + 0, extents + 1, extents + 2, extents + 3)
self._check_status()
return tuple(extents) | Computes a bounding box in user coordinates
covering the area inside the current clip.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively. | Below is the instruction that describes the task:
### Input:
Computes a bounding box in user coordinates
covering the area inside the current clip.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
### Response:
def clip_extents(self):
"""Computes a bounding box in user coordinates
covering the area inside the current clip.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
"""
extents = ffi.new('double[4]')
cairo.cairo_clip_extents(
self._pointer, extents + 0, extents + 1, extents + 2, extents + 3)
self._check_status()
return tuple(extents) |
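A usage sketch with cairocffi, the library this snippet appears to come from:

import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
ctx = cairo.Context(surface)
ctx.rectangle(10, 10, 30, 20)
ctx.clip()
print(ctx.clip_extents())   # -> (10.0, 10.0, 40.0, 30.0)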
def fruchterman_rheingold(self,attraction_multiplier=None,conflict_avoidance=None,\
defaultEdgeWeight=None,EdgeAttribute=None,gravity_multiplier=None,\
layout3D=None,max_distance_factor=None,maxWeightCutoff=None,minWeightCutoff=None,\
network=None,nIterations=None,NodeAttribute=None,nodeList=None,randomize=None,\
repulsion_multiplier=None,singlePartition=None,spread_factor=None,\
temperature=None,Type=None,update_iterations=None,verbose=None):
"""
Execute the Edge-weighted Force directed (BioLayout) on a network
:param attraction_multiplier (string, optional): Divisor to calculate the attraction force, in numeric value
:param conflict_avoidance (string, optional): Constant force applied to avoid conflicts, in numeric value
:param defaultEdgeWeight (string, optional): The default edge weight to consider, default is 0.5
:param EdgeAttribute (string, optional): The name of the edge column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param gravity_multiplier (string, optional): Multiplier to calculate the gravity force, in numeric value
:param layout3D (string, optional): Layout nodes in 3D; boolean values only, true or false; defaults to true
:param max_distance_factor (string, optional): Percent of graph used for node repulsion calculations, in numeric value
:param maxWeightCutoff (string, optional): The maximum edge weight to consider, default to the Double.MAX value
:param minWeightCutoff (string, optional): The minimum edge weight to consider, numeric values, default is 0
:param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value can also be used to specify the current network.
:param nIterations (string, optional): Number of iterations, in numeric value
:param NodeAttribute (string, optional): The name of the node column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values.
:param randomize (string, optional): Randomize graph before layout; boolean values only, true or false; defaults to true
:param repulsion_multiplier (string, optional): Multiplier to calculate the repulsion force, in numeric value
:param singlePartition (string, optional): Don't partition graph before layout; boolean values only, true or false; defaults to false
:param spread_factor (string, optional): Amount of extra room for layout, in numeric value
:param temperature (string, optional): Initial temperature, in numeric value
:param Type (string, optional): How to interpret weight values; must be one of Heuristic, -Log(value), 1 - normalized value and normalized value. Defaults to Heuristic = ['Heuristic', '-Log(value)', '1 - normalized value', 'normalized value']
:param update_iterations (string, optional): Number of iterations before updating display, in numeric value (0: update only at end)
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(['attraction_multiplier','conflict_avoidance',\
'defaultEdgeWeight','EdgeAttribute','gravity_multiplier','layout3D',\
'max_distance_factor','maxWeightCutoff','minWeightCutoff','network',\
'nIterations','NodeAttribute','nodeList','randomize','repulsion_multiplier',\
'singlePartition','spread_factor','temperature','Type','update_iterations'],\
[attraction_multiplier,conflict_avoidance,defaultEdgeWeight,EdgeAttribute,\
gravity_multiplier,layout3D,max_distance_factor,maxWeightCutoff,\
minWeightCutoff,network,nIterations,NodeAttribute,nodeList,randomize,\
repulsion_multiplier,singlePartition,spread_factor,temperature,Type,\
update_iterations])
response=api(url=self.__url+"/fruchterman-rheingold", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | Execute the Edge-weighted Force directed (BioLayout) on a network
:param attraction_multiplier (string, optional): Divisor to calculate the attraction force, in numeric value
:param conflict_avoidance (string, optional): Constant force applied to avoid conflicts, in numeric value
:param defaultEdgeWeight (string, optional): The default edge weight to consider, default is 0.5
:param EdgeAttribute (string, optional): The name of the edge column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param gravity_multiplier (string, optional): Multiplier to calculate the gravity force, in numeric value
:param layout3D (string, optional): Layout nodes in 3D; boolean values only, true or false; defaults to true
:param max_distance_factor (string, optional): Percent of graph used for node repulsion calculations, in numeric value
:param maxWeightCutoff (string, optional): The maximum edge weight to consider, default to the Double.MAX value
:param minWeightCutoff (string, optional): The minimum edge weight to consider, numeric values, default is 0
:param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value can also be used to specify the current network.
:param nIterations (string, optional): Number of iterations, in numeric value
:param NodeAttribute (string, optional): The name of the node column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values.
:param randomize (string, optional): Randomize graph before layout; boolean values only, true or false; defaults to true
:param repulsion_multiplier (string, optional): Multiplier to calculate the repulsion force, in numeric value
:param singlePartition (string, optional): Don't partition graph before layout; boolean values only, true or false; defaults to false
:param spread_factor (string, optional): Amount of extra room for layout, in numeric value
:param temperature (string, optional): Initial temperature, in numeric value
:param Type (string, optional): How to interpret weight values; must be one of Heuristic, -Log(value), 1 - normalized value and normalized value. Defaults to Heuristic = ['Heuristic', '-Log(value)', '1 - normalized value', 'normalized value']
:param update_iterations (string, optional): Number of iterations before updating display, in numeric value (0: update only at end) | Below is the instruction that describes the task:
### Input:
Execute the Edge-weighted Force directed (BioLayout) on a network
:param attraction_multiplier (string, optional): Divisor to calculate the attraction force, in numeric value
:param conflict_avoidance (string, optional): Constant force applied to avoid conflicts, in numeric value
:param defaultEdgeWeight (string, optional): The default edge weight to consider, default is 0.5
:param EdgeAttribute (string, optional): The name of the edge column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param gravity_multiplier (string, optional): Multiplier to calculate the gravity force, in numeric value
:param layout3D (string, optional): Layout nodes in 3D; boolean values only, true or false; defaults to true
:param max_distance_factor (string, optional): Percent of graph used for node repulsion calculations, in numeric value
:param maxWeightCutoff (string, optional): The maximum edge weight to consider, default to the Double.MAX value
:param minWeightCutoff (string, optional): The minimum edge weight to consider, numeric values, default is 0
:param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value can also be used to specify the current network.
:param nIterations (string, optional): Number of iterations, in numeric value
:param NodeAttribute (string, optional): The name of the node column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values.
:param randomize (string, optional): Randomize graph before layout; boolean values only, true or false; defaults to true
:param repulsion_multiplier (string, optional): Multiplier to calculate the repulsion force, in numeric value
:param singlePartition (string, optional): Don't partition graph before layout; boolean values only, true or false; defaults to false
:param spread_factor (string, optional): Amount of extra room for layout, in numeric value
:param temperature (string, optional): Initial temperature, in numeric value
:param Type (string, optional): How to interpret weight values; must be one of Heuristic, -Log(value), 1 - normalized value and normalized value. Defaults to Heuristic = ['Heuristic', '-Log(value)', '1 - normalized value', 'normalized value']
:param update_iterations (string, optional): Number of iterations before updating display, in numeric value (0: update only at end)
### Response:
def fruchterman_rheingold(self,attraction_multiplier=None,conflict_avoidance=None,\
defaultEdgeWeight=None,EdgeAttribute=None,gravity_multiplier=None,\
layout3D=None,max_distance_factor=None,maxWeightCutoff=None,minWeightCutoff=None,\
network=None,nIterations=None,NodeAttribute=None,nodeList=None,randomize=None,\
repulsion_multiplier=None,singlePartition=None,spread_factor=None,\
temperature=None,Type=None,update_iterations=None,verbose=None):
"""
Execute the Edge-weighted Force directed (BioLayout) on a network
:param attraction_multiplier (string, optional): Divisor to calculate the attraction force, in numeric value
:param conflict_avoidance (string, optional): Constant force applied to avoid conflicts, in numeric value
:param defaultEdgeWeight (string, optional): The default edge weight to consider, default is 0.5
:param EdgeAttribute (string, optional): The name of the edge column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param gravity_multiplier (string, optional): Multiplier to calculate the gravity force, in numeric value
:param layout3D (string, optional): Layout nodes in 3D; boolean values only, true or false; defaults to true
:param max_distance_factor (string, optional): Percent of graph used for node repulsion calculations, in numeric value
:param maxWeightCutoff (string, optional): The maximum edge weight to consider, default to the Double.MAX value
:param minWeightCutoff (string, optional): The minimum edge weight to consider, numeric values, default is 0
:param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value can also be used to specify the current network.
:param nIterations (string, optional): Number of iterations, in numeric value
:param NodeAttribute (string, optional): The name of the node column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values.
:param randomize (string, optional): Randomize graph before layout; boolean values only, true or false; defaults to true
:param repulsion_multiplier (string, optional): Multiplier to calculate the repulsion force, in numeric value
:param singlePartition (string, optional): Don't partition graph before layout; boolean values only, true or false; defaults to false
:param spread_factor (string, optional): Amount of extra room for layout, in numeric value
:param temperature (string, optional): Initial temperature, in numeric value
:param Type (string, optional): How to interpret weight values; must be one of Heuristic, -Log(value), 1 - normalized value and normalized value. Defaults to Heuristic = ['Heuristic', '-Log(value)', '1 - normalized value', 'normalized value']
:param update_iterations (string, optional): Number of iterations before updating display, in numeric value (0: update only at end)
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(['attraction_multiplier','conflict_avoidance',\
'defaultEdgeWeight','EdgeAttribute','gravity_multiplier','layout3D',\
'max_distance_factor','maxWeightCutoff','minWeightCutoff','network',\
'nIterations','NodeAttribute','nodeList','randomize','repulsion_multiplier',\
'singlePartition','spread_factor','temperature','Type','update_iterations'],\
[attraction_multiplier,conflict_avoidance,defaultEdgeWeight,EdgeAttribute,\
gravity_multiplier,layout3D,max_distance_factor,maxWeightCutoff,\
minWeightCutoff,network,nIterations,NodeAttribute,nodeList,randomize,\
repulsion_multiplier,singlePartition,spread_factor,temperature,Type,\
update_iterations])
response=api(url=self.__url+"/fruchterman-rheingold", PARAMS=PARAMS, method="POST", verbose=verbose)
return response
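A hedged usage sketch for the layout call above; the client constructor and the layout attribute path are assumptions based on typical py2cytoscape usage, not taken from this record:

from py2cytoscape import cyrest

cy = cyrest.cyclient()  # assumes a local Cytoscape REST endpoint
# run the layout on the current network with a fixed iteration count
cy.layout.fruchterman_rheingold(network='current', nIterations='100', randomize='true')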
def _set_option_by_index(self, index):
"""
Sets a single option in the Combo by its index, returning True if it was able to.
"""
if index < len(self._options):
self._selected.set(self._options[index])
return True
else:
return False | Sets a single option in the Combo by its index, returning True if it was able to. | Below is the the instruction that describes the task:
### Input:
Sets a single option in the Combo by its index, returning True if it was able to.
### Response:
def _set_option_by_index(self, index):
"""
Sets a single option in the Combo by its index, returning True if it was able to.
"""
if index < len(self._options):
self._selected.set(self._options[index])
return True
else:
return False |
def get_value(self):
"""Return modified Dataframe -- this is *not* a copy"""
# It is import to avoid accessing Qt C++ object as it has probably
# already been destroyed, due to the Qt.WA_DeleteOnClose attribute
df = self.dataModel.get_data()
if self.is_series:
return df.iloc[:, 0]
else:
return df | Return modified Dataframe -- this is *not* a copy | Below is the the instruction that describes the task:
### Input:
Return modified Dataframe -- this is *not* a copy
### Response:
def get_value(self):
"""Return modified Dataframe -- this is *not* a copy"""
# It is important to avoid accessing the Qt C++ object as it has probably
# already been destroyed, due to the Qt.WA_DeleteOnClose attribute
df = self.dataModel.get_data()
if self.is_series:
return df.iloc[:, 0]
else:
return df |
def data_to_binary(self):
"""
:return: bytes
"""
tmp = 0x00
if 1 in self.channels:
tmp += 0x03
if 2 in self.channels:
tmp += 0x0c
return bytes([COMMAND_CODE, tmp]) | :return: bytes | Below is the the instruction that describes the task:
### Input:
:return: bytes
### Response:
def data_to_binary(self):
"""
:return: bytes
"""
tmp = 0x00
if 1 in self.channels:
tmp += 0x03
if 2 in self.channels:
tmp += 0x0c
return bytes([COMMAND_CODE, tmp]) |
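The channel mask packs channel 1 into the low two bits (0x03) and channel 2 into the next two (0x0c); a standalone sketch of the same encoding, with a hypothetical COMMAND_CODE:

COMMAND_CODE = 0x0F  # hypothetical value, for illustration only

def encode_channels(channels):
    # mirror the bit arithmetic of data_to_binary above
    tmp = 0x00
    if 1 in channels:
        tmp += 0x03
    if 2 in channels:
        tmp += 0x0c
    return bytes([COMMAND_CODE, tmp])

assert encode_channels([1]) == b'\x0f\x03'
assert encode_channels([1, 2]) == b'\x0f\x0f'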
def process_delete(self, obj, pk_set=None, action=None, update_fields=None, **kwargs):
"""Recreate queryset from the index and rebuild the index."""
build_kwargs = self.delete_cache.take(obj)
if build_kwargs:
self.index.build(**build_kwargs) | Recreate queryset from the index and rebuild the index. | Below is the the instruction that describes the task:
### Input:
Recreate queryset from the index and rebuild the index.
### Response:
def process_delete(self, obj, pk_set=None, action=None, update_fields=None, **kwargs):
"""Recreate queryset from the index and rebuild the index."""
build_kwargs = self.delete_cache.take(obj)
if build_kwargs:
self.index.build(**build_kwargs) |
def raise_if_cant_commit(self):
"""Verify VCS status and raise an error if commit is disallowed
:return:
"""
cmd = self._command.status()
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t verify VCS status. Process exited with code %d and message: %s' % (
code, stderr or stdout))
for line in stdout.splitlines():
if line.startswith(('??', '!!')):
continue
raise errors.VCSStateError("VCS status doesn't allow to commit. Please commit or stash your changes and try again") | Verify VCS status and raise an error if commit is disallowed
:return: | Below is the the instruction that describes the task:
### Input:
Verify VCS status and raise an error if commit is disallowed
:return:
### Response:
def raise_if_cant_commit(self):
"""Verify VCS status and raise an error if commit is disallowed
:return:
"""
cmd = self._command.status()
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t verify VCS status. Process exited with code %d and message: %s' % (
code, stderr or stdout))
for line in stdout.splitlines():
if line.startswith(('??', '!!')):
continue
raise errors.VCSStateError("VCS status doesn't allow to commit. Please commit or stash your changes and try again") |
def _link_or_update_vars(self):
"""
Creates or updates the symlink to group_vars and returns None.
:returns: None
"""
for d, source in self.links.items():
target = os.path.join(self.inventory_directory, d)
source = os.path.join(self._config.scenario.directory, source)
if not os.path.exists(source):
msg = "The source path '{}' does not exist.".format(source)
util.sysexit_with_message(msg)
msg = "Inventory {} linked to {}".format(source, target)
LOG.info(msg)
os.symlink(source, target) | Creates or updates the symlink to group_vars and returns None.
:returns: None | Below is the the instruction that describes the task:
### Input:
Creates or updates the symlink to group_vars and returns None.
:returns: None
### Response:
def _link_or_update_vars(self):
"""
Creates or updates the symlink to group_vars and returns None.
:returns: None
"""
for d, source in self.links.items():
target = os.path.join(self.inventory_directory, d)
source = os.path.join(self._config.scenario.directory, source)
if not os.path.exists(source):
msg = "The source path '{}' does not exist.".format(source)
util.sysexit_with_message(msg)
msg = "Inventory {} linked to {}".format(source, target)
LOG.info(msg)
os.symlink(source, target) |
def serialise_packet(self):
"""
Serialise a message, including framing information inferred from the ``Meta`` inner class of the packet.
``self.Meta.endpoint`` must be defined to call this method.
:return: A serialised message, ready to be sent to the Pebble.
"""
if not hasattr(self, '_Meta'):
raise ReferenceError("Can't serialise a packet that doesn't have an endpoint ID.")
serialised = self.serialise()
return struct.pack('!HH', len(serialised), self._Meta['endpoint']) + serialised | Serialise a message, including framing information inferred from the ``Meta`` inner class of the packet.
``self.Meta.endpoint`` must be defined to call this method.
:return: A serialised message, ready to be sent to the Pebble. | Below is the the instruction that describes the task:
### Input:
Serialise a message, including framing information inferred from the ``Meta`` inner class of the packet.
``self.Meta.endpoint`` must be defined to call this method.
:return: A serialised message, ready to be sent to the Pebble.
### Response:
def serialise_packet(self):
"""
Serialise a message, including framing information inferred from the ``Meta`` inner class of the packet.
``self.Meta.endpoint`` must be defined to call this method.
:return: A serialised message, ready to be sent to the Pebble.
"""
if not hasattr(self, '_Meta'):
raise ReferenceError("Can't serialise a packet that doesn't have an endpoint ID.")
serialised = self.serialise()
return struct.pack('!HH', len(serialised), self._Meta['endpoint']) + serialised |
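The '!HH' format string packs the payload length and endpoint ID as big-endian unsigned shorts; a standalone framing sketch (payload bytes and endpoint value are made up):

import struct

payload = b'\x01\x02\x03'
endpoint = 0x0BC2  # hypothetical endpoint ID
frame = struct.pack('!HH', len(payload), endpoint) + payload
assert frame == b'\x00\x03\x0b\xc2\x01\x02\x03'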
def update(self):
""" Update the state """
vm = self._cs_api.list_virtualmachines(id=self.id)[0]
self.is_running = self._is_running(vm.state) | Update the state | Below is the the instruction that describes the task:
### Input:
Update the state
### Response:
def update(self):
""" Update the state """
vm = self._cs_api.list_virtualmachines(id=self.id)[0]
self.is_running = self._is_running(vm.state) |
def download(self, remote, writer):
"""
Downloads a file
:param remote: remote file name
:param writer: an object that implements the write(bytes) interface (typically a file descriptor)
:return:
"""
fd = self.open(remote)
while True:
chunk = self.read(fd)
if chunk == b'':
break
writer.write(chunk)
self.close(fd) | Downloads a file
:param remote: remote file name
:param writer: an object that implements the write(bytes) interface (typically a file descriptor)
:return: | Below is the the instruction that describes the task:
### Input:
Downloads a file
:param remote: remote file name
:param writer: an object that implements the write(bytes) interface (typically a file descriptor)
:return:
### Response:
def download(self, remote, writer):
"""
Downloads a file
:param remote: remote file name
:param writer: an object that implements the write(bytes) interface (typically a file descriptor)
:return:
"""
fd = self.open(remote)
while True:
chunk = self.read(fd)
if chunk == b'':
break
writer.write(chunk)
self.close(fd) |
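A usage sketch, assuming client is some object exposing the download() method above; any object with a write(bytes) method works as the writer:

with open('local_copy.bin', 'wb') as fh:
    client.download('/remote/path/file.bin', fh)  # client is an assumed instance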
def get_color_data(self, condition):
'''
Disambiguate similarly-named weather conditions, and return the icon
and color that match.
'''
if condition not in self.color_icons:
# Check for similarly-named conditions if no exact match found
condition_lc = condition.lower()
if 'cloudy' in condition_lc or 'clouds' in condition_lc:
if 'partly' in condition_lc:
condition = 'Partly Cloudy'
else:
condition = 'Cloudy'
elif condition_lc == 'overcast':
condition = 'Cloudy'
elif 'thunder' in condition_lc or 't-storm' in condition_lc:
condition = 'Thunderstorm'
elif 'snow' in condition_lc:
condition = 'Snow'
elif 'rain' in condition_lc or 'showers' in condition_lc:
condition = 'Rainy'
elif 'sunny' in condition_lc:
condition = 'Sunny'
elif 'clear' in condition_lc or 'fair' in condition_lc:
condition = 'Fair'
elif 'fog' in condition_lc:
condition = 'Fog'
return self.color_icons['default'] \
if condition not in self.color_icons \
else self.color_icons[condition] | Disambiguate similarly-named weather conditions, and return the icon
and color that match. | Below is the the instruction that describes the task:
### Input:
Disambiguate similarly-named weather conditions, and return the icon
and color that match.
### Response:
def get_color_data(self, condition):
'''
Disambiguate similarly-named weather conditions, and return the icon
and color that match.
'''
if condition not in self.color_icons:
# Check for similarly-named conditions if no exact match found
condition_lc = condition.lower()
if 'cloudy' in condition_lc or 'clouds' in condition_lc:
if 'partly' in condition_lc:
condition = 'Partly Cloudy'
else:
condition = 'Cloudy'
elif condition_lc == 'overcast':
condition = 'Cloudy'
elif 'thunder' in condition_lc or 't-storm' in condition_lc:
condition = 'Thunderstorm'
elif 'snow' in condition_lc:
condition = 'Snow'
elif 'rain' in condition_lc or 'showers' in condition_lc:
condition = 'Rainy'
elif 'sunny' in condition_lc:
condition = 'Sunny'
elif 'clear' in condition_lc or 'fair' in condition_lc:
condition = 'Fair'
elif 'fog' in condition_lc:
condition = 'Fog'
return self.color_icons['default'] \
if condition not in self.color_icons \
else self.color_icons[condition] |
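A usage sketch; widget stands in for whatever object carries the color_icons mapping. The fuzzy matching resolves the first made-up string to the 'Partly Cloudy' entry and the second to the 'default' entry:

data = widget.get_color_data('Partly cloudy skies')  # resolves to 'Partly Cloudy'
fallback = widget.get_color_data('Volcanic ash')     # no match, falls back to 'default'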
def check_robotstxt(url, session):
"""Check if robots.txt allows our user agent for the given URL.
@raises: IOError if URL is not allowed
"""
roboturl = get_roboturl(url)
rp = get_robotstxt_parser(roboturl, session=session)
if not rp.can_fetch(UserAgent, str(url)):
raise IOError("%s is disallowed by %s" % (url, roboturl)) | Check if robots.txt allows our user agent for the given URL.
@raises: IOError if URL is not allowed | Below is the the instruction that describes the task:
### Input:
Check if robots.txt allows our user agent for the given URL.
@raises: IOError if URL is not allowed
### Response:
def check_robotstxt(url, session):
"""Check if robots.txt allows our user agent for the given URL.
@raises: IOError if URL is not allowed
"""
roboturl = get_roboturl(url)
rp = get_robotstxt_parser(roboturl, session=session)
if not rp.can_fetch(UserAgent, str(url)):
raise IOError("%s is disallowed by %s" % (url, roboturl)) |
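For comparison, the same can_fetch check can be done with the standard library's parser directly (URL and agent string are placeholders):

import urllib.robotparser

rp = urllib.robotparser.RobotFileParser('https://example.com/robots.txt')
rp.read()
allowed = rp.can_fetch('MyUserAgent/1.0', 'https://example.com/page')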
def disconnect_handler(remote, *args, **kwargs):
"""Handle unlinking of remote account.
This default handler will just delete the remote account link. You may
wish to extend this module to perform clean-up in the remote service
before removing the link (e.g. removing install webhooks).
:param remote: The remote application.
:returns: Redirect response.
"""
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
with db.session.begin_nested():
account = RemoteAccount.get(
user_id=current_user.get_id(),
client_id=remote.consumer_key
)
if account:
account.delete()
db.session.commit()
return redirect(url_for('invenio_oauthclient_settings.index')) | Handle unlinking of remote account.
This default handler will just delete the remote account link. You may
wish to extend this module to perform clean-up in the remote service
before removing the link (e.g. removing install webhooks).
:param remote: The remote application.
:returns: Redirect response. | Below is the the instruction that describes the task:
### Input:
Handle unlinking of remote account.
This default handler will just delete the remote account link. You may
wish to extend this module to perform clean-up in the remote service
before removing the link (e.g. removing install webhooks).
:param remote: The remote application.
:returns: Redirect response.
### Response:
def disconnect_handler(remote, *args, **kwargs):
"""Handle unlinking of remote account.
This default handler will just delete the remote account link. You may
wish to extend this module to perform clean-up in the remote service
before removing the link (e.g. removing install webhooks).
:param remote: The remote application.
:returns: Redirect response.
"""
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
with db.session.begin_nested():
account = RemoteAccount.get(
user_id=current_user.get_id(),
client_id=remote.consumer_key
)
if account:
account.delete()
db.session.commit()
return redirect(url_for('invenio_oauthclient_settings.index')) |
def create_design_doc(self):
"""Create a design document from a Python map function"""
source = [x for x in inspect.getsourcelines(self.func)[0]
if not x.startswith('@')]
doc = {
'_id': '_design/{}'.format(self.name),
'language': 'python',
'views': {
self.name: {
'map': ''.join(source)
}
}
}
return doc | Create a design document from a Python map function | Below is the the instruction that describes the task:
### Input:
Create a design document from a Python map function
### Response:
def create_design_doc(self):
"""Create a design document from a Python map function"""
source = [x for x in inspect.getsourcelines(self.func)[0]
if not x.startswith('@')]
doc = {
'_id': '_design/{}'.format(self.name),
'language': 'python',
'views': {
self.name: {
'map': ''.join(source)
}
}
}
return doc |
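For a view named 'by_type' wrapping a two-line map function, the generated document would look roughly like this; the map source shown is a made-up example:

expected = {
    '_id': '_design/by_type',
    'language': 'python',
    'views': {
        'by_type': {
            'map': 'def by_type(doc):\n    yield doc["type"], doc\n'
        }
    }
}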
def put(local_path, hdfs_path):
"""Put a file on hdfs
:param local_path: Source (str)
:param hdfs_path: Destination (str)
:raises: IOError: If unsuccessful
"""
cmd = "hadoop fs -put %s %s" % (local_path, hdfs_path)
rcode, stdout, stderr = _checked_hadoop_fs_command(cmd) | Put a file on hdfs
:param local_path: Source (str)
:param hdfs_path: Destination (str)
:raises: IOError: If unsuccessful | Below is the the instruction that describes the task:
### Input:
Put a file on hdfs
:param local_path: Source (str)
:param hdfs_path: Destination (str)
:raises: IOError: If unsuccessful
### Response:
def put(local_path, hdfs_path):
"""Put a file on hdfs
:param local_path: Source (str)
:param hdfs_path: Destination (str)
:raises: IOError: If unsuccessful
"""
cmd = "hadoop fs -put %s %s" % (local_path, hdfs_path)
rcode, stdout, stderr = _checked_hadoop_fs_command(cmd) |
def _set_apply_exp_traffic_class_map_name(self, v, load=False):
"""
Setter method for apply_exp_traffic_class_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_apply_exp_traffic_class_map_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_apply_exp_traffic_class_map_name() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=apply_exp_traffic_class_map_name.apply_exp_traffic_class_map_name, is_container='container', presence=False, yang_name="apply-exp-traffic-class-map-name", rest_name="exp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp traffic class map', u'cli-sequence-commands': None, u'alt-name': u'exp-traffic-class', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """apply_exp_traffic_class_map_name must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=apply_exp_traffic_class_map_name.apply_exp_traffic_class_map_name, is_container='container', presence=False, yang_name="apply-exp-traffic-class-map-name", rest_name="exp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp traffic class map', u'cli-sequence-commands': None, u'alt-name': u'exp-traffic-class', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)""",
})
self.__apply_exp_traffic_class_map_name = t
if hasattr(self, '_set'):
self._set() | Setter method for apply_exp_traffic_class_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_apply_exp_traffic_class_map_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_apply_exp_traffic_class_map_name() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for apply_exp_traffic_class_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_apply_exp_traffic_class_map_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_apply_exp_traffic_class_map_name() directly.
### Response:
def _set_apply_exp_traffic_class_map_name(self, v, load=False):
"""
Setter method for apply_exp_traffic_class_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_apply_exp_traffic_class_map_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_apply_exp_traffic_class_map_name() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=apply_exp_traffic_class_map_name.apply_exp_traffic_class_map_name, is_container='container', presence=False, yang_name="apply-exp-traffic-class-map-name", rest_name="exp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp traffic class map', u'cli-sequence-commands': None, u'alt-name': u'exp-traffic-class', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """apply_exp_traffic_class_map_name must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=apply_exp_traffic_class_map_name.apply_exp_traffic_class_map_name, is_container='container', presence=False, yang_name="apply-exp-traffic-class-map-name", rest_name="exp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp traffic class map', u'cli-sequence-commands': None, u'alt-name': u'exp-traffic-class', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)""",
})
self.__apply_exp_traffic_class_map_name = t
if hasattr(self, '_set'):
self._set() |
def step(self, loss, optimizer, scheduler, update=True):
"""
Performs one step of the optimizer.
:param loss: value of loss function
:param optimizer: optimizer
:param update: if True executes weight update
"""
loss.backward()
if update:
if self.grad_clip != float('inf'):
clip_grad_norm_(self.model.parameters(), self.grad_clip)
scheduler.step()
optimizer.step()
self.model.zero_grad() | Performs one step of the optimizer.
:param loss: value of loss function
:param optimizer: optimizer
:param update: if True executes weight update | Below is the the instruction that describes the task:
### Input:
Performs one step of the optimizer.
:param loss: value of loss function
:param optimizer: optimizer
:param update: if True executes weight update
### Response:
def step(self, loss, optimizer, scheduler, update=True):
"""
Performs one step of the optimizer.
:param loss: value of loss function
:param optimizer: optimizer
:param update: if True executes weight update
"""
loss.backward()
if update:
if self.grad_clip != float('inf'):
clip_grad_norm_(self.model.parameters(), self.grad_clip)
scheduler.step()
optimizer.step()
self.model.zero_grad() |
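A minimal standalone illustration of the clipping call used above; the model and data are throwaway:

import torch
from torch.nn.utils import clip_grad_norm_

model = torch.nn.Linear(4, 2)
loss = model(torch.randn(8, 4)).sum()
loss.backward()
clip_grad_norm_(model.parameters(), max_norm=1.0)  # rescales gradients in place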
def check_all(self):
"""
Run all checks on the input file.
"""
self.file_errors = 0
self.line_number = 0
self.indent_char = None
self.indent_level = 0
self.previous_logical = ''
self.blank_lines = 0
self.tokens = []
parens = 0
for token in tokenize.generate_tokens(self.readline_check_physical):
# print tokenize.tok_name[token[0]], repr(token)
self.tokens.append(token)
token_type, text = token[0:2]
if token_type == tokenize.OP and text in '([{':
parens += 1
if token_type == tokenize.OP and text in '}])':
parens -= 1
if token_type == tokenize.NEWLINE and not parens:
self.check_logical()
self.blank_lines = 0
self.tokens = []
if token_type == tokenize.NL and not parens:
if len(self.tokens) <= 1:
# The physical line contains only this token.
self.blank_lines += 1
self.tokens = []
if token_type == tokenize.COMMENT:
source_line = token[4]
token_start = token[2][1]
if source_line[:token_start].strip() == '':
self.blank_lines = 0
if text.endswith('\n') and not parens:
# The comment also ends a physical line. This works around
# Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
self.tokens = []
return self.file_errors | Run all checks on the input file. | Below is the the instruction that describes the task:
### Input:
Run all checks on the input file.
### Response:
def check_all(self):
"""
Run all checks on the input file.
"""
self.file_errors = 0
self.line_number = 0
self.indent_char = None
self.indent_level = 0
self.previous_logical = ''
self.blank_lines = 0
self.tokens = []
parens = 0
for token in tokenize.generate_tokens(self.readline_check_physical):
# print tokenize.tok_name[token[0]], repr(token)
self.tokens.append(token)
token_type, text = token[0:2]
if token_type == tokenize.OP and text in '([{':
parens += 1
if token_type == tokenize.OP and text in '}])':
parens -= 1
if token_type == tokenize.NEWLINE and not parens:
self.check_logical()
self.blank_lines = 0
self.tokens = []
if token_type == tokenize.NL and not parens:
if len(self.tokens) <= 1:
# The physical line contains only this token.
self.blank_lines += 1
self.tokens = []
if token_type == tokenize.COMMENT:
source_line = token[4]
token_start = token[2][1]
if source_line[:token_start].strip() == '':
self.blank_lines = 0
if text.endswith('\n') and not parens:
# The comment also ends a physical line. This works around
# Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
self.tokens = []
return self.file_errors |
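The token stream the checker iterates over can be inspected directly with the standard library; note how the open parenthesis suppresses NEWLINE until it closes:

import io
import tokenize

src = io.StringIO('x = (1,\n     2)\n')
for tok in tokenize.generate_tokens(src.readline):
    print(tokenize.tok_name[tok[0]], repr(tok[1]))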
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) | Add `dist` if we ``can_add()`` it and it has not already been added | Below is the the instruction that describes the task:
### Input:
Add `dist` if we ``can_add()`` it and it has not already been added
### Response:
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) |
def _pidgin_status(status, message):
""" Updates status and message for Pidgin IM application.
`status`
Status type.
`message`
Status message.
"""
try:
iface = _dbus_get_interface('im.pidgin.purple.PurpleService',
'/im/pidgin/purple/PurpleObject',
'im.pidgin.purple.PurpleInterface')
if iface:
# create new transient status
code = PIDGIN_CODE_MAP[status]
saved_status = iface.PurpleSavedstatusNew('', code)
# set the message, if provided
iface.PurpleSavedstatusSetMessage(saved_status, message)
# activate status
iface.PurpleSavedstatusActivate(saved_status)
except dbus.exceptions.DBusException:
pass | Updates status and message for Pidgin IM application.
`status`
Status type.
`message`
Status message. | Below is the the instruction that describes the task:
### Input:
Updates status and message for Pidgin IM application.
`status`
Status type.
`message`
Status message.
### Response:
def _pidgin_status(status, message):
""" Updates status and message for Pidgin IM application.
`status`
Status type.
`message`
Status message.
"""
try:
iface = _dbus_get_interface('im.pidgin.purple.PurpleService',
'/im/pidgin/purple/PurpleObject',
'im.pidgin.purple.PurpleInterface')
if iface:
# create new transient status
code = PIDGIN_CODE_MAP[status]
saved_status = iface.PurpleSavedstatusNew('', code)
# set the message, if provided
iface.PurpleSavedstatusSetMessage(saved_status, message)
# activate status
iface.PurpleSavedstatusActivate(saved_status)
except dbus.exceptions.DBusException:
pass |
def verify_hmac_sha1(request, client_secret=None,
resource_owner_secret=None):
"""Verify a HMAC-SHA1 signature.
Per `section 3.4`_ of the spec.
.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
attribute MUST be an absolute URI whose netloc part identifies the
origin server or gateway on which the resource resides. Any Host
item of the request argument's headers dict attribute will be
ignored.
.. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
"""
norm_params = normalize_parameters(request.params)
bs_uri = base_string_uri(request.uri)
sig_base_str = signature_base_string(request.http_method, bs_uri,
norm_params)
signature = sign_hmac_sha1(sig_base_str, client_secret,
resource_owner_secret)
match = safe_string_equals(signature, request.signature)
if not match:
log.debug('Verify HMAC-SHA1 failed: signature base string: %s',
sig_base_str)
return match | Verify a HMAC-SHA1 signature.
Per `section 3.4`_ of the spec.
.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
attribute MUST be an absolute URI whose netloc part identifies the
origin server or gateway on which the resource resides. Any Host
item of the request argument's headers dict attribute will be
ignored.
.. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2 | Below is the the instruction that describes the task:
### Input:
Verify a HMAC-SHA1 signature.
Per `section 3.4`_ of the spec.
.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
attribute MUST be an absolute URI whose netloc part identifies the
origin server or gateway on which the resource resides. Any Host
item of the request argument's headers dict attribute will be
ignored.
.. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
### Response:
def verify_hmac_sha1(request, client_secret=None,
resource_owner_secret=None):
"""Verify a HMAC-SHA1 signature.
Per `section 3.4`_ of the spec.
.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
attribute MUST be an absolute URI whose netloc part identifies the
origin server or gateway on which the resource resides. Any Host
item of the request argument's headers dict attribute will be
ignored.
.. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
"""
norm_params = normalize_parameters(request.params)
bs_uri = base_string_uri(request.uri)
sig_base_str = signature_base_string(request.http_method, bs_uri,
norm_params)
signature = sign_hmac_sha1(sig_base_str, client_secret,
resource_owner_secret)
match = safe_string_equals(signature, request.signature)
if not match:
log.debug('Verify HMAC-SHA1 failed: signature base string: %s',
sig_base_str)
return match |
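Conceptually, the sign_hmac_sha1 step reduces to a standard-library HMAC over the base string, keyed on the '&'-joined secrets; the values below are placeholders:

import base64
import hashlib
import hmac

key = b'client_secret&resource_owner_secret'  # RFC 5849 key construction
base_string = b'POST&https%3A%2F%2Fexample.com%2F&oauth_nonce%3D...'  # placeholder
signature = base64.b64encode(hmac.new(key, base_string, hashlib.sha1).digest())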
def _pfp__build(self, stream=None, save_offset=False):
"""Build the field and write the result into the stream
:stream: An IO stream that can be written to
:returns: the number of bytes written if a stream is given, otherwise the built data
"""
if save_offset and stream is not None:
self._pfp__offset = stream.tell()
# returns either num bytes written or total data
res = utils.binary("") if stream is None else 0
# iterate IN ORDER
for child in self._pfp__children:
child_res = child._pfp__build(stream, save_offset)
res += child_res
return res | Build the field and write the result into the stream
:stream: An IO stream that can be written to
:returns: the number of bytes written if a stream is given, otherwise the built data | Below is the the instruction that describes the task:
### Input:
Build the field and write the result into the stream
:stream: An IO stream that can be written to
:returns: the number of bytes written if a stream is given, otherwise the built data
### Response:
def _pfp__build(self, stream=None, save_offset=False):
"""Build the field and write the result into the stream
:stream: An IO stream that can be written to
:returns: the number of bytes written if a stream is given, otherwise the built data
"""
if save_offset and stream is not None:
self._pfp__offset = stream.tell()
# returns either num bytes written or total data
res = utils.binary("") if stream is None else 0
# iterate IN ORDER
for child in self._pfp__children:
child_res = child._pfp__build(stream, save_offset)
res += child_res
return res |
def classify_regions(dataset, masks, method='ERF', threshold=0.08,
remove_overlap=True, regularization='scale',
output='summary', studies=None, features=None,
class_weight='auto', classifier=None,
cross_val='4-Fold', param_grid=None, scoring='accuracy'):
""" Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
masks: a list of paths to Nifti masks
method: a string indicating which method to used.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
class_weight: Parameter to pass to classifier determining how to
weight classes
classifier: An optional scikit-learn classifier to use instead of
pre-set up classifiers set up using 'method'
cross_val: A string indicating type of cross validation to use.
Can also pass a scikit_classifier
param_grid: A dictionary indicating which parameters to optimize
using GridSearchCV. If None, no GridSearch will be used
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
"""
(X, y) = get_studies_by_regions(dataset, masks, threshold, remove_overlap,
studies, features,
regularization=regularization)
return classify(X, y, method, classifier, output, cross_val,
class_weight, scoring=scoring, param_grid=param_grid) | Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
masks: a list of paths to Nifti masks
method: a string indicating which method to used.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
class_weight: Parameter to pass to classifier determining how to
weight classes
classifier: An optional scikit-learn classifier to use instead of
pre-set up classifiers set up using 'method'
cross_val: A string indicating type of cross validation to use.
Can also pass a scikit_classifier
param_grid: A dictionary indicating which parameters to optimize
using GridSearchCV. If None, no GridSearch will be used
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels | Below is the the instruction that describes the task:
### Input:
Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
masks: a list of paths to Nifti masks
method: a string indicating which method to used.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
class_weight: Parameter to pass to classifier determining how to
weight classes
classifier: An optional scikit-learn classifier to use instead of
pre-set up classifiers set up using 'method'
cross_val: A string indicating type of cross validation to use.
Can also pass a scikit_classifier
param_grid: A dictionary indicating which parameters to optimize
using GridSearchCV. If None, no GridSearch will be used
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
### Response:
def classify_regions(dataset, masks, method='ERF', threshold=0.08,
remove_overlap=True, regularization='scale',
output='summary', studies=None, features=None,
class_weight='auto', classifier=None,
cross_val='4-Fold', param_grid=None, scoring='accuracy'):
""" Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
masks: a list of paths to Nifti masks
method: a string indicating which method to used.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
class_weight: Parameter to pass to classifier determining how to
weight classes
classifier: An optional scikit-learn classifier to use instead of
pre-set up classifiers set up using 'method'
cross_val: A string indicating type of cross validation to use.
Can also pass a scikit_classifier
param_grid: A dictionary indicating which parameters to optimize
using GridSearchCV. If None, no GridSearch will be used
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
"""
(X, y) = get_studies_by_regions(dataset, masks, threshold, remove_overlap,
studies, features,
regularization=regularization)
return classify(X, y, method, classifier, output, cross_val,
class_weight, scoring=scoring, param_grid=param_grid) |
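A hedged usage sketch; the file names are placeholders and Dataset.load is assumed from typical Neurosynth usage rather than taken from this record:

from neurosynth.base.dataset import Dataset

dataset = Dataset.load('dataset.pkl')  # previously pickled Neurosynth dataset
summary = classify_regions(dataset,
                           ['mask_a.nii.gz', 'mask_b.nii.gz'],
                           method='ERF', threshold=0.08)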
def in_edit_mode(self, request, placeholder):
"""
Returns True, if the plugin is in "edit mode".
"""
toolbar = getattr(request, 'toolbar', None)
edit_mode = getattr(toolbar, 'edit_mode', False) and getattr(placeholder, 'is_editable', True)
if edit_mode:
edit_mode = placeholder.has_change_permission(request.user)
return edit_mode | Returns True, if the plugin is in "edit mode". | Below is the the instruction that describes the task:
### Input:
Returns True, if the plugin is in "edit mode".
### Response:
def in_edit_mode(self, request, placeholder):
"""
Returns True, if the plugin is in "edit mode".
"""
toolbar = getattr(request, 'toolbar', None)
edit_mode = getattr(toolbar, 'edit_mode', False) and getattr(placeholder, 'is_editable', True)
if edit_mode:
edit_mode = placeholder.has_change_permission(request.user)
return edit_mode |
def get_task_param_string(task):
"""Get all parameters of a task as one string
Returns:
str: task parameter string
"""
# get dict str -> str from luigi
param_dict = task.to_str_params()
# sort keys, serialize
items = []
for key in sorted(param_dict.keys()):
items.append("'{:s}': '{:s}'".format(key, param_dict[key]))
return "{" + ", ".join(items) + "}" | Get all parameters of a task as one string
Returns:
str: task parameter string | Below is the the instruction that describes the task:
### Input:
Get all parameters of a task as one string
Returns:
str: task parameter string
### Response:
def get_task_param_string(task):
"""Get all parameters of a task as one string
Returns:
str: task parameter string
"""
# get dict str -> str from luigi
param_dict = task.to_str_params()
# sort keys, serialize
items = []
for key in sorted(param_dict.keys()):
items.append("'{:s}': '{:s}'".format(key, param_dict[key]))
return "{" + ", ".join(items) + "}" |
def p_navigation_step_2(self, p):
'''navigation_step : ARROW identifier LSQBR identifier DOT phrase RSQBR'''
p[0] = NavigationStepNode(key_letter=p[2],
rel_id=p[4],
phrase=p[6]) | navigation_step : ARROW identifier LSQBR identifier DOT phrase RSQBR | Below is the the instruction that describes the task:
### Input:
navigation_step : ARROW identifier LSQBR identifier DOT phrase RSQBR
### Response:
def p_navigation_step_2(self, p):
'''navigation_step : ARROW identifier LSQBR identifier DOT phrase RSQBR'''
p[0] = NavigationStepNode(key_letter=p[2],
rel_id=p[4],
phrase=p[6]) |
def make_prefix(self, prefix, iterable):
"""
Add prefix to the label
:param prefix:
:param iterable:
:return:
"""
if not prefix:
    yield from iterable
    return
for key, value in iterable:
    yield f"{prefix}_{key}", value | Add prefix to the label
:param prefix:
:param iterable:
:return: | Below is the the instruction that describes the task:
### Input:
Add prefix to the label
:param prefix:
:param iterable:
:return:
### Response:
def make_prefix(self, prefix, iterable):
"""
Add prefix to the label
:param prefix:
:param iterable:
:return:
"""
if not prefix:
    yield from iterable
    return
for key, value in iterable:
    yield f"{prefix}_{key}", value
def get_referenced_object(self):
"""
:rtype: core.BunqModel
:raise: BunqException
"""
if self._TabUsageSingle is not None:
return self._TabUsageSingle
if self._TabUsageMultiple is not None:
return self._TabUsageMultiple
raise exception.BunqException(self._ERROR_NULL_FIELDS) | :rtype: core.BunqModel
:raise: BunqException | Below is the the instruction that describes the task:
### Input:
:rtype: core.BunqModel
:raise: BunqException
### Response:
def get_referenced_object(self):
"""
:rtype: core.BunqModel
:raise: BunqException
"""
if self._TabUsageSingle is not None:
return self._TabUsageSingle
if self._TabUsageMultiple is not None:
return self._TabUsageMultiple
raise exception.BunqException(self._ERROR_NULL_FIELDS) |
def list_tables(self, limit=None, start_table=None):
"""
Return a list of the names of all Tables associated with the
current account and region.
TODO - Layer2 should probably automatically handle pagination.
:type limit: int
:param limit: The maximum number of tables to return.
:type start_table: str
:param start_table: The name of the table that starts the
list. If you ran a previous list_tables and not
all results were returned, the response dict would
include a LastEvaluatedTableName attribute. Use
that value here to continue the listing.
"""
result = self.layer1.list_tables(limit, start_table)
return result['TableNames'] | Return a list of the names of all Tables associated with the
current account and region.
TODO - Layer2 should probably automatically handle pagination.
:type limit: int
:param limit: The maximum number of tables to return.
:type start_table: str
:param start_table: The name of the table that starts the
list. If you ran a previous list_tables and not
all results were returned, the response dict would
include a LastEvaluatedTableName attribute. Use
that value here to continue the listing. | Below is the the instruction that describes the task:
### Input:
Return a list of the names of all Tables associated with the
current account and region.
TODO - Layer2 should probably automatically handle pagination.
:type limit: int
:param limit: The maximum number of tables to return.
:type start_table: str
:param start_table: The name of the table that starts the
list. If you ran a previous list_tables and not
all results were returned, the response dict would
include a LastEvaluatedTableName attribute. Use
that value here to continue the listing.
### Response:
def list_tables(self, limit=None, start_table=None):
"""
Return a list of the names of all Tables associated with the
current account and region.
TODO - Layer2 should probably automatically handle pagination.
:type limit: int
:param limit: The maximum number of tables to return.
:type start_table: str
:param start_table: The name of the table that starts the
list. If you ran a previous list_tables and not
all results were returned, the response dict would
include a LastEvaluatedTableName attribute. Use
that value here to continue the listing.
"""
result = self.layer1.list_tables(limit, start_table)
return result['TableNames'] |
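One way to page through results with this method, feeding the last returned name back in as start_table; conn is an assumed Layer2 instance:

names, start = [], None
while True:
    batch = conn.list_tables(limit=100, start_table=start)
    names.extend(batch)
    if len(batch) < 100:  # short batch means no more tables
        break
    start = batch[-1]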
def reduce_duplicate_frequencies(self):
"""In case multiple frequencies were measured, average them and compute
std, min, max values for zt.
In case timesteps were added (i.e., multiple separate measurements),
group over those and average for each timestep.
Examples
--------
::
import tempfile
import reda
with tempfile.TemporaryDirectory() as fid:
reda.data.download_data('sip04_fs_06', fid)
sip = reda.SIP()
sip.import_sip04(fid + '/sip_dataA.mat', timestep=0)
# well, add the spectrum again as another timestep
sip.import_sip04(fid + '/sip_dataA.mat', timestep=1)
df = sip.reduce_duplicate_frequencies()
"""
group_keys = ['frequency', ]
if 'timestep' in self.data.columns:
group_keys = group_keys + ['timestep', ]
g = self.data.groupby(group_keys)
def group_apply(item):
y = item[['zt_1', 'zt_2', 'zt_3']].values.flatten()
zt_imag_std = np.std(y.imag)
zt_real_std = np.std(y.real)
zt_imag_min = np.min(y.imag)
zt_real_min = np.min(y.real)
zt_imag_max = np.max(y.imag)
zt_real_max = np.max(y.real)
zt_imag_mean = np.mean(y.imag)
zt_real_mean = np.mean(y.real)
dfn = pd.DataFrame(
{
'zt_real_mean': zt_real_mean,
'zt_real_std': zt_real_std,
'zt_real_min': zt_real_min,
'zt_real_max': zt_real_max,
'zt_imag_mean': zt_imag_mean,
'zt_imag_std': zt_imag_std,
'zt_imag_min': zt_imag_min,
'zt_imag_max': zt_imag_max,
},
index=[0, ]
)
dfn['count'] = len(y)
dfn.index.name = 'index'
return dfn
p = g.apply(group_apply)
p.index = p.index.droplevel('index')
if len(group_keys) > 1:
p = p.swaplevel(0, 1).sort_index()
return p | In case multiple frequencies were measured, average them and compute
std, min, max values for zt.
In case timesteps were added (i.e., multiple separate measurements),
group over those and average for each timestep.
Examples
--------
::
import tempfile
import reda
with tempfile.TemporaryDirectory() as fid:
reda.data.download_data('sip04_fs_06', fid)
sip = reda.SIP()
sip.import_sip04(fid + '/sip_dataA.mat', timestep=0)
# well, add the spectrum again as another timestep
sip.import_sip04(fid + '/sip_dataA.mat', timestep=1)
df = sip.reduce_duplicate_frequencies() | Below is the the instruction that describes the task:
### Input:
In case multiple frequencies were measured, average them and compute
std, min, max values for zt.
In case timesteps were added (i.e., multiple separate measurements),
group over those and average for each timestep.
Examples
--------
::
import tempfile
import reda
with tempfile.TemporaryDirectory() as fid:
reda.data.download_data('sip04_fs_06', fid)
sip = reda.SIP()
sip.import_sip04(fid + '/sip_dataA.mat', timestep=0)
# well, add the spectrum again as another timestep
sip.import_sip04(fid + '/sip_dataA.mat', timestep=1)
df = sip.reduce_duplicate_frequencies()
### Response:
def reduce_duplicate_frequencies(self):
"""In case multiple frequencies were measured, average them and compute
std, min, max values for zt.
In case timesteps were added (i.e., multiple separate measurements),
group over those and average for each timestep.
Examples
--------
::
import tempfile
import reda
with tempfile.TemporaryDirectory() as fid:
reda.data.download_data('sip04_fs_06', fid)
sip = reda.SIP()
sip.import_sip04(fid + '/sip_dataA.mat', timestep=0)
# well, add the spectrum again as another timestep
sip.import_sip04(fid + '/sip_dataA.mat', timestep=1)
df = sip.reduce_duplicate_frequencies()
"""
group_keys = ['frequency', ]
if 'timestep' in self.data.columns:
group_keys = group_keys + ['timestep', ]
g = self.data.groupby(group_keys)
def group_apply(item):
y = item[['zt_1', 'zt_2', 'zt_3']].values.flatten()
zt_imag_std = np.std(y.imag)
zt_real_std = np.std(y.real)
zt_imag_min = np.min(y.imag)
zt_real_min = np.min(y.real)
zt_imag_max = np.max(y.imag)
zt_real_max = np.max(y.real)
zt_imag_mean = np.mean(y.imag)
zt_real_mean = np.mean(y.real)
dfn = pd.DataFrame(
{
'zt_real_mean': zt_real_mean,
'zt_real_std': zt_real_std,
'zt_real_min': zt_real_min,
'zt_real_max': zt_real_max,
'zt_imag_mean': zt_imag_mean,
'zt_imag_std': zt_imag_std,
'zt_imag_min': zt_imag_min,
'zt_imag_max': zt_imag_max,
},
index=[0, ]
)
dfn['count'] = len(y)
dfn.index.name = 'index'
return dfn
p = g.apply(group_apply)
p.index = p.index.droplevel('index')
if len(group_keys) > 1:
p = p.swaplevel(0, 1).sort_index()
return p |
def _CreateStopPlacemark(self, stop_folder, stop, style_id):
"""Creates a new stop <Placemark/> element.
Args:
stop_folder: the KML folder the placemark will be added to.
stop: the actual Stop to create a placemark for.
style_id: optional argument indicating a style id to add to the placemark.
"""
desc_items = []
desc_items.append("Stop id: %s" % stop.stop_id)
if stop.stop_desc:
desc_items.append(stop.stop_desc)
if stop.stop_url:
desc_items.append('Stop info page: <a href="%s">%s</a>' % (
stop.stop_url, stop.stop_url))
description = '<br/>'.join(desc_items) or None
placemark = self._CreatePlacemark(stop_folder, stop.stop_name,
description=description,
style_id=style_id)
point = ET.SubElement(placemark, 'Point')
coordinates = ET.SubElement(point, 'coordinates')
coordinates.text = '%.6f,%.6f' % (stop.stop_lon, stop.stop_lat) | Creates a new stop <Placemark/> element.
Args:
stop_folder: the KML folder the placemark will be added to.
stop: the actual Stop to create a placemark for.
style_id: optional argument indicating a style id to add to the placemark. | Below is the the instruction that describes the task:
### Input:
Creates a new stop <Placemark/> element.
Args:
stop_folder: the KML folder the placemark will be added to.
stop: the actual Stop to create a placemark for.
style_id: optional argument indicating a style id to add to the placemark.
### Response:
def _CreateStopPlacemark(self, stop_folder, stop, style_id):
"""Creates a new stop <Placemark/> element.
Args:
stop_folder: the KML folder the placemark will be added to.
stop: the actual Stop to create a placemark for.
style_id: optional argument indicating a style id to add to the placemark.
"""
desc_items = []
desc_items.append("Stop id: %s" % stop.stop_id)
if stop.stop_desc:
desc_items.append(stop.stop_desc)
if stop.stop_url:
desc_items.append('Stop info page: <a href="%s">%s</a>' % (
stop.stop_url, stop.stop_url))
description = '<br/>'.join(desc_items) or None
placemark = self._CreatePlacemark(stop_folder, stop.stop_name,
description=description,
style_id=style_id)
point = ET.SubElement(placemark, 'Point')
coordinates = ET.SubElement(point, 'coordinates')
coordinates.text = '%.6f,%.6f' % (stop.stop_lon, stop.stop_lat) |
def create_coupon(self, currency, amount, receiver):
"""
This method allows you to create Coupons.
Please note: In order to use this method, you need the Coupon key privilege. You can make a request to
enable it by submitting a ticket to Support.
You need to create the API key that you are going to use for this method in advance. Please provide
the first 8 characters of the key (e.g. HKG82W66) in your ticket to support. We'll enable the Coupon privilege
for this key.
You must also provide us the IP-addresses from which you will be accessing the API.
When using this method, there will be no additional confirmations of transactions. Please note that you are
fully responsible for keeping the secret of the API key safe after we have enabled the Withdraw
privilege for it.
:param str currency: currency (ex. 'BTC')
:param int amount: withdrawal amount
:param str receiver: name of user who is allowed to redeem the code
"""
return self._trade_api_call('CreateCoupon', currency=currency, amount=amount, receiver=receiver) | This method allows you to create Coupons.
Please note: In order to use this method, you need the Coupon key privilege. You can make a request to
enable it by submitting a ticket to Support.
You need to create the API key that you are going to use for this method in advance. Please provide
the first 8 characters of the key (e.g. HKG82W66) in your ticket to support. We'll enable the Coupon privilege
for this key.
You must also provide us the IP-addresses from which you will be accessing the API.
When using this method, there will be no additional confirmations of transactions. Please note that you are
fully responsible for keeping the secret of the API key safe after we have enabled the Withdraw
privilege for it.
:param str currency: currency (ex. 'BTC')
:param int amount: withdrawal amount
:param str receiver: name of user who is allowed to redeem the code | Below is the the instruction that describes the task:
### Input:
This method allows you to create Coupons.
Please note: In order to use this method, you need the Coupon key privilege. You can make a request to
enable it by submitting a ticket to Support.
You need to create the API key that you are going to use for this method in advance. Please provide
the first 8 characters of the key (e.g. HKG82W66) in your ticket to support. We'll enable the Coupon privilege
for this key.
You must also provide us the IP-addresses from which you will be accessing the API.
When using this method, there will be no additional confirmations of transactions. Please note that you are
fully responsible for keeping the secret of the API key safe after we have enabled the Withdraw
privilege for it.
:param str currency: currency (ex. 'BTC')
:param int amount: withdrawal amount
:param str receiver: name of user who is allowed to redeem the code
### Response:
def create_coupon(self, currency, amount, receiver):
"""
This method allows you to create Coupons.
Please note: In order to use this method, you need the Coupon key privilege. You can make a request to
enable it by submitting a ticket to Support.
You need to create the API key that you are going to use for this method in advance. Please provide
the first 8 characters of the key (e.g. HKG82W66) in your ticket to support. We'll enable the Coupon privilege
for this key.
You must also provide us the IP-addresses from which you will be accessing the API.
When using this method, there will be no additional confirmations of transactions. Please note that you are
fully responsible for keeping the secret of the API key safe after we have enabled the Withdraw
privilege for it.
:param str currency: currency (ex. 'BTC')
:param int amount: withdrawal amount
:param str receiver: name of user who is allowed to redeem the code
"""
return self._trade_api_call('CreateCoupon', currency=currency, amount=amount, receiver=receiver) |
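A usage sketch for the method above; the client class name and credentials are illustrative stand-ins, not part of the source, and only create_coupon() itself comes from the code shown:

# Hypothetical wiring: any client exposing the signed _trade_api_call() works here.
api = TradeApiClient(api_key='HKG82W66...', api_secret='...')  # placeholder credentials
result = api.create_coupon(currency='BTC', amount=1, receiver='alice')
print(result)  # the exchange's JSON response, typically containing the coupon code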
async def change_presence(self, *, activity=None, status=None, afk=False, shard_id=None):
"""|coro|
Changes the client's presence.
The activity parameter is a :class:`Activity` object (not a string) that represents
the activity being done currently. This could also be the slimmed down versions,
:class:`Game` and :class:`Streaming`.
Example: ::
game = discord.Game("with the API")
await client.change_presence(status=discord.Status.idle, activity=game)
Parameters
----------
activity: Optional[Union[:class:`Game`, :class:`Streaming`, :class:`Activity`]]
The activity being done. ``None`` if no currently active activity is done.
status: Optional[:class:`Status`]
Indicates what status to change to. If None, then
:attr:`Status.online` is used.
afk: :class:`bool`
Indicates if you are going AFK. This allows the discord
client to know how to handle push notifications better
for you in case you are actually idle and not lying.
shard_id: Optional[:class:`int`]
The shard_id to change the presence to. If not specified
or ``None``, then it will change the presence of every
shard the bot can see.
Raises
------
InvalidArgument
If the ``activity`` parameter is not of proper type.
"""
if status is None:
status = 'online'
status_enum = Status.online
elif status is Status.offline:
status = 'invisible'
status_enum = Status.offline
else:
status_enum = status
status = str(status)
if shard_id is None:
for shard in self.shards.values():
await shard.ws.change_presence(activity=activity, status=status, afk=afk)
guilds = self._connection.guilds
else:
shard = self.shards[shard_id]
await shard.ws.change_presence(activity=activity, status=status, afk=afk)
guilds = [g for g in self._connection.guilds if g.shard_id == shard_id]
for guild in guilds:
me = guild.me
if me is None:
continue
me.activities = (activity,)
me.status = status_enum | |coro|
Changes the client's presence.
The activity parameter is a :class:`Activity` object (not a string) that represents
the activity being done currently. This could also be the slimmed down versions,
:class:`Game` and :class:`Streaming`.
Example: ::
game = discord.Game("with the API")
await client.change_presence(status=discord.Status.idle, activity=game)
Parameters
----------
activity: Optional[Union[:class:`Game`, :class:`Streaming`, :class:`Activity`]]
The activity being done. ``None`` if no currently active activity is done.
status: Optional[:class:`Status`]
Indicates what status to change to. If None, then
:attr:`Status.online` is used.
afk: :class:`bool`
Indicates if you are going AFK. This allows the discord
client to know how to handle push notifications better
for you in case you are actually idle and not lying.
shard_id: Optional[:class:`int`]
The shard_id to change the presence to. If not specified
or ``None``, then it will change the presence of every
shard the bot can see.
Raises
------
InvalidArgument
If the ``activity`` parameter is not of proper type. | Below is the the instruction that describes the task:
### Input:
|coro|
Changes the client's presence.
The activity parameter is a :class:`Activity` object (not a string) that represents
the activity being done currently. This could also be the slimmed down versions,
:class:`Game` and :class:`Streaming`.
Example: ::
game = discord.Game("with the API")
await client.change_presence(status=discord.Status.idle, activity=game)
Parameters
----------
activity: Optional[Union[:class:`Game`, :class:`Streaming`, :class:`Activity`]]
The activity being done. ``None`` if no currently active activity is done.
status: Optional[:class:`Status`]
Indicates what status to change to. If None, then
:attr:`Status.online` is used.
afk: :class:`bool`
Indicates if you are going AFK. This allows the discord
client to know how to handle push notifications better
for you in case you are actually idle and not lying.
shard_id: Optional[:class:`int`]
The shard_id to change the presence to. If not specified
or ``None``, then it will change the presence of every
shard the bot can see.
Raises
------
InvalidArgument
If the ``activity`` parameter is not of proper type.
### Response:
async def change_presence(self, *, activity=None, status=None, afk=False, shard_id=None):
"""|coro|
Changes the client's presence.
The activity parameter is a :class:`Activity` object (not a string) that represents
the activity being done currently. This could also be the slimmed down versions,
:class:`Game` and :class:`Streaming`.
Example: ::
game = discord.Game("with the API")
await client.change_presence(status=discord.Status.idle, activity=game)
Parameters
----------
activity: Optional[Union[:class:`Game`, :class:`Streaming`, :class:`Activity`]]
The activity being done. ``None`` if no currently active activity is done.
status: Optional[:class:`Status`]
Indicates what status to change to. If None, then
:attr:`Status.online` is used.
afk: :class:`bool`
Indicates if you are going AFK. This allows the discord
client to know how to handle push notifications better
for you in case you are actually idle and not lying.
shard_id: Optional[:class:`int`]
The shard_id to change the presence to. If not specified
or ``None``, then it will change the presence of every
shard the bot can see.
Raises
------
InvalidArgument
If the ``activity`` parameter is not of proper type.
"""
if status is None:
status = 'online'
status_enum = Status.online
elif status is Status.offline:
status = 'invisible'
status_enum = Status.offline
else:
status_enum = status
status = str(status)
if shard_id is None:
for shard in self.shards.values():
await shard.ws.change_presence(activity=activity, status=status, afk=afk)
guilds = self._connection.guilds
else:
shard = self.shards[shard_id]
await shard.ws.change_presence(activity=activity, status=status, afk=afk)
guilds = [g for g in self._connection.guilds if g.shard_id == shard_id]
for guild in guilds:
me = guild.me
if me is None:
continue
me.activities = (activity,)
me.status = status_enum |
def create_key_for_data(prefix, data, key_params):
"""
From the task's ``data`` params, create the corresponding key with the help of ``key_params`` (defined in the decorator)
"""
d = data.get_data()
values = []
for k in key_params:
if k in d and type(d[k]) is list:
values.append("{0}:{1}".format(k, " -".join(d[k])))
else:
value = d[k] if k in d else ''
values.append("{0}:{1}".format(k, value))
return "{0}-{1}".format(prefix, "-".join(values)) | From ``data`` params in task create corresponding key with help of ``key_params`` (defined in decorator) | Below is the the instruction that describes the task:
### Input:
From the task's ``data`` params, create the corresponding key with the help of ``key_params`` (defined in the decorator)
### Response:
def create_key_for_data(prefix, data, key_params):
"""
From the task's ``data`` params, create the corresponding key with the help of ``key_params`` (defined in the decorator)
"""
d = data.get_data()
values = []
for k in key_params:
if k in d and type(d[k]) is list:
values.append("{0}:{1}".format(k, " -".join(d[k])))
else:
value = d[k] if k in d else ''
values.append("{0}:{1}".format(k, value))
return "{0}-{1}".format(prefix, "-".join(values)) |
def unroll_auth_headers(self, authheaders, exclude_signature=False, sep=",", quote=True):
"""Converts an authorization header dict-like object into a string representing the authorization.
Keyword arguments:
authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
"""
res = ""
ordered = collections.OrderedDict(sorted(authheaders.items()))
form = '{0}=\"{1}\"' if quote else '{0}={1}'
if exclude_signature:
return sep.join([form.format(k, urlquote(str(v), safe='')) for k, v in ordered.items() if k != 'signature'])
else:
return sep.join([form.format(k, urlquote(str(v), safe='') if k != 'signature' else str(v)) for k, v in ordered.items()]) | Converts an authorization header dict-like object into a string representing the authorization.
Keyword arguments:
authheaders -- A string-indexable object which contains the headers appropriate for this signature version. | Below is the the instruction that describes the task:
### Input:
Converts an authorization header dict-like object into a string representing the authorization.
Keyword arguments:
authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
### Response:
def unroll_auth_headers(self, authheaders, exclude_signature=False, sep=",", quote=True):
"""Converts an authorization header dict-like object into a string representing the authorization.
Keyword arguments:
authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
"""
res = ""
ordered = collections.OrderedDict(sorted(authheaders.items()))
form = '{0}=\"{1}\"' if quote else '{0}={1}'
if exclude_signature:
return sep.join([form.format(k, urlquote(str(v), safe='')) for k, v in ordered.items() if k != 'signature'])
else:
return sep.join([form.format(k, urlquote(str(v), safe='') if k != 'signature' else str(v)) for k, v in ordered.items()]) |
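A sketch of the canonicalization this produces, assuming signer is an instance of the class above:

headers = {'keyid': 'abc 123', 'alg': 'hmac-sha256', 'signature': 'Zm9v+bar='}
print(signer.unroll_auth_headers(headers))
# -> alg="hmac-sha256",keyid="abc%20123",signature="Zm9v+bar="
# keys are sorted; every value is percent-encoded except the signature itself
print(signer.unroll_auth_headers(headers, exclude_signature=True, sep='&', quote=False))
# -> alg=hmac-sha256&keyid=abc%20123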
def reload(self):
"""
Reload the flow from the pickle file. Used when we are monitoring the flow
executed by the scheduler. In this case, indeed, the flow might have been changed
by the scheduler and we have to reload the new flow in memory.
"""
new = self.__class__.pickle_load(self.workdir)
self.__dict__.update(new.__dict__)  # rebinding `self = new` would only change the local name
executed by the scheduler. In this case, indeed, the flow might have been changed
by the scheduler and we have to reload the new flow in memory. | Below is the the instruction that describes the task:
### Input:
Reload the flow from the pickle file. Used when we are monitoring the flow
executed by the scheduler. In this case, indeed, the flow might have been changed
by the scheduler and we have to reload the new flow in memory.
### Response:
def reload(self):
"""
Reload the flow from the pickle file. Used when we are monitoring the flow
executed by the scheduler. In this case, indeed, the flow might have been changed
by the scheduler and we have to reload the new flow in memory.
"""
new = self.__class__.pickle_load(self.workdir)
self.__dict__.update(new.__dict__)  # rebinding `self = new` would only change the local name
def works(self, prefix_id):
"""
This method retrieves an iterable of Works of the given prefix.
args: Crossref Prefix (String)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(prefix_id))
return Works(context=context) | This method retrieves an iterable of Works of the given prefix.
args: Crossref Prefix (String)
return: Works() | Below is the the instruction that describes the task:
### Input:
This method retrieves an iterable of Works of the given prefix.
args: Crossref Prefix (String)
return: Works()
### Response:
def works(self, prefix_id):
"""
This method retrieves an iterable of Works of the given prefix.
args: Crossref Prefix (String)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(prefix_id))
return Works(context=context) |
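A usage sketch in the style of the crossrefapi client this appears to come from; the prefix value is illustrative, and the query/iteration helpers on the returned Works object are assumed from that client rather than shown in the source:

prefixes = Prefixes()
works = prefixes.works('10.1590')   # Works() scoped to the '10.1590' prefix endpoint
for item in works.query('zika'):    # assumed Works helper; iterates matching records
    print(item.get('DOI'), item.get('title'))
    break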
def transform_rest_response(self, response_body):
"""Translates an apiserving REST response so it's ready to return.
Currently, the only thing that needs to be fixed here is indentation,
so it's consistent with what the live app will return.
Args:
response_body: A string containing the backend response.
Returns:
A reformatted version of the response JSON.
"""
body_json = json.loads(response_body)
return json.dumps(body_json, indent=1, sort_keys=True) | Translates an apiserving REST response so it's ready to return.
Currently, the only thing that needs to be fixed here is indentation,
so it's consistent with what the live app will return.
Args:
response_body: A string containing the backend response.
Returns:
A reformatted version of the response JSON. | Below is the the instruction that describes the task:
### Input:
Translates an apiserving REST response so it's ready to return.
Currently, the only thing that needs to be fixed here is indentation,
so it's consistent with what the live app will return.
Args:
response_body: A string containing the backend response.
Returns:
A reformatted version of the response JSON.
### Response:
def transform_rest_response(self, response_body):
"""Translates an apiserving REST response so it's ready to return.
Currently, the only thing that needs to be fixed here is indentation,
so it's consistent with what the live app will return.
Args:
response_body: A string containing the backend response.
Returns:
A reformatted version of the response JSON.
"""
body_json = json.loads(response_body)
return json.dumps(body_json, indent=1, sort_keys=True) |
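The reformatting is a plain json round-trip, so the behaviour can be shown standalone:

import json

raw = '{"b": 2, "a": {"y": 1, "x": 0}}'
print(json.dumps(json.loads(raw), indent=1, sort_keys=True))
# {
#  "a": {
#   "x": 0,
#   "y": 1
#  },
#  "b": 2
# }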
def sec_project_community(self, project=None):
"""
Generate the data for the Communication section in a Project report
:return:
"""
def create_csv(metric1, csv_labels, file_label):
esfilters = None
csv_labels = csv_labels.replace("_", "")  # LaTeX does not support "_"
if project != self.GLOBAL_PROJECT:
esfilters = {"project": project}
data_path = os.path.join(self.data_dir, "data")
file_name = os.path.join(data_path, file_label + "_" + project + ".csv")
logger.debug("CSV file %s generation in progress", file_name)
m1 = metric1(self.es_url, self.get_metric_index(metric1),
esfilters=esfilters, start=self.end_prev_month, end=self.end)
top = m1.get_list()
csv = csv_labels + '\n'
for i in range(0, len(top['value'])):
if i > self.TOP_MAX:
break
csv += top[metric1.FIELD_NAME][i] + "," + self.str_val(top['value'][i])
csv += "\n"
with open(file_name, "w") as f:
f.write(csv)
logger.debug("CSV file %s was generated", file_name)
logger.info("Community data for: %s", project)
author = self.config['project_community']['author_metrics'][0]
csv_labels = 'labels,' + author.id
file_label = author.ds.name + "_" + author.id
title_label = author.name + " per " + self.interval
self.__create_csv_eps(author, None, csv_labels, file_label, title_label,
project)
"""
Main developers
"""
metric = self.config['project_community']['people_top_metrics'][0]
# TODO: Commits must be extracted from metric
csv_labels = author.id + ",commits"
file_label = author.ds.name + "_top_" + author.id
create_csv(metric, csv_labels, file_label)
"""
Main organizations
"""
orgs = self.config['project_community']['orgs_top_metrics'][0]
# TODO: Commits must be extracted from metric
csv_labels = orgs.id + ",commits"
file_label = orgs.ds.name + "_top_" + orgs.id
create_csv(orgs, csv_labels, file_label) | Generate the data for the Communication section in a Project report
:return: | Below is the the instruction that describes the task:
### Input:
Generate the data for the Communication section in a Project report
:return:
### Response:
def sec_project_community(self, project=None):
"""
Generate the data for the Communication section in a Project report
:return:
"""
def create_csv(metric1, csv_labels, file_label):
esfilters = None
csv_labels = csv_labels.replace("_", "")  # LaTeX does not support "_"
if project != self.GLOBAL_PROJECT:
esfilters = {"project": project}
data_path = os.path.join(self.data_dir, "data")
file_name = os.path.join(data_path, file_label + "_" + project + ".csv")
logger.debug("CSV file %s generation in progress", file_name)
m1 = metric1(self.es_url, self.get_metric_index(metric1),
esfilters=esfilters, start=self.end_prev_month, end=self.end)
top = m1.get_list()
csv = csv_labels + '\n'
for i in range(0, len(top['value'])):
if i > self.TOP_MAX:
break
csv += top[metric1.FIELD_NAME][i] + "," + self.str_val(top['value'][i])
csv += "\n"
with open(file_name, "w") as f:
f.write(csv)
logger.debug("CSV file %s was generated", file_name)
logger.info("Community data for: %s", project)
author = self.config['project_community']['author_metrics'][0]
csv_labels = 'labels,' + author.id
file_label = author.ds.name + "_" + author.id
title_label = author.name + " per " + self.interval
self.__create_csv_eps(author, None, csv_labels, file_label, title_label,
project)
"""
Main developers
"""
metric = self.config['project_community']['people_top_metrics'][0]
# TODO: Commits must be extracted from metric
csv_labels = author.id + ",commits"
file_label = author.ds.name + "_top_" + author.id
create_csv(metric, csv_labels, file_label)
"""
Main organizations
"""
orgs = self.config['project_community']['orgs_top_metrics'][0]
# TODO: Commits must be extracted from metric
csv_labels = orgs.id + ",commits"
file_label = orgs.ds.name + "_top_" + orgs.id
create_csv(orgs, csv_labels, file_label) |
def _set_bd_vc_peer_counter(self, v, load=False):
"""
Setter method for bd_vc_peer_counter, mapped from YANG variable /bd_vc_peer_state/bd_vc_peer_counter (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bd_vc_peer_counter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bd_vc_peer_counter() directly.
YANG Description: VC peer counters
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=bd_vc_peer_counter.bd_vc_peer_counter, is_container='container', presence=False, yang_name="bd-vc-peer-counter", rest_name="bd-vc-peer-counter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pwm-bd-vc-peer-counter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-pwm-operational', defining_module='brocade-pwm-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bd_vc_peer_counter must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=bd_vc_peer_counter.bd_vc_peer_counter, is_container='container', presence=False, yang_name="bd-vc-peer-counter", rest_name="bd-vc-peer-counter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pwm-bd-vc-peer-counter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-pwm-operational', defining_module='brocade-pwm-operational', yang_type='container', is_config=False)""",
})
self.__bd_vc_peer_counter = t
if hasattr(self, '_set'):
self._set() | Setter method for bd_vc_peer_counter, mapped from YANG variable /bd_vc_peer_state/bd_vc_peer_counter (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bd_vc_peer_counter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bd_vc_peer_counter() directly.
YANG Description: VC peer counters | Below is the the instruction that describes the task:
### Input:
Setter method for bd_vc_peer_counter, mapped from YANG variable /bd_vc_peer_state/bd_vc_peer_counter (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bd_vc_peer_counter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bd_vc_peer_counter() directly.
YANG Description: VC peer counters
### Response:
def _set_bd_vc_peer_counter(self, v, load=False):
"""
Setter method for bd_vc_peer_counter, mapped from YANG variable /bd_vc_peer_state/bd_vc_peer_counter (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bd_vc_peer_counter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bd_vc_peer_counter() directly.
YANG Description: VC peer counters
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=bd_vc_peer_counter.bd_vc_peer_counter, is_container='container', presence=False, yang_name="bd-vc-peer-counter", rest_name="bd-vc-peer-counter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pwm-bd-vc-peer-counter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-pwm-operational', defining_module='brocade-pwm-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bd_vc_peer_counter must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=bd_vc_peer_counter.bd_vc_peer_counter, is_container='container', presence=False, yang_name="bd-vc-peer-counter", rest_name="bd-vc-peer-counter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pwm-bd-vc-peer-counter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-pwm-operational', defining_module='brocade-pwm-operational', yang_type='container', is_config=False)""",
})
self.__bd_vc_peer_counter = t
if hasattr(self, '_set'):
self._set() |
def set_position(self, decl_pos):
"""Set editor position from ENSIME declPos data."""
if decl_pos["typehint"] == "LineSourcePosition":
self.editor.set_cursor(decl_pos['line'], 0)
else: # OffsetSourcePosition
point = decl_pos["offset"]
row, col = self.editor.point2pos(point + 1)
self.editor.set_cursor(row, col) | Set editor position from ENSIME declPos data. | Below is the the instruction that describes the task:
### Input:
Set editor position from ENSIME declPos data.
### Response:
def set_position(self, decl_pos):
"""Set editor position from ENSIME declPos data."""
if decl_pos["typehint"] == "LineSourcePosition":
self.editor.set_cursor(decl_pos['line'], 0)
else: # OffsetSourcePosition
point = decl_pos["offset"]
row, col = self.editor.point2pos(point + 1)
self.editor.set_cursor(row, col) |
def solve_boolexpr():
"""
sudo pip install git+https://github.com/tpircher/quine-mccluskey.git
sudo pip uninstall quine_mccluskey
pip uninstall quine_mccluskey
pip install git+https://github.com/tpircher/quine-mccluskey.git
Args:
varnames (?):
Returns:
?:
CommandLine:
python -m utool.util_alg solve_boolexpr --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> varnames = ['sa', 'said', 'aid']
>>> result = solve_boolexpr()
>>> print(result)
"""
#false_cases = [
# int('111', 2),
# int('011', 2),
# int('001', 2),
#]
#true_cases = list(set(range(2 ** 3)) - set(false_cases))
varnames = ['sa', 'said', 'aid']
#import utool as ut
truth_table = [
dict(sa=True, said=True, aid=True, output=False),
dict(sa=True, said=True, aid=False, output=True),
dict(sa=True, said=False, aid=True, output=True),
dict(sa=True, said=False, aid=False, output=True),
dict(sa=False, said=True, aid=True, output=False),
dict(sa=False, said=True, aid=False, output=True),
dict(sa=False, said=False, aid=True, output=False),
dict(sa=False, said=False, aid=False, output=True),
]
truth_tuples = [ut.dict_take(d, varnames) for d in truth_table]
outputs = [d['output'] for d in truth_table]
true_tuples = ut.compress(truth_tuples, outputs)
#false_tuples = ut.compress(truth_tuples, ut.not_list(outputs))
true_cases = [''.join([str(int(t)) for t in tup]) for tup in true_tuples]
#truth_nums = [int(s, 2) for s in true_cases]
from quine_mccluskey.qm import QuineMcCluskey
qm = QuineMcCluskey(use_xor=False)
result = qm.simplify_los(true_cases, num_bits=len(varnames))
print(result)
#ut.chr_range(3)
#symbol_map = {
# '-': '',
# '1': '{v}',
# '0': 'not {v}',
# '^': '^',
#}
#'-' don't care: this bit can be either zero or one.
#'1' the bit must be one.
#'0' the bit must be zero.
#'^' all bits with the caret are XOR-ed together.
#'~' all bits with the tilde are XNOR-ed together.
#formulas = [[symbol_map[r].format(v=v) for v, r in zip(varnames, rs)] for rs in result]
grouped_terms = [dict(ut.group_items(varnames, rs)) for rs in result]
def parenjoin(char, list_):
if len(list_) == 0:
return ''
else:
return '(' + char.join(list_) + ')'
expanded_terms = [
(
term.get('1', []) +
['(not ' + b + ')' for b in term.get('0', [])] +
[
parenjoin(' ^ ', term.get('^', [])),
parenjoin(' ~ ', term.get('~', [])),
]
) for term in grouped_terms
]
final_terms = [[t for t in term if t] for term in expanded_terms]
products = [parenjoin(' and ', [f for f in form if f]) for form in final_terms]
final_expr = ' or '.join(products)
print(final_expr) | sudo pip install git+https://github.com/tpircher/quine-mccluskey.git
sudo pip uninstall quine_mccluskey
pip uninstall quine_mccluskey
pip install git+https://github.com/tpircher/quine-mccluskey.git
Args:
varnames (?):
Returns:
?:
CommandLine:
python -m utool.util_alg solve_boolexpr --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> varnames = ['sa', 'said', 'aid']
>>> result = solve_boolexpr()
>>> print(result) | Below is the the instruction that describes the task:
### Input:
sudo pip install git+https://github.com/tpircher/quine-mccluskey.git
sudo pip uninstall quine_mccluskey
pip uninstall quine_mccluskey
pip install git+https://github.com/tpircher/quine-mccluskey.git
Args:
varnames (?):
Returns:
?:
CommandLine:
python -m utool.util_alg solve_boolexpr --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> varnames = ['sa', 'said', 'aid']
>>> result = solve_boolexpr()
>>> print(result)
### Response:
def solve_boolexpr():
"""
sudo pip install git+https://github.com/tpircher/quine-mccluskey.git
sudo pip uninstall quine_mccluskey
pip uninstall quine_mccluskey
pip install git+https://github.com/tpircher/quine-mccluskey.git
Args:
varnames (?):
Returns:
?:
CommandLine:
python -m utool.util_alg solve_boolexpr --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> varnames = ['sa', 'said', 'aid']
>>> result = solve_boolexpr()
>>> print(result)
"""
#false_cases = [
# int('111', 2),
# int('011', 2),
# int('001', 2),
#]
#true_cases = list(set(range(2 ** 3)) - set(false_cases))
varnames = ['sa', 'said', 'aid']
#import utool as ut
truth_table = [
dict(sa=True, said=True, aid=True, output=False),
dict(sa=True, said=True, aid=False, output=True),
dict(sa=True, said=False, aid=True, output=True),
dict(sa=True, said=False, aid=False, output=True),
dict(sa=False, said=True, aid=True, output=False),
dict(sa=False, said=True, aid=False, output=True),
dict(sa=False, said=False, aid=True, output=False),
dict(sa=False, said=False, aid=False, output=True),
]
truth_tuples = [ut.dict_take(d, varnames) for d in truth_table]
outputs = [d['output'] for d in truth_table]
true_tuples = ut.compress(truth_tuples, outputs)
#false_tuples = ut.compress(truth_tuples, ut.not_list(outputs))
true_cases = [''.join([str(int(t)) for t in tup]) for tup in true_tuples]
#truth_nums = [int(s, 2) for s in true_cases]
from quine_mccluskey.qm import QuineMcCluskey
qm = QuineMcCluskey(use_xor=False)
result = qm.simplify_los(true_cases, num_bits=len(varnames))
print(result)
#ut.chr_range(3)
#symbol_map = {
# '-': '',
# '1': '{v}',
# '0': 'not {v}',
# '^': '^',
#}
#'-' don't care: this bit can be either zero or one.
#'1' the bit must be one.
#'0' the bit must be zero.
#'^' all bits with the caret are XOR-ed together.
#'~' all bits with the tilde are XNOR-ed together.
#formulas = [[symbol_map[r].format(v=v) for v, r in zip(varnames, rs)] for rs in result]
grouped_terms = [dict(ut.group_items(varnames, rs)) for rs in result]
def parenjoin(char, list_):
if len(list_) == 0:
return ''
else:
return '(' + char.join(list_) + ')'
expanded_terms = [
(
term.get('1', []) +
['(not ' + b + ')' for b in term.get('0', [])] +
[
parenjoin(' ^ ', term.get('^', [])),
parenjoin(' ~ ', term.get('~', [])),
]
) for term in grouped_terms
]
final_terms = [[t for t in term if t] for term in expanded_terms]
products = [parenjoin(' and ', [f for f in form if f]) for form in final_terms]
final_expr = ' or '.join(products)
print(final_expr) |
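The heavy lifting is QuineMcCluskey.simplify_los, which takes the true minterms as bit strings; a minimal standalone sketch for the same truth table (assuming the quine_mccluskey package from the URL above is installed):

from quine_mccluskey.qm import QuineMcCluskey

qm = QuineMcCluskey(use_xor=False)
# true rows of the (sa, said, aid) truth table above, MSB first
ones = ['110', '101', '100', '010', '000']
print(qm.simplify_los(ones, num_bits=3))
# -> {'--0', '10-'}: '-' is a don't-care bit, so the minimal cover reads
#    (not aid) or (sa and not said)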
def create(cls, name, vm, size, snapshotprofile, datacenter,
source, disk_type='data', background=False):
""" Create a disk and attach it to a vm. """
if isinstance(size, tuple):
prefix, size = size
if source:
size = None
disk_params = cls.disk_param(name, size, snapshotprofile)
disk_params['datacenter_id'] = int(Datacenter.usable_id(datacenter))
disk_params['type'] = disk_type
if source:
disk_id = int(Image.usable_id(source,
disk_params['datacenter_id']))
result = cls.call('hosting.disk.create_from', disk_params, disk_id)
else:
result = cls.call('hosting.disk.create', disk_params)
if background and not vm:
return result
# interactive mode, run a progress bar
cls.echo('Creating your disk.')
cls.display_progress(result)
if not vm:
return
vm_id = Iaas.usable_id(vm)
result = cls._attach(result['disk_id'], vm_id)
if background:
return result
cls.echo('Attaching your disk.')
cls.display_progress(result) | Create a disk and attach it to a vm. | Below is the the instruction that describes the task:
### Input:
Create a disk and attach it to a vm.
### Response:
def create(cls, name, vm, size, snapshotprofile, datacenter,
source, disk_type='data', background=False):
""" Create a disk and attach it to a vm. """
if isinstance(size, tuple):
prefix, size = size
if source:
size = None
disk_params = cls.disk_param(name, size, snapshotprofile)
disk_params['datacenter_id'] = int(Datacenter.usable_id(datacenter))
disk_params['type'] = disk_type
if source:
disk_id = int(Image.usable_id(source,
disk_params['datacenter_id']))
result = cls.call('hosting.disk.create_from', disk_params, disk_id)
else:
result = cls.call('hosting.disk.create', disk_params)
if background and not vm:
return result
# interactive mode, run a progress bar
cls.echo('Creating your disk.')
cls.display_progress(result)
if not vm:
return
vm_id = Iaas.usable_id(vm)
result = cls._attach(result['disk_id'], vm_id)
if background:
return result
cls.echo('Attaching your disk.')
cls.display_progress(result) |
def items(self):
"""
A generator yielding ``(key, value)`` attribute pairs, sorted by key name.
"""
for key in sorted(self.attrs):
yield key, self.attrs[key] | A generator yielding ``(key, value)`` attribute pairs, sorted by key name. | Below is the the instruction that describes the task:
### Input:
A generator yielding ``(key, value)`` attribute pairs, sorted by key name.
### Response:
def items(self):
"""
A generator yielding ``(key, value)`` attribute pairs, sorted by key name.
"""
for key in sorted(self.attrs):
yield key, self.attrs[key] |
def _get_thumbnail_options(self, context, instance):
"""
Return the size and options of the thumbnail that should be inserted
"""
width, height = None, None
subject_location = False
placeholder_width = context.get('width', None)
placeholder_height = context.get('height', None)
if instance.use_autoscale and placeholder_width:
# use the placeholder width as a hint for sizing
width = int(placeholder_width)
if instance.use_autoscale and placeholder_height:
height = int(placeholder_height)
elif instance.width:
width = instance.width
if instance.height:
height = instance.height
if instance.image:
if instance.image.subject_location:
subject_location = instance.image.subject_location
if not height and width:
# height was not externally defined: use ratio to scale it by the width
height = int(float(width) * float(instance.image.height) / float(instance.image.width))
if not width and height:
# width was not externally defined: use ratio to scale it by the height
width = int(float(height) * float(instance.image.width) / float(instance.image.height))
if not width:
# width is still not defined. fallback the actual image width
width = instance.image.width
if not height:
# height is still not defined. fallback the actual image height
height = instance.image.height
return {'size': (width, height),
'subject_location': subject_location} | Return the size and options of the thumbnail that should be inserted | Below is the the instruction that describes the task:
### Input:
Return the size and options of the thumbnail that should be inserted
### Response:
def _get_thumbnail_options(self, context, instance):
"""
Return the size and options of the thumbnail that should be inserted
"""
width, height = None, None
subject_location = False
placeholder_width = context.get('width', None)
placeholder_height = context.get('height', None)
if instance.use_autoscale and placeholder_width:
# use the placeholder width as a hint for sizing
width = int(placeholder_width)
if instance.use_autoscale and placeholder_height:
height = int(placeholder_height)
elif instance.width:
width = instance.width
if instance.height:
height = instance.height
if instance.image:
if instance.image.subject_location:
subject_location = instance.image.subject_location
if not height and width:
# height was not externally defined: use ratio to scale it by the width
height = int(float(width) * float(instance.image.height) / float(instance.image.width))
if not width and height:
# width was not externally defined: use ratio to scale it by the height
width = int(float(height) * float(instance.image.width) / float(instance.image.height))
if not width:
# width is still not defined. fallback the actual image width
width = instance.image.width
if not height:
# height is still not defined. fallback the actual image height
height = instance.image.height
return {'size': (width, height),
'subject_location': subject_location} |
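The fallback sizing is plain proportional scaling; for instance, with an 800x600 source image and only a width hint of 400:

width = 400
src_w, src_h = 800, 600
# same formula as the height fallback above
height = int(float(width) * float(src_h) / float(src_w))
print(height)  # -> 300, preserving the 4:3 aspect ratio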
def readline_check_physical(self):
"""
Check and return the next physical line. This method can be
used to feed tokenize.generate_tokens.
"""
line = self.readline()
if line:
self.check_physical(line)
return line | Check and return the next physical line. This method can be
used to feed tokenize.generate_tokens. | Below is the the instruction that describes the task:
### Input:
Check and return the next physical line. This method can be
used to feed tokenize.generate_tokens.
### Response:
def readline_check_physical(self):
"""
Check and return the next physical line. This method can be
used to feed tokenize.generate_tokens.
"""
line = self.readline()
if line:
self.check_physical(line)
return line |
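A wiring sketch showing the intended use with tokenize, assuming checker is a pycodestyle-style object providing readline() and check_physical():

import tokenize

# generate_tokens pulls one physical line per readline call, so every line
# tokenized here has also been run through checker.check_physical()
for tok in tokenize.generate_tokens(checker.readline_check_physical):
    tok_type, tok_string, start, end, logical_line = tok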
def _run_yum_command(cmd, fatal=False):
"""Run an YUM command.
Checks the output and retry if the fatal flag is set to True.
:param: cmd: str: The yum command to run.
:param: fatal: bool: Whether the command's output should be checked and
retried.
"""
env = os.environ.copy()
if fatal:
retry_count = 0
result = None
# If the command is considered "fatal", we need to retry if the yum
# lock was not acquired.
while result is None or result == YUM_NO_LOCK:
try:
result = subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError as e:
retry_count = retry_count + 1
if retry_count > YUM_NO_LOCK_RETRY_COUNT:
raise
result = e.returncode
log("Couldn't acquire YUM lock. Will retry in {} seconds."
"".format(YUM_NO_LOCK_RETRY_DELAY))
time.sleep(YUM_NO_LOCK_RETRY_DELAY)
else:
subprocess.call(cmd, env=env) | Run a YUM command.
Checks the output and retry if the fatal flag is set to True.
:param: cmd: str: The yum command to run.
:param: fatal: bool: Whether the command's output should be checked and
retried. | Below is the the instruction that describes the task:
### Input:
Run a YUM command.
Checks the output and retry if the fatal flag is set to True.
:param: cmd: str: The yum command to run.
:param: fatal: bool: Whether the command's output should be checked and
retried.
### Response:
def _run_yum_command(cmd, fatal=False):
"""Run an YUM command.
Checks the output and retry if the fatal flag is set to True.
:param: cmd: str: The yum command to run.
:param: fatal: bool: Whether the command's output should be checked and
retried.
"""
env = os.environ.copy()
if fatal:
retry_count = 0
result = None
# If the command is considered "fatal", we need to retry if the yum
# lock was not acquired.
while result is None or result == YUM_NO_LOCK:
try:
result = subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError as e:
retry_count = retry_count + 1
if retry_count > YUM_NO_LOCK_RETRY_COUNT:
raise
result = e.returncode
log("Couldn't acquire YUM lock. Will retry in {} seconds."
"".format(YUM_NO_LOCK_RETRY_DELAY))
time.sleep(YUM_NO_LOCK_RETRY_DELAY)
else:
subprocess.call(cmd, env=env) |
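A usage sketch; YUM_NO_LOCK and the retry constants are module-level names the function above expects to exist:

# non-fatal: run once and ignore the exit status
_run_yum_command(['yum', 'clean', 'all'])

# fatal: re-run while yum reports its lock is held, raising after
# YUM_NO_LOCK_RETRY_COUNT failed attempts
_run_yum_command(['yum', 'install', '-y', 'httpd'], fatal=True)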
def _get_point_data_handler_for(self, point):
"""Used by point instances and data callbacks"""
with self.__point_data_handlers:
try:
return self.__point_data_handlers[point]
except KeyError:
return self.__point_data_handlers.setdefault(point, PointDataObjectHandler(point, self)) | Used by point instances and data callbacks | Below is the the instruction that describes the task:
### Input:
Used by point instances and data callbacks
### Response:
def _get_point_data_handler_for(self, point):
"""Used by point instances and data callbacks"""
with self.__point_data_handlers:
try:
return self.__point_data_handlers[point]
except KeyError:
return self.__point_data_handlers.setdefault(point, PointDataObjectHandler(point, self)) |
def _stat(file):
"""
Get the Ownership information from a file.
:param file: The path to a file to stat
:type file: str
:returns: owner, group, and mode of the specified file
:rtype: Ownership
:raises subprocess.CalledProcessError: If the underlying stat fails
"""
out = subprocess.check_output(
['stat', '-c', '%U %G %a', file]).decode('utf-8')
return Ownership(*out.strip().split(' ')) | Get the Ownership information from a file.
:param file: The path to a file to stat
:type file: str
:returns: owner, group, and mode of the specified file
:rtype: Ownership
:raises subprocess.CalledProcessError: If the underlying stat fails | Below is the the instruction that describes the task:
### Input:
Get the Ownership information from a file.
:param file: The path to a file to stat
:type file: str
:returns: owner, group, and mode of the specified file
:rtype: Ownership
:raises subprocess.CalledProcessError: If the underlying stat fails
### Response:
def _stat(file):
"""
Get the Ownership information from a file.
:param file: The path to a file to stat
:type file: str
:returns: owner, group, and mode of the specified file
:rtype: Ownership
:raises subprocess.CalledProcessError: If the underlying stat fails
"""
out = subprocess.check_output(
['stat', '-c', '%U %G %a', file]).decode('utf-8')
return Ownership(*out.strip().split(' ')) |
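A usage sketch, assuming Ownership is the three-field namedtuple the unpacking implies:

from collections import namedtuple

Ownership = namedtuple('Ownership', ['owner', 'group', 'mode'])  # assumed shape

info = _stat('/etc/hostname')
print(info.owner, info.group, info.mode)  # e.g. root root 644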
def series_strip(series, startswith=None, endswith=None, startsorendswith=None, ignorecase=True):
    """ Strip a suffix/prefix str (`endswith`/`startswith` str) from a `df` column or pd.Series of type str """
    if not (startsorendswith or endswith or startswith):
        logger.warning('In series_strip(): You must specify endswith, startswith, or startsorendswith string arguments.')
        return series
    if startsorendswith:
        startswith = endswith = startsorendswith
    # compare against a lowercased copy when ignorecase is set, but strip from the original values
    searchable = series.str.lower() if ignorecase else series
    if endswith:
        mask = searchable.str.endswith(endswith.lower() if ignorecase else endswith)
        series[mask] = series[mask].str[:-len(endswith)]
    if startswith:
        mask = searchable.str.startswith(startswith.lower() if ignorecase else startswith)
        series[mask] = series[mask].str[len(startswith):]
    return series | Strip a suffix/prefix str (`endswith`/`startswith` str) from a `df` column or pd.Series of type str | Below is the the instruction that describes the task:
### Input:
Strip a suffix/prefix str (`endswith`/`startswith` str) from a `df` column or pd.Series of type str
### Response:
def series_strip(series, startswith=None, endswith=None, startsorendswith=None, ignorecase=True):
    """ Strip a suffix/prefix str (`endswith`/`startswith` str) from a `df` column or pd.Series of type str """
    if not (startsorendswith or endswith or startswith):
        logger.warning('In series_strip(): You must specify endswith, startswith, or startsorendswith string arguments.')
        return series
    if startsorendswith:
        startswith = endswith = startsorendswith
    # compare against a lowercased copy when ignorecase is set, but strip from the original values
    searchable = series.str.lower() if ignorecase else series
    if endswith:
        mask = searchable.str.endswith(endswith.lower() if ignorecase else endswith)
        series[mask] = series[mask].str[:-len(endswith)]
    if startswith:
        mask = searchable.str.startswith(startswith.lower() if ignorecase else startswith)
        series[mask] = series[mask].str[len(startswith):]
    return series
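A runnable example of the helper above; note that it mutates the Series in place and also returns it:

import pandas as pd

s = pd.Series(['report.TXT', 'notes.txt', 'readme'])
print(series_strip(s, endswith='.txt', ignorecase=True).tolist())
# -> ['report', 'notes', 'readme']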
def _forget_page(self, page):
"""Remove a page from document page dict."""
pid = id(page)
if pid in self._page_refs:
self._page_refs[pid] = None | Remove a page from document page dict. | Below is the the instruction that describes the task:
### Input:
Remove a page from document page dict.
### Response:
def _forget_page(self, page):
"""Remove a page from document page dict."""
pid = id(page)
if pid in self._page_refs:
self._page_refs[pid] = None |
def do_list(self, args):
"""List all connected resources."""
try:
resources = self.resource_manager.list_resources_info()
except Exception as e:
print(e)
else:
self.resources = []
for ndx, (resource_name, value) in enumerate(resources.items()):
if not args:
print('({0:2d}) {1}'.format(ndx, resource_name))
if value.alias:
print(' alias: {}'.format(value.alias))
self.resources.append((resource_name, value.alias or None)) | List all connected resources. | Below is the the instruction that describes the task:
### Input:
List all connected resources.
### Response:
def do_list(self, args):
"""List all connected resources."""
try:
resources = self.resource_manager.list_resources_info()
except Exception as e:
print(e)
else:
self.resources = []
for ndx, (resource_name, value) in enumerate(resources.items()):
if not args:
print('({0:2d}) {1}'.format(ndx, resource_name))
if value.alias:
print(' alias: {}'.format(value.alias))
self.resources.append((resource_name, value.alias or None)) |
def link(self):
"""
Registers the Link
"""
if self.source in self.registry:
links = self.registry[self.source]
params = {
k: v for k, v in self.get_param_values() if k != 'name'}
for link in links:
link_params = {
k: v for k, v in link.get_param_values() if k != 'name'}
if (type(link) is type(self) and link.source is self.source
and link.target is self.target and params == link_params):
return
self.registry[self.source].append(self)
else:
self.registry[self.source] = [self] | Registers the Link | Below is the the instruction that describes the task:
### Input:
Registers the Link
### Response:
def link(self):
"""
Registers the Link
"""
if self.source in self.registry:
links = self.registry[self.source]
params = {
k: v for k, v in self.get_param_values() if k != 'name'}
for link in links:
link_params = {
k: v for k, v in link.get_param_values() if k != 'name'}
if (type(link) is type(self) and link.source is self.source
and link.target is self.target and params == link_params):
return
self.registry[self.source].append(self)
else:
self.registry[self.source] = [self] |
def __get_dbms_version(self, make_connection=True):
"""
Returns the 'DBMS Version' string
"""
major, minor, _, _ = self.get_server_version(make_connection=make_connection)
return '{}.{}'.format(major, minor) | Returns the 'DBMS Version' string | Below is the the instruction that describes the task:
### Input:
Returns the 'DBMS Version' string
### Response:
def __get_dbms_version(self, make_connection=True):
"""
Returns the 'DBMS Version' string
"""
major, minor, _, _ = self.get_server_version(make_connection=make_connection)
return '{}.{}'.format(major, minor) |
def SSLCertificates(self):
"""
Lists certificates.
"""
url = self._url + "/SSLCertificate"
params = {"f" : "json"}
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | Lists certificates. | Below is the the instruction that describes the task:
### Input:
Lists certificates.
### Response:
def SSLCertificates(self):
"""
Lists certificates.
"""
url = self._url + "/SSLCertificate"
params = {"f" : "json"}
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) |
def notes_assumptions_extractor(impact_report, component_metadata):
"""Extracting notes and assumptions of the exposure layer
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
context = {}
provenance = impact_report.impact_function.provenance
extra_args = component_metadata.extra_args
hazard_keywords = provenance['hazard_keywords']
exposure_keywords = provenance['exposure_keywords']
exposure_type = definition(exposure_keywords['exposure'])
analysis_note_dict = resolve_from_dictionary(extra_args, 'analysis_notes')
context['items'] = [analysis_note_dict]
context['component_key'] = component_metadata.key
context['header'] = resolve_from_dictionary(extra_args, 'header')
context['items'] += provenance['notes']
# Get hazard classification
hazard_classification = definition(
active_classification(hazard_keywords, exposure_keywords['exposure']))
# Collect the hazard classes flagged as affected
affected_classes = []
for hazard_class in hazard_classification['classes']:
if exposure_keywords['exposure'] == exposure_population['key']:
# Taking from profile
is_affected_class = is_affected(
hazard=hazard_keywords['hazard'],
classification=hazard_classification['key'],
hazard_class=hazard_class['key'],
)
if is_affected_class:
affected_classes.append(hazard_class)
else:
if hazard_class.get('affected', False):
affected_classes.append(hazard_class)
if affected_classes:
affected_note_dict = resolve_from_dictionary(
extra_args, 'affected_note_format')
# generate hazard classes
hazard_classes = ', '.join([
c['name'] for c in affected_classes
])
for index, affected_note in enumerate(affected_note_dict['item_list']):
affected_note_dict['item_list'][index] = (
affected_note.format(hazard_classes=hazard_classes)
)
context['items'].append(affected_note_dict)
# Check whether any hazard class has a displacement rate
for hazard_class in hazard_classification['classes']:
if hazard_class.get('displacement_rate', 0) > 0:
have_displacement_rate = True
break
else:
have_displacement_rate = False
# Only show displacement note if analysis about population exposure
if have_displacement_rate and exposure_type == exposure_population:
# add notes for displacement rate used
displacement_note_dict = resolve_from_dictionary(
extra_args, 'displacement_rates_note_format')
# generate rate description
displacement_rates_note_format = resolve_from_dictionary(
extra_args, 'hazard_displacement_rates_note_format')
displacement_rates_note = []
for hazard_class in hazard_classification['classes']:
the_hazard_class = deepcopy(hazard_class)
the_hazard_class['displacement_rate'] = get_displacement_rate(
hazard=hazard_keywords['hazard'],
classification=hazard_classification['key'],
hazard_class=the_hazard_class['key']
)
displacement_rates_note.append(
displacement_rates_note_format.format(**the_hazard_class))
rate_description = ', '.join(displacement_rates_note)
for index, displacement_note in enumerate(
displacement_note_dict['item_list']):
displacement_note_dict['item_list'][index] = (
displacement_note.format(rate_description=rate_description)
)
context['items'].append(displacement_note_dict)
# Check whether any hazard class has a fatality rate
have_fatality_rate = False
for hazard_class in hazard_classification['classes']:
if hazard_class.get('fatality_rate', None) is not None and \
hazard_class.get('fatality_rate', 0) > 0:
have_fatality_rate = True
break
if have_fatality_rate and exposure_type == exposure_population:
# add notes for fatality rate used
fatality_note_dict = resolve_from_dictionary(
extra_args, 'fatality_rates_note_format')
# generate rate description
fatality_rates_note_format = resolve_from_dictionary(
extra_args, 'hazard_fatality_rates_note_format')
fatality_rates_note = []
for hazard_class in hazard_classification['classes']:
# we make a copy here because we don't want to
# change the real value.
copy_of_hazard_class = dict(hazard_class)
if copy_of_hazard_class['fatality_rate'] is None or \
copy_of_hazard_class['fatality_rate'] <= 0:
copy_of_hazard_class['fatality_rate'] = 0
else:
# we want to show the rate as a scientific notation
copy_of_hazard_class['fatality_rate'] = (
html_scientific_notation_rate(
copy_of_hazard_class['fatality_rate']))
fatality_rates_note.append(
fatality_rates_note_format.format(**copy_of_hazard_class))
rate_description = ', '.join(fatality_rates_note)
for index, fatality_note in enumerate(fatality_note_dict['item_list']):
fatality_note_dict['item_list'][index] = (
fatality_note.format(rate_description=rate_description)
)
context['items'].append(fatality_note_dict)
return context | Extracting notes and assumptions of the exposure layer
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0 | Below is the the instruction that describes the task:
### Input:
Extracting notes and assumptions of the exposure layer
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
### Response:
def notes_assumptions_extractor(impact_report, component_metadata):
"""Extracting notes and assumptions of the exposure layer
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
context = {}
provenance = impact_report.impact_function.provenance
extra_args = component_metadata.extra_args
hazard_keywords = provenance['hazard_keywords']
exposure_keywords = provenance['exposure_keywords']
exposure_type = definition(exposure_keywords['exposure'])
analysis_note_dict = resolve_from_dictionary(extra_args, 'analysis_notes')
context['items'] = [analysis_note_dict]
context['component_key'] = component_metadata.key
context['header'] = resolve_from_dictionary(extra_args, 'header')
context['items'] += provenance['notes']
# Get hazard classification
hazard_classification = definition(
active_classification(hazard_keywords, exposure_keywords['exposure']))
# Collect the hazard classes flagged as affected
affected_classes = []
for hazard_class in hazard_classification['classes']:
if exposure_keywords['exposure'] == exposure_population['key']:
# Taking from profile
is_affected_class = is_affected(
hazard=hazard_keywords['hazard'],
classification=hazard_classification['key'],
hazard_class=hazard_class['key'],
)
if is_affected_class:
affected_classes.append(hazard_class)
else:
if hazard_class.get('affected', False):
affected_classes.append(hazard_class)
if affected_classes:
affected_note_dict = resolve_from_dictionary(
extra_args, 'affected_note_format')
# generate hazard classes
hazard_classes = ', '.join([
c['name'] for c in affected_classes
])
for index, affected_note in enumerate(affected_note_dict['item_list']):
affected_note_dict['item_list'][index] = (
affected_note.format(hazard_classes=hazard_classes)
)
context['items'].append(affected_note_dict)
# Check whether any hazard class has a displacement rate
for hazard_class in hazard_classification['classes']:
if hazard_class.get('displacement_rate', 0) > 0:
have_displacement_rate = True
break
else:
have_displacement_rate = False
# Only show displacement note if analysis about population exposure
if have_displacement_rate and exposure_type == exposure_population:
# add notes for displacement rate used
displacement_note_dict = resolve_from_dictionary(
extra_args, 'displacement_rates_note_format')
# generate rate description
displacement_rates_note_format = resolve_from_dictionary(
extra_args, 'hazard_displacement_rates_note_format')
displacement_rates_note = []
for hazard_class in hazard_classification['classes']:
the_hazard_class = deepcopy(hazard_class)
the_hazard_class['displacement_rate'] = get_displacement_rate(
hazard=hazard_keywords['hazard'],
classification=hazard_classification['key'],
hazard_class=the_hazard_class['key']
)
displacement_rates_note.append(
displacement_rates_note_format.format(**the_hazard_class))
rate_description = ', '.join(displacement_rates_note)
for index, displacement_note in enumerate(
displacement_note_dict['item_list']):
displacement_note_dict['item_list'][index] = (
displacement_note.format(rate_description=rate_description)
)
context['items'].append(displacement_note_dict)
# Check whether the hazard classification has a fatality rate
have_fatality_rate = False
for hazard_class in hazard_classification['classes']:
if hazard_class.get('fatality_rate', None) is not None and \
hazard_class.get('fatality_rate', 0) > 0:
have_fatality_rate = True
break
if have_fatality_rate and exposure_type == exposure_population:
# add notes for fatality rate used
fatality_note_dict = resolve_from_dictionary(
extra_args, 'fatality_rates_note_format')
# generate rate description
fatality_rates_note_format = resolve_from_dictionary(
extra_args, 'hazard_fatality_rates_note_format')
fatality_rates_note = []
for hazard_class in hazard_classification['classes']:
# we make a copy here because we don't want to
# change the real value.
copy_of_hazard_class = dict(hazard_class)
if copy_of_hazard_class['fatality_rate'] is None or \
copy_of_hazard_class['fatality_rate'] <= 0:
copy_of_hazard_class['fatality_rate'] = 0
else:
# we want to show the rate as a scientific notation
copy_of_hazard_class['fatality_rate'] = (
html_scientific_notation_rate(
copy_of_hazard_class['fatality_rate']))
fatality_rates_note.append(
fatality_rates_note_format.format(**copy_of_hazard_class))
rate_description = ', '.join(fatality_rates_note)
for index, fatality_note in enumerate(fatality_note_dict['item_list']):
fatality_note_dict['item_list'][index] = (
fatality_note.format(rate_description=rate_description)
)
context['items'].append(fatality_note_dict)
return context |
def load_values(self, dictionary, as_defaults=False, flat=False):
"""
Import config values from a dictionary.
When ``as_defaults`` is set to ``True``, the values
imported will be set as defaults. This can be used to
declare the sections and items of configuration.
Values of sections and items in ``dictionary`` can be
dictionaries as well as instances of :class:`.Item` and
:class:`.Config`.
Args:
dictionary:
as_defaults: if ``True``, the imported values will be set as defaults.
"""
if flat:
# Deflatten the dictionary and then pass on to the normal case.
separator = self.settings.str_path_separator
flat_dictionary = dictionary
dictionary = collections.OrderedDict()
for k, v in flat_dictionary.items():
k_parts = k.split(separator)
c = dictionary
for i, kp in enumerate(k_parts):
if i >= len(k_parts) - 1:
c[kp] = v
else:
if kp not in c:
c[kp] = collections.OrderedDict()
c = c[kp]
for name, value in dictionary.items():
if name not in self:
if as_defaults:
if isinstance(value, dict):
self[name] = self.create_section()
self[name].load_values(value, as_defaults=as_defaults)
else:
self[name] = self.create_item(name, default=value)
else:
# Skip unknown names if not interpreting dictionary as defaults
pass
continue
resolution = self._get_item_or_section(name, handle_not_found=False)
if is_config_item(resolution):
if as_defaults:
resolution.default = value
else:
resolution.value = value
else:
resolution.load_values(value, as_defaults=as_defaults) | Import config values from a dictionary.
When ``as_defaults`` is set to ``True``, the values
imported will be set as defaults. This can be used to
declare the sections and items of configuration.
Values of sections and items in ``dictionary`` can be
dictionaries as well as instances of :class:`.Item` and
:class:`.Config`.
Args:
dictionary:
as_defaults: if ``True``, the imported values will be set as defaults. | Below is the instruction that describes the task:
### Input:
Import config values from a dictionary.
When ``as_defaults`` is set to ``True``, the values
imported will be set as defaults. This can be used to
declare the sections and items of configuration.
Values of sections and items in ``dictionary`` can be
dictionaries as well as instances of :class:`.Item` and
:class:`.Config`.
Args:
dictionary:
as_defaults: if ``True``, the imported values will be set as defaults.
### Response:
def load_values(self, dictionary, as_defaults=False, flat=False):
"""
Import config values from a dictionary.
When ``as_defaults`` is set to ``True``, the values
imported will be set as defaults. This can be used to
declare the sections and items of configuration.
Values of sections and items in ``dictionary`` can be
dictionaries as well as instances of :class:`.Item` and
:class:`.Config`.
Args:
dictionary:
as_defaults: if ``True``, the imported values will be set as defaults.
"""
if flat:
# Deflatten the dictionary and then pass on to the normal case.
separator = self.settings.str_path_separator
flat_dictionary = dictionary
dictionary = collections.OrderedDict()
for k, v in flat_dictionary.items():
k_parts = k.split(separator)
c = dictionary
for i, kp in enumerate(k_parts):
if i >= len(k_parts) - 1:
c[kp] = v
else:
if kp not in c:
c[kp] = collections.OrderedDict()
c = c[kp]
for name, value in dictionary.items():
if name not in self:
if as_defaults:
if isinstance(value, dict):
self[name] = self.create_section()
self[name].load_values(value, as_defaults=as_defaults)
else:
self[name] = self.create_item(name, default=value)
else:
# Skip unknown names if not interpreting dictionary as defaults
pass
continue
resolution = self._get_item_or_section(name, handle_not_found=False)
if is_config_item(resolution):
if as_defaults:
resolution.default = value
else:
resolution.value = value
else:
resolution.load_values(value, as_defaults=as_defaults) |
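A minimal standalone sketch of the un-flattening step used in the flat=True branch above (the "." separator is an assumption here; the real one comes from self.settings.str_path_separator):

import collections

def unflatten(flat_dict, separator="."):
    # Rebuild a nested OrderedDict from separator-joined keys,
    # mirroring the deflattening loop in load_values above.
    nested = collections.OrderedDict()
    for key, value in flat_dict.items():
        parts = key.split(separator)
        cursor = nested
        for part in parts[:-1]:
            cursor = cursor.setdefault(part, collections.OrderedDict())
        cursor[parts[-1]] = value
    return nested

print(unflatten({"db.host": "localhost", "db.port": 5432}))
# OrderedDict([('db', OrderedDict([('host', 'localhost'), ('port', 5432)]))])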
def flat_list(lst):
"""This function flatten given nested list.
Argument:
nested list
Returns:
flat list
"""
if isinstance(lst, list):
for item in lst:
for i in flat_list(item):
yield i
else:
yield lst | This function flattens the given nested list.
Argument:
nested list
Returns:
flat list | Below is the instruction that describes the task:
### Input:
This function flattens the given nested list.
Argument:
nested list
Returns:
flat list
### Response:
def flat_list(lst):
"""This function flatten given nested list.
Argument:
nested list
Returns:
flat list
"""
if isinstance(lst, list):
for item in lst:
for i in flat_list(item):
yield i
else:
yield lst |
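Since flat_list is a recursive generator, it is usually consumed with list(); for example:

nested = [1, [2, [3, 4]], [5]]
print(list(flat_list(nested)))  # [1, 2, 3, 4, 5]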
def get_seq_number_from_id(id, id_template, prefix, **kw):
"""Return the sequence number of the given ID
"""
separator = kw.get("separator", "-")
postfix = id.replace(prefix, "").strip(separator)
postfix_segments = postfix.split(separator)
seq_number = 0
possible_seq_nums = filter(lambda n: n.isalnum(), postfix_segments)
if possible_seq_nums:
seq_number = possible_seq_nums[-1]
# Check if this id has to be expressed as an alphanumeric number
seq_number = get_alpha_or_number(seq_number, id_template)
seq_number = to_int(seq_number)
return seq_number | Return the sequence number of the given ID | Below is the instruction that describes the task:
### Input:
Return the sequence number of the given ID
### Response:
def get_seq_number_from_id(id, id_template, prefix, **kw):
"""Return the sequence number of the given ID
"""
separator = kw.get("separator", "-")
postfix = id.replace(prefix, "").strip(separator)
postfix_segments = postfix.split(separator)
seq_number = 0
possible_seq_nums = filter(lambda n: n.isalnum(), postfix_segments)
if possible_seq_nums:
seq_number = possible_seq_nums[-1]
# Check if this id has to be expressed as an alphanumeric number
seq_number = get_alpha_or_number(seq_number, id_template)
seq_number = to_int(seq_number)
return seq_number |
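get_alpha_or_number and to_int are helpers defined elsewhere in the same package, so the function above is not self-contained. A simplified, hypothetical version of the same parsing idea, without the alpha-number template handling:

def seq_number_simple(id, prefix, separator="-"):
    # Strip the prefix, split the remainder on the separator, keep the
    # alphanumeric segments, and read the last one as a plain integer.
    postfix = id.replace(prefix, "").strip(separator)
    segments = [s for s in postfix.split(separator) if s.isalnum()]
    return int(segments[-1]) if segments else 0

print(seq_number_simple("WS-2019-0007", "WS"))  # 7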
def is_connected(self):
"""
Test if the graph is connected.
Return True if connected, False otherwise
"""
try:
return nx.is_weakly_connected(self.graph)
except nx.exception.NetworkXException:
return False | Test if the graph is connected.
Return True if connected, False otherwise | Below is the instruction that describes the task:
### Input:
Test if the graph is connected.
Return True if connected, False otherwise
### Response:
def is_connected(self):
"""
Test if the graph is connected.
Return True if connected, False otherwise
"""
try:
return nx.is_weakly_connected(self.graph)
except nx.exception.NetworkXException:
return False |
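A quick illustration with networkx; the try/except above exists because nx.is_weakly_connected raises (rather than returning False) on empty or undirected graphs:

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([("a", "b"), ("b", "c")])
print(nx.is_weakly_connected(g))  # True
g.add_node("d")                   # an isolated node breaks weak connectivity
print(nx.is_weakly_connected(g))  # False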
def moments(data, n_neighbors=30, n_pcs=30, mode='connectivities', method='umap', metric='euclidean', use_rep=None,
recurse_neighbors=False, renormalize=False, copy=False):
"""Computes moments for velocity estimation.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
n_neighbors: `int` (default: 30)
Number of neighbors to use.
n_pcs: `int` (default: 30)
Number of principal components to use.
mode: `'connectivities'` or `'distances'` (default: `'connectivities'`)
Distance metric to use for moment computation.
renormalize: `bool` (default: `False`)
Renormalize the moments by total counts per cell to its median.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
Ms: `.layers`
dense matrix with first order moments of spliced counts.
Mu: `.layers`
dense matrix with first order moments of unspliced counts.
"""
adata = data.copy() if copy else data
if 'spliced' not in adata.layers.keys() or 'unspliced' not in adata.layers.keys():
raise ValueError('Could not find spliced / unspliced counts.')
if any([not_yet_normalized(adata.layers[layer]) for layer in {'spliced', 'unspliced'}]):
normalize_per_cell(adata)
if 'neighbors' not in adata.uns.keys() or neighbors_to_be_recomputed(adata, n_neighbors=n_neighbors):
if use_rep is None: use_rep = 'X_pca'
neighbors(adata, n_neighbors=n_neighbors, use_rep=use_rep, n_pcs=n_pcs, method=method, metric=metric)
if mode not in adata.uns['neighbors']:
raise ValueError('mode can only be \'connectivities\' or \'distances\'')
logg.info('computing moments based on ' + str(mode), r=True)
connectivities = get_connectivities(adata, mode, n_neighbors=n_neighbors, recurse_neighbors=recurse_neighbors)
adata.layers['Ms'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['spliced'])).astype(np.float32).A
adata.layers['Mu'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['unspliced'])).astype(np.float32).A
if renormalize: normalize_per_cell(adata, layers={'Ms', 'Mu'}, enforce=True)
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint(
'added \n'
' \'Ms\' and \'Mu\', moments of spliced/unspliced abundances (adata.layers)')
return adata if copy else None | Computes moments for velocity estimation.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
n_neighbors: `int` (default: 30)
Number of neighbors to use.
n_pcs: `int` (default: 30)
Number of principal components to use.
mode: `'connectivities'` or `'distances'` (default: `'connectivities'`)
Distance metric to use for moment computation.
renormalize: `bool` (default: `False`)
Renormalize the moments by total counts per cell to its median.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
Ms: `.layers`
dense matrix with first order moments of spliced counts.
Mu: `.layers`
dense matrix with first order moments of unspliced counts. | Below is the instruction that describes the task:
### Input:
Computes moments for velocity estimation.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
n_neighbors: `int` (default: 30)
Number of neighbors to use.
n_pcs: `int` (default: 30)
Number of principal components to use.
mode: `'connectivities'` or `'distances'` (default: `'connectivities'`)
Distance metric to use for moment computation.
renormalize: `bool` (default: `False`)
Renormalize the moments by total counts per cell to its median.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
Ms: `.layers`
dense matrix with first order moments of spliced counts.
Mu: `.layers`
dense matrix with first order moments of unspliced counts.
### Response:
def moments(data, n_neighbors=30, n_pcs=30, mode='connectivities', method='umap', metric='euclidean', use_rep=None,
recurse_neighbors=False, renormalize=False, copy=False):
"""Computes moments for velocity estimation.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
n_neighbors: `int` (default: 30)
Number of neighbors to use.
n_pcs: `int` (default: 30)
Number of principal components to use.
mode: `'connectivities'` or `'distances'` (default: `'connectivities'`)
Distance metric to use for moment computation.
renormalize: `bool` (default: `False`)
Renormalize the moments by total counts per cell to its median.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
Ms: `.layers`
dense matrix with first order moments of spliced counts.
Mu: `.layers`
dense matrix with first order moments of unspliced counts.
"""
adata = data.copy() if copy else data
if 'spliced' not in adata.layers.keys() or 'unspliced' not in adata.layers.keys():
raise ValueError('Could not find spliced / unspliced counts.')
if any([not_yet_normalized(adata.layers[layer]) for layer in {'spliced', 'unspliced'}]):
normalize_per_cell(adata)
if 'neighbors' not in adata.uns.keys() or neighbors_to_be_recomputed(adata, n_neighbors=n_neighbors):
if use_rep is None: use_rep = 'X_pca'
neighbors(adata, n_neighbors=n_neighbors, use_rep=use_rep, n_pcs=n_pcs, method=method, metric=metric)
if mode not in adata.uns['neighbors']:
raise ValueError('mode can only be \'connectivities\' or \'distances\'')
logg.info('computing moments based on ' + str(mode), r=True)
connectivities = get_connectivities(adata, mode, n_neighbors=n_neighbors, recurse_neighbors=recurse_neighbors)
adata.layers['Ms'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['spliced'])).astype(np.float32).A
adata.layers['Mu'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['unspliced'])).astype(np.float32).A
if renormalize: normalize_per_cell(adata, layers={'Ms', 'Mu'}, enforce=True)
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint(
'added \n'
' \'Ms\' and \'Mu\', moments of spliced/unspliced abundances (adata.layers)')
return adata if copy else None |
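This appears to be scvelo's preprocessing function, exposed as scv.pp.moments. Assuming that package and its bundled pancreas example dataset, usage might look like:

import scvelo as scv

adata = scv.datasets.pancreas()  # AnnData with spliced/unspliced layers
scv.pp.moments(adata, n_neighbors=30, n_pcs=30)
print(adata.layers["Ms"].shape, adata.layers["Mu"].shape)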
def update(self, other_info, graph, metric_value, model_id):
""" Update the controller with evaluation result of a neural architecture.
Parameters
----------
other_info: any object
In our case it is the father ID in the search tree.
graph: Graph
An instance of Graph. The trained neural architecture.
metric_value: float
The final evaluated metric value.
model_id: int
"""
father_id = other_info
self.bo.fit([graph.extract_descriptor()], [metric_value])
self.bo.add_child(father_id, model_id) | Update the controller with evaluation result of a neural architecture.
Parameters
----------
other_info: any object
In our case it is the father ID in the search tree.
graph: Graph
An instance of Graph. The trained neural architecture.
metric_value: float
The final evaluated metric value.
model_id: int | Below is the instruction that describes the task:
### Input:
Update the controller with evaluation result of a neural architecture.
Parameters
----------
other_info: any object
In our case it is the father ID in the search tree.
graph: Graph
An instance of Graph. The trained neural architecture.
metric_value: float
The final evaluated metric value.
model_id: int
### Response:
def update(self, other_info, graph, metric_value, model_id):
""" Update the controller with evaluation result of a neural architecture.
Parameters
----------
other_info: any object
In our case it is the father ID in the search tree.
graph: Graph
An instance of Graph. The trained neural architecture.
metric_value: float
The final evaluated metric value.
model_id: int
"""
father_id = other_info
self.bo.fit([graph.extract_descriptor()], [metric_value])
self.bo.add_child(father_id, model_id) |
def url(self):
"""The url ``str`` for :tl:`KeyboardButtonUrl` objects."""
if isinstance(self.button, types.KeyboardButtonUrl):
return self.button.url | The url ``str`` for :tl:`KeyboardButtonUrl` objects. | Below is the instruction that describes the task:
### Input:
The url ``str`` for :tl:`KeyboardButtonUrl` objects.
### Response:
def url(self):
"""The url ``str`` for :tl:`KeyboardButtonUrl` objects."""
if isinstance(self.button, types.KeyboardButtonUrl):
return self.button.url |
def _get_plugin_stats(self, name):
'''
Used for getting stats for Plugin based stuff, like Kafka Monitor
and Redis Monitor
@param name: the main class stats name
@return: A formatted dict of stats
'''
the_dict = {}
keys = self.redis_conn.keys('stats:{n}:*'.format(n=name))
for key in keys:
# break down key
elements = key.split(":")
main = elements[2]
end = elements[3]
if main == 'total' or main == 'fail':
if main not in the_dict:
the_dict[main] = {}
the_dict[main][end] = self._get_key_value(key, end == 'lifetime')
elif main == 'self':
if 'nodes' not in the_dict:
# main is self, end is machine, true_tail is uuid
the_dict['nodes'] = {}
true_tail = elements[4]
if end not in the_dict['nodes']:
the_dict['nodes'][end] = []
the_dict['nodes'][end].append(true_tail)
else:
if 'plugins' not in the_dict:
the_dict['plugins'] = {}
if main not in the_dict['plugins']:
the_dict['plugins'][main] = {}
the_dict['plugins'][main][end] = self._get_key_value(key, end == 'lifetime')
return the_dict | Used for getting stats for Plugin based stuff, like Kafka Monitor
and Redis Monitor
@param name: the main class stats name
@return: A formatted dict of stats | Below is the instruction that describes the task:
### Input:
Used for getting stats for Plugin based stuff, like Kafka Monitor
and Redis Monitor
@param name: the main class stats name
@return: A formatted dict of stats
### Response:
def _get_plugin_stats(self, name):
'''
Used for getting stats for Plugin based stuff, like Kafka Monitor
and Redis Monitor
@param name: the main class stats name
@return: A formatted dict of stats
'''
the_dict = {}
keys = self.redis_conn.keys('stats:{n}:*'.format(n=name))
for key in keys:
# break down key
elements = key.split(":")
main = elements[2]
end = elements[3]
if main == 'total' or main == 'fail':
if main not in the_dict:
the_dict[main] = {}
the_dict[main][end] = self._get_key_value(key, end == 'lifetime')
elif main == 'self':
if 'nodes' not in the_dict:
# main is self, end is machine, true_tail is uuid
the_dict['nodes'] = {}
true_tail = elements[4]
if end not in the_dict['nodes']:
the_dict['nodes'][end] = []
the_dict['nodes'][end].append(true_tail)
else:
if 'plugins' not in the_dict:
the_dict['plugins'] = {}
if main not in the_dict['plugins']:
the_dict['plugins'][main] = {}
the_dict['plugins'][main][end] = self._get_key_value(key, end == 'lifetime')
return the_dict |
def push(self, path, name, tag=None):
'''push an image to Singularity Registry
path: should correspond to an absolute image path (or derive it)
name: should be the complete uri that the user has requested to push.
tag: should correspond with an image tag. This is provided to mirror Docker
'''
path = os.path.abspath(path)
bot.debug("PUSH %s" % path)
if not os.path.exists(path):
bot.error('%s does not exist.' %path)
sys.exit(1)
# This returns a data structure with collection, container, based on uri
names = parse_image_name(remove_uri(name),tag=tag)
# use Singularity client, if exists, to inspect to extract metadata
metadata = self.get_metadata(path, names=names)
# If you want a spinner
bot.spinner.start()
# do your push request here. Generally you want to catch a KeyboardInterrupt
# and give the user a status from the response
bot.spinner.stop() | push an image to Singularity Registry
path: should correspond to an absolute image path (or derive it)
name: should be the complete uri that the user has requested to push.
tag: should correspond with an image tag. This is provided to mirror Docker | Below is the instruction that describes the task:
### Input:
push an image to Singularity Registry
path: should correspond to an absolute image path (or derive it)
name: should be the complete uri that the user has requested to push.
tag: should correspond with an image tag. This is provided to mirror Docker
### Response:
def push(self, path, name, tag=None):
'''push an image to Singularity Registry
path: should correspond to an absolute image path (or derive it)
name: should be the complete uri that the user has requested to push.
tag: should correspond with an image tag. This is provided to mirror Docker
'''
path = os.path.abspath(path)
bot.debug("PUSH %s" % path)
if not os.path.exists(path):
bot.error('%s does not exist.' %path)
sys.exit(1)
# This returns a data structure with collection, container, based on uri
names = parse_image_name(remove_uri(name),tag=tag)
# use Singularity client, if exists, to inspect to extract metadata
metadata = self.get_metadata(path, names=names)
# If you want a spinner
bot.spinner.start()
# do your push request here. Generally you want to catch a KeyboardInterrupt
# and give the user a status from the response
bot.spinner.stop() |
def _send_data(self, data):
"""
Try to send all data in buffer.
"""
try:
self.socket.sendall(data)
self._reset_errors()
except:
self._close()
self._throttle_error("GraphiteHandler: Socket error, "
"trying reconnect.")
self._connect()
try:
self.socket.sendall(data)
except:
return
self._reset_errors() | Try to send all data in buffer. | Below is the instruction that describes the task:
### Input:
Try to send all data in buffer.
### Response:
def _send_data(self, data):
"""
Try to send all data in buffer.
"""
try:
self.socket.sendall(data)
self._reset_errors()
except:
self._close()
self._throttle_error("GraphiteHandler: Socket error, "
"trying reconnect.")
self._connect()
try:
self.socket.sendall(data)
except:
return
self._reset_errors() |
def prior_rev(C, alpha=-1.0):
r"""Prior counts for sampling of reversible transition
matrices.
Prior is defined as
b_ij= alpha if i<=j
b_ij=0 else
The reversible prior adds -1 to the upper triangular part of
the given count matrix. This prior respects the fact that
for a reversible transition matrix the degrees of freedom
correspond essentially to the upper, respectively the lower
triangular part of the matrix.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
alpha : float (optional)
Value of prior counts
Returns
-------
B : (M, M) ndarray
Matrix of prior counts
"""
ind = np.triu_indices(C.shape[0])
B = np.zeros(C.shape)
B[ind] = alpha
return B | r"""Prior counts for sampling of reversible transition
matrices.
Prior is defined as
b_ij= alpha if i<=j
b_ij=0 else
The reversible prior adds -1 to the upper triangular part of
the given count matrix. This prior respects the fact that
for a reversible transition matrix the degrees of freedom
correspond essentially to the upper, respectively the lower
triangular part of the matrix.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
alpha : float (optional)
Value of prior counts
Returns
-------
B : (M, M) ndarray
Matrix of prior counts | Below is the instruction that describes the task:
### Input:
r"""Prior counts for sampling of reversible transition
matrices.
Prior is defined as
b_ij= alpha if i<=j
b_ij=0 else
The reversible prior adds -1 to the upper triangular part of
the given count matrix. This prior respects the fact that
for a reversible transition matrix the degrees of freedom
correspond essentially to the upper, respectively the lower
triangular part of the matrix.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
alpha : float (optional)
Value of prior counts
Returns
-------
B : (M, M) ndarray
Matrix of prior counts
### Response:
def prior_rev(C, alpha=-1.0):
r"""Prior counts for sampling of reversible transition
matrices.
Prior is defined as
b_ij= alpha if i<=j
b_ij=0 else
The reversible prior adds -1 to the upper triangular part of
the given count matrix. This prior respects the fact that
for a reversible transition matrix the degrees of freedom
correspond essentially to the upper, respectively the lower
triangular part of the matrix.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
alpha : float (optional)
Value of prior counts
Returns
-------
B : (M, M) ndarray
Matrix of prior counts
"""
ind = np.triu_indices(C.shape[0])
B = np.zeros(C.shape)
B[ind] = alpha
return B |
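A small numpy check makes the shape of the prior obvious: only the upper triangle, diagonal included, receives alpha, matching the b_ij definition above.

import numpy as np

C = np.array([[10, 2, 0],
              [3, 8, 1],
              [0, 2, 6]])
B = prior_rev(C)  # default alpha = -1.0
print(B)
# [[-1. -1. -1.]
#  [ 0. -1. -1.]
#  [ 0.  0. -1.]]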
def authorizer(self, schemes, resource, action, request_args):
"""Construct the Authorization header for a request.
Args:
schemes (list of str): Authentication schemes supported for the
requested action.
resource (str): Object upon which an action is being performed.
action (str): Action being performed.
request_args (list of str): Arguments passed to the action call.
Returns:
(str, str) A tuple of the auth scheme satisfied, and the credential
for the Authorization header or empty strings if none could be
satisfied.
"""
if not schemes:
return u'', u''
for scheme in schemes:
if scheme in self.schemes and self.has_auth_params(scheme):
cred = Context.format_auth_params(self.schemes[scheme][u'params'])
if hasattr(self, 'mfa_token'):
cred = '{}, mfa_token="{}"'.format(cred, self.mfa_token)
return scheme, cred
raise AuthenticationError(self, schemes) | Construct the Authorization header for a request.
Args:
schemes (list of str): Authentication schemes supported for the
requested action.
resource (str): Object upon which an action is being performed.
action (str): Action being performed.
request_args (list of str): Arguments passed to the action call.
Returns:
(str, str) A tuple of the auth scheme satisfied, and the credential
for the Authorization header or empty strings if none could be
satisfied. | Below is the instruction that describes the task:
### Input:
Construct the Authorization header for a request.
Args:
schemes (list of str): Authentication schemes supported for the
requested action.
resource (str): Object upon which an action is being performed.
action (str): Action being performed.
request_args (list of str): Arguments passed to the action call.
Returns:
(str, str) A tuple of the auth scheme satisfied, and the credential
for the Authorization header or empty strings if none could be
satisfied.
### Response:
def authorizer(self, schemes, resource, action, request_args):
"""Construct the Authorization header for a request.
Args:
schemes (list of str): Authentication schemes supported for the
requested action.
resource (str): Object upon which an action is being performed.
action (str): Action being performed.
request_args (list of str): Arguments passed to the action call.
Returns:
(str, str) A tuple of the auth scheme satisfied, and the credential
for the Authorization header or empty strings if none could be
satisfied.
"""
if not schemes:
return u'', u''
for scheme in schemes:
if scheme in self.schemes and self.has_auth_params(scheme):
cred = Context.format_auth_params(self.schemes[scheme][u'params'])
if hasattr(self, 'mfa_token'):
cred = '{}, mfa_token="{}"'.format(cred, self.mfa_token)
return scheme, cred
raise AuthenticationError(self, schemes) |
def replace_nones(dict_or_list):
"""Update a dict or list in place to replace
'none' string values with Python None."""
def replace_none_in_value(value):
if isinstance(value, basestring) and value.lower() == "none":
return None
return value
items = dict_or_list.iteritems() if isinstance(dict_or_list, dict) else enumerate(dict_or_list)
for accessor, value in items:
if isinstance(value, (dict, list)):
replace_nones(value)
else:
dict_or_list[accessor] = replace_none_in_value(value) | Update a dict or list in place to replace
'none' string values with Python None. | Below is the instruction that describes the task:
### Input:
Update a dict or list in place to replace
'none' string values with Python None.
### Response:
def replace_nones(dict_or_list):
"""Update a dict or list in place to replace
'none' string values with Python None."""
def replace_none_in_value(value):
if isinstance(value, basestring) and value.lower() == "none":
return None
return value
items = dict_or_list.iteritems() if isinstance(dict_or_list, dict) else enumerate(dict_or_list)
for accessor, value in items:
if isinstance(value, (dict, list)):
replace_nones(value)
else:
dict_or_list[accessor] = replace_none_in_value(value) |
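The function is written for Python 2 (iteritems, basestring). A Python 3 equivalent with the same in-place semantics might look like:

def replace_nones_py3(dict_or_list):
    # items()/str replace the Python 2 iteritems()/basestring; only values
    # of existing keys are replaced, so mutating during iteration is safe.
    items = dict_or_list.items() if isinstance(dict_or_list, dict) else enumerate(dict_or_list)
    for accessor, value in items:
        if isinstance(value, (dict, list)):
            replace_nones_py3(value)
        elif isinstance(value, str) and value.lower() == "none":
            dict_or_list[accessor] = None

config = {"timeout": "None", "hosts": ["a", "none"]}
replace_nones_py3(config)
print(config)  # {'timeout': None, 'hosts': ['a', None]}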
def resolve(self, other: Type) -> Optional[Type]:
"""See ``PlaceholderType.resolve``"""
if not isinstance(other, NltkComplexType):
return None
expected_second = ComplexType(NUMBER_TYPE,
ComplexType(ANY_TYPE, ComplexType(ComplexType(ANY_TYPE, ANY_TYPE),
ANY_TYPE)))
resolved_second = other.second.resolve(expected_second)
if resolved_second is None:
return None
# The lambda function that we use inside the argmax must take either a number or a date as
# an argument.
lambda_arg_type = other.second.second.second.first.first
if lambda_arg_type.resolve(NUMBER_TYPE) is None and lambda_arg_type.resolve(DATE_TYPE) is None:
return None
try:
# This is the first #1 in the type signature above.
selector_function_type = resolved_second.second.first
# This is the second #1 in the type signature above.
quant_function_argument_type = resolved_second.second.second.first.second
# This is the third #1 in the type signature above.
return_type = resolved_second.second.second.second
# All three placeholder (ph) types above should resolve against each other.
resolved_first_ph = selector_function_type.resolve(quant_function_argument_type)
resolved_first_ph.resolve(return_type)
resolved_second_ph = quant_function_argument_type.resolve(resolved_first_ph)
resolved_second_ph.resolve(return_type)
resolved_third_ph = return_type.resolve(resolved_first_ph)
resolved_third_ph = return_type.resolve(resolved_second_ph)
if not resolved_first_ph or not resolved_second_ph or not resolved_third_ph:
return None
return ArgExtremeType(resolved_first_ph, lambda_arg_type)
except AttributeError:
return None | See ``PlaceholderType.resolve`` | Below is the instruction that describes the task:
### Input:
See ``PlaceholderType.resolve``
### Response:
def resolve(self, other: Type) -> Optional[Type]:
"""See ``PlaceholderType.resolve``"""
if not isinstance(other, NltkComplexType):
return None
expected_second = ComplexType(NUMBER_TYPE,
ComplexType(ANY_TYPE, ComplexType(ComplexType(ANY_TYPE, ANY_TYPE),
ANY_TYPE)))
resolved_second = other.second.resolve(expected_second)
if resolved_second is None:
return None
# The lambda function that we use inside the argmax must take either a number or a date as
# an argument.
lambda_arg_type = other.second.second.second.first.first
if lambda_arg_type.resolve(NUMBER_TYPE) is None and lambda_arg_type.resolve(DATE_TYPE) is None:
return None
try:
# This is the first #1 in the type signature above.
selector_function_type = resolved_second.second.first
# This is the second #1 in the type signature above.
quant_function_argument_type = resolved_second.second.second.first.second
# This is the third #1 in the type signature above.
return_type = resolved_second.second.second.second
# All three placeholder (ph) types above should resolve against each other.
resolved_first_ph = selector_function_type.resolve(quant_function_argument_type)
resolved_first_ph.resolve(return_type)
resolved_second_ph = quant_function_argument_type.resolve(resolved_first_ph)
resolved_second_ph.resolve(return_type)
resolved_third_ph = return_type.resolve(resolved_first_ph)
resolved_third_ph = return_type.resolve(resolved_second_ph)
if not resolved_first_ph or not resolved_second_ph or not resolved_third_ph:
return None
return ArgExtremeType(resolved_first_ph, lambda_arg_type)
except AttributeError:
return None |
def mine_patterns(self, threshold):
"""
Mine the constructed FP tree for frequent patterns.
"""
if self.tree_has_single_path(self.root):
return self.generate_pattern_list()
else:
return self.zip_patterns(self.mine_sub_trees(threshold)) | Mine the constructed FP tree for frequent patterns. | Below is the instruction that describes the task:
### Input:
Mine the constructed FP tree for frequent patterns.
### Response:
def mine_patterns(self, threshold):
"""
Mine the constructed FP tree for frequent patterns.
"""
if self.tree_has_single_path(self.root):
return self.generate_pattern_list()
else:
return self.zip_patterns(self.mine_sub_trees(threshold)) |
def put_if_absent(self, key, value, ttl=-1):
"""
Associates the specified key with the given value if it is not already associated. If ttl is provided, entry
will expire and get evicted after the ttl.
This is equivalent to:
>>> if not map.contains_key(key):
>>> return map.put(key,value)
>>> else:
>>> return map.get(key)
except that the action is performed atomically.
**Warning:
This method returns a clone of the previous value, not the original (identically equal) value previously put
into the map.**
**Warning 2: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), key of the entry.
:param value: (object), value of the entry.
:param ttl: (int), maximum time in seconds for this entry to stay in the map, if not provided, the value
configured on server side configuration will be used (optional).
:return: (object), old value of the entry.
"""
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._put_if_absent_internal(key_data, value_data, ttl) | Associates the specified key with the given value if it is not already associated. If ttl is provided, entry
will expire and get evicted after the ttl.
This is equivalent to:
>>> if not map.contains_key(key):
>>> return map.put(key,value)
>>> else:
>>> return map.get(key)
except that the action is performed atomically.
**Warning:
This method returns a clone of the previous value, not the original (identically equal) value previously put
into the map.**
**Warning 2: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), key of the entry.
:param value: (object), value of the entry.
:param ttl: (int), maximum time in seconds for this entry to stay in the map, if not provided, the value
configured on server side configuration will be used (optional).
:return: (object), old value of the entry. | Below is the instruction that describes the task:
### Input:
Associates the specified key with the given value if it is not already associated. If ttl is provided, entry
will expire and get evicted after the ttl.
This is equivalent to:
>>> if not map.contains_key(key):
>>> return map.put(key,value)
>>> else:
>>> return map.get(key)
except that the action is performed atomically.
**Warning:
This method returns a clone of the previous value, not the original (identically equal) value previously put
into the map.**
**Warning 2: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), key of the entry.
:param value: (object), value of the entry.
:param ttl: (int), maximum time in seconds for this entry to stay in the map, if not provided, the value
configured on server side configuration will be used (optional).
:return: (object), old value of the entry.
### Response:
def put_if_absent(self, key, value, ttl=-1):
"""
Associates the specified key with the given value if it is not already associated. If ttl is provided, entry
will expire and get evicted after the ttl.
This is equivalent to:
>>> if not map.contains_key(key):
>>> return map.put(key,value)
>>> else:
>>> return map.get(key)
except that the action is performed atomically.
**Warning:
This method returns a clone of the previous value, not the original (identically equal) value previously put
into the map.**
**Warning 2: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), key of the entry.
:param value: (object), value of the entry.
:param ttl: (int), maximum time in seconds for this entry to stay in the map, if not provided, the value
configured on server side configuration will be used (optional).
:return: (object), old value of the entry.
"""
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._put_if_absent_internal(key_data, value_data, ttl) |
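Assuming this is the Hazelcast Python client's map proxy, a usage sketch with the blocking API (the map name, key, and value are illustrative):

import hazelcast

client = hazelcast.HazelcastClient()
sessions = client.get_map("sessions").blocking()
old = sessions.put_if_absent("user:42", "token-abc", ttl=300)
print("created" if old is None else "already present: %s" % old)
client.shutdown()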
def instruction_ROL_memory(self, opcode, ea, m):
""" Rotate memory left """
r = self.ROL(m)
# log.debug("$%x ROL memory value $%x << 1 | Carry = $%x and write it to $%x \t| %s" % (
# self.program_counter,
# m, r, ea,
# self.cfg.mem_info.get_shortest(ea)
# ))
return ea, r & 0xff | Rotate memory left | Below is the instruction that describes the task:
### Input:
Rotate memory left
### Response:
def instruction_ROL_memory(self, opcode, ea, m):
""" Rotate memory left """
r = self.ROL(m)
# log.debug("$%x ROL memory value $%x << 1 | Carry = $%x and write it to $%x \t| %s" % (
# self.program_counter,
# m, r, ea,
# self.cfg.mem_info.get_shortest(ea)
# ))
return ea, r & 0xff |
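self.ROL() is defined elsewhere in this CPU class; on the 6809 it is an 8-bit rotate-left through the carry flag. A standalone sketch of that operation (the function name is illustrative):

def rol_through_carry(value, carry):
    # The old carry enters bit 0; the old bit 7 becomes the new carry.
    result = ((value << 1) | (1 if carry else 0)) & 0xFF
    new_carry = bool(value & 0x80)
    return result, new_carry

print(rol_through_carry(0x81, carry=False))  # (2, True)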
def verbose_option(default=False):
""" Attaches the option ``verbose`` with its *default* value to the
keyword arguments when the option does not exist. All positional
arguments and keyword arguments are forwarded unchanged.
"""
def decorator(method):
@wraps(method)
def wrapper(*args, **kwargs):
option = Option.verbose.value
kwargs[option] = kwargs.get(option, bool(default))
return method(*args, **kwargs)
return wrapper
return decorator | Attaches the option ``verbose`` with its *default* value to the
keyword arguments when the option does not exist. All positional
arguments and keyword arguments are forwarded unchanged. | Below is the instruction that describes the task:
### Input:
Attaches the option ``verbose`` with its *default* value to the
keyword arguments when the option does not exist. All positional
arguments and keyword arguments are forwarded unchanged.
### Response:
def verbose_option(default=False):
""" Attaches the option ``verbose`` with its *default* value to the
keyword arguments when the option does not exist. All positional
arguments and keyword arguments are forwarded unchanged.
"""
def decorator(method):
@wraps(method)
def wrapper(*args, **kwargs):
option = Option.verbose.value
kwargs[option] = kwargs.get(option, bool(default))
return method(*args, **kwargs)
return wrapper
return decorator |
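Option.verbose.value comes from the package's own Option enum, so the decorator above is not self-contained. A self-contained variant that hard-codes the option name shows the mechanics:

from functools import wraps

def verbose_option_sketch(default=False):
    def decorator(method):
        @wraps(method)
        def wrapper(*args, **kwargs):
            # Inject the default only when the caller did not pass one.
            kwargs["verbose"] = kwargs.get("verbose", bool(default))
            return method(*args, **kwargs)
        return wrapper
    return decorator

@verbose_option_sketch(default=True)
def greet(name, verbose=False):
    return "Hello, %s!%s" % (name, " (verbose)" if verbose else "")

print(greet("Ada"))                 # default True injected
print(greet("Ada", verbose=False))  # caller-provided value wins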
def load_build_configuration_from_source(build_configuration, backends=None):
"""Installs pants backend packages to provide BUILD file symbols and cli goals.
:param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases).
:param backends: An optional list of additional packages to load backends from.
:raises: :class:``pants.base.exceptions.BuildConfigurationError`` if there is a problem loading
the build configuration.
"""
# pants.build_graph and pants.core_task must always be loaded, and before any other backends.
# TODO: Consider replacing the "backend" nomenclature here. pants.build_graph and
# pants.core_tasks aren't really backends.
backend_packages = OrderedSet(['pants.build_graph', 'pants.core_tasks'] + (backends or []))
for backend_package in backend_packages:
load_backend(build_configuration, backend_package) | Installs pants backend packages to provide BUILD file symbols and cli goals.
:param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases).
:param backends: An optional list of additional packages to load backends from.
:raises: :class:``pants.base.exceptions.BuildConfigurationError`` if there is a problem loading
the build configuration. | Below is the instruction that describes the task:
### Input:
Installs pants backend packages to provide BUILD file symbols and cli goals.
:param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases).
:param backends: An optional list of additional packages to load backends from.
:raises: :class:``pants.base.exceptions.BuildConfigurationError`` if there is a problem loading
the build configuration.
### Response:
def load_build_configuration_from_source(build_configuration, backends=None):
"""Installs pants backend packages to provide BUILD file symbols and cli goals.
:param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases).
:param backends: An optional list of additional packages to load backends from.
:raises: :class:``pants.base.exceptions.BuildConfigurationError`` if there is a problem loading
the build configuration.
"""
# pants.build_graph and pants.core_task must always be loaded, and before any other backends.
# TODO: Consider replacing the "backend" nomenclature here. pants.build_graph and
# pants.core_tasks aren't really backends.
backend_packages = OrderedSet(['pants.build_graph', 'pants.core_tasks'] + (backends or []))
for backend_package in backend_packages:
load_backend(build_configuration, backend_package) |
def profile_to_cross_section(profile, lefthand=False, start_count=1, min_vertices=20):
r"""
Converts a thread profile to it's equivalent cross-section.
**Profile:**
The thread profile contains a single wire along the XZ plane
(note: wire will be projected onto the XZ plane; Y-coords will be ignored).
The profile is expected to be of 1 thread rotation, so its height
(along the Z-axis) is the thread's "pitch".
If start_count > 1, then the profile will effectively be duplicated.
The resulting cross-section is designed to be swept along a helical path
with a pitch of the thread's "lead" (which is {the height of the given
profile} * start_count)
**Method:**
Each edge of the profile is converted to a bezier spline, approximating
its polar plot equivalent.
**Resolution:** (via `min_vertices` parameter)
Increasing the number of vertices used to define the bezier will
increase the resulting thread's accuracy, but cost more to render.
min_vertices may also be expressed as a list to set the number of
vertices to set for each wire.
where: len(min_vertices) == number of edges in profile
**Example**
.. doctest::
import cadquery
from cqparts_fasteners.solidtypes.threads.base import profile_to_cross_section
from Helpers import show # doctest: +SKIP
profile = cadquery.Workplane("XZ") \
.moveTo(1, 0) \
.lineTo(2, 1).lineTo(1, 2) \
.wire()
cross_section = profile_to_cross_section(profile)
show(profile) # doctest: +SKIP
show(cross_section) # doctest: +SKIP
Will result in:
.. image:: /_static/img/solidtypes.threads.base.profile_to_cross_section.01.png
:param profile: workplane containing wire of thread profile.
:type profile: :class:`cadquery.Workplane`
:param lefthand: if True, cross-section is made backwards.
:type lefthand: :class:`bool`
:param start_count: profile is duplicated this many times.
:type start_count: :class:`int`
:param min_vertices: int or tuple of the desired resolution.
:type min_vertices: :class:`int` or :class:`tuple`
:return: workplane with a face ready to be swept into a thread.
:rtype: :class:`cadquery.Workplane`
:raises TypeError: if a problem is found with the given parameters.
:raises ValueError: if ``min_vertices`` is a list with elements not equal to the number of wire edges.
"""
# verify parameter(s)
if not isinstance(profile, cadquery.Workplane):
raise TypeError("profile %r must be a %s instance" % (profile, cadquery.Workplane))
if not isinstance(min_vertices, (int, list, tuple)):
raise TypeError("min_vertices %r must be an int, list, or tuple" % (min_vertices))
# get wire from Workplane
wire = profile.val() # cadquery.Wire
if not isinstance(wire, cadquery.Wire):
raise TypeError("a valid profile Wire type could not be found in the given Workplane")
profile_bb = wire.BoundingBox()
pitch = profile_bb.zmax - profile_bb.zmin
lead = pitch * start_count
# determine vertices count per edge
edges = wire.Edges()
vertices_count = None
if isinstance(min_vertices, int):
# evenly spread vertices count along profile wire
# (weighted by the edge's length)
vertices_count = [
int(ceil(round(e.Length() / wire.Length(), 7) * min_vertices))
for e in edges
]
# rounded for desired contrived results
# (trade-off: an error of 1 is of no great consequence)
else:
# min_vertices is defined per edge (already what we want)
if len(min_vertices) != len(edges):
raise ValueError(
"min_vertices list size does not match number of profile edges: "
"len(%r) != %i" % (min_vertices, len(edges))
)
vertices_count = min_vertices
# Utilities for building cross-section
def get_xz(vertex):
if isinstance(vertex, cadquery.Vector):
vertex = vertex.wrapped # TODO: remove this, it's messy
# where isinstance(vertex, FreeCAD.Base.Vector)
return (vertex.x, vertex.z)
def cart2polar(x, z, z_offset=0):
"""
Convert cartesian coordinates to polar coordinates.
Uses thread's lead height to give full 360deg translation.
"""
radius = x
angle = ((z + z_offset) / lead) * (2 * pi) # radians
if not lefthand:
angle = -angle
return (radius, angle)
def transform(vertex, z_offset=0):
# where isinstance(vertex, FreeCAD.Base.Vector)
"""
Transform profile vertex on the XZ plane to its equivalent on
the cross-section's XY plane
"""
(radius, angle) = cart2polar(*get_xz(vertex), z_offset=z_offset)
return (radius * cos(angle), radius * sin(angle))
# Conversion methods
def apply_spline(wp, edge, vert_count, z_offset=0):
"""
Trace along edge and create a spline from the transformed vertices.
"""
curve = edge.wrapped.Curve # FreeCADPart.Geom* (depending on type)
if edge.geomType() == 'CIRCLE':
iter_dist = edge.wrapped.ParameterRange[1] / vert_count
else:
iter_dist = edge.Length() / vert_count
points = []
for j in range(vert_count):
dist = (j + 1) * iter_dist
vert = curve.value(dist)
points.append(transform(vert, z_offset))
return wp.spline(points)
def apply_arc(wp, edge, z_offset=0):
"""
Create an arc using edge's midpoint and endpoint.
Only intended for use for vertical lines on the given profile.
"""
return wp.threePointArc(
point1=transform(edge.wrapped.valueAt(edge.Length() / 2), z_offset),
point2=transform(edge.wrapped.valueAt(edge.Length()), z_offset),
)
def apply_radial_line(wp, edge, z_offset=0):
"""
Create a straight radial line
"""
return wp.lineTo(*transform(edge.endPoint(), z_offset))
# Build cross-section
start_v = edges[0].startPoint().wrapped
cross_section = cadquery.Workplane("XY") \
.moveTo(*transform(start_v))
for i in range(start_count):
z_offset = i * pitch
for (j, edge) in enumerate(wire.Edges()):
# where: isinstance(edge, cadquery.Edge)
if (edge.geomType() == 'LINE') and (edge.startPoint().x == edge.endPoint().x):
# edge is a vertical line, plot a circular arc
cross_section = apply_arc(cross_section, edge, z_offset)
elif (edge.geomType() == 'LINE') and (edge.startPoint().z == edge.endPoint().z):
# edge is a horizontal line, plot a radial line
cross_section = apply_radial_line(cross_section, edge, z_offset)
else:
# create bezier spline along transformed points (default)
cross_section = apply_spline(cross_section, edge, vertices_count[j], z_offset)
return cross_section.close() | r"""
Converts a thread profile to its equivalent cross-section.
**Profile:**
The thread profile contains a single wire along the XZ plane
(note: wire will be projected onto the XZ plane; Y-coords will be ignored).
The profile is expected to be of 1 thread rotation, so its height
(along the Z-axis) is the thread's "pitch".
If start_count > 1, then the profile will effectively be duplicated.
The resulting cross-section is designed to be swept along a helical path
with a pitch of the thread's "lead" (which is {the height of the given
profile} * start_count)
**Method:**
Each edge of the profile is converted to a bezier spline, approximating
its polar plot equivalent.
**Resolution:** (via `min_vertices` parameter)
Increasing the number of vertices used to define the bezier will
increase the resulting thread's accuracy, but cost more to render.
min_vertices may also be expressed as a list to set the number of
vertices to set for each wire.
where: len(min_vertices) == number of edges in profile
**Example**
.. doctest::
import cadquery
from cqparts_fasteners.solidtypes.threads.base import profile_to_cross_section
from Helpers import show # doctest: +SKIP
profile = cadquery.Workplane("XZ") \
.moveTo(1, 0) \
.lineTo(2, 1).lineTo(1, 2) \
.wire()
cross_section = profile_to_cross_section(profile)
show(profile) # doctest: +SKIP
show(cross_section) # doctest: +SKIP
Will result in:
.. image:: /_static/img/solidtypes.threads.base.profile_to_cross_section.01.png
:param profile: workplane containing wire of thread profile.
:type profile: :class:`cadquery.Workplane`
:param lefthand: if True, cross-section is made backwards.
:type lefthand: :class:`bool`
:param start_count: profile is duplicated this many times.
:type start_count: :class:`int`
:param min_vertices: int or tuple of the desired resolution.
:type min_vertices: :class:`int` or :class:`tuple`
:return: workplane with a face ready to be swept into a thread.
:rtype: :class:`cadquery.Workplane`
:raises TypeError: if a problem is found with the given parameters.
:raises ValueError: if ``min_vertices`` is a list with elements not equal to the number of wire edges. | Below is the instruction that describes the task:
### Input:
r"""
Converts a thread profile to its equivalent cross-section.
**Profile:**
The thread profile contains a single wire along the XZ plane
(note: wire will be projected onto the XZ plane; Y-coords will be ignored).
The profile is expected to be of 1 thread rotation, so its height
(along the Z-axis) is the thread's "pitch".
If start_count > 1, then the profile will effectively be duplicated.
The resulting cross-section is designed to be swept along a helical path
with a pitch of the thread's "lead" (which is {the height of the given
profile} * start_count)
**Method:**
Each edge of the profile is converted to a bezier spline, approximating
its polar plot equivalent.
**Resolution:** (via `min_vertices` parameter)
Increasing the number of vertices used to define the bezier will
increase the resulting thread's accuracy, but cost more to render.
min_vertices may also be expressed as a list to set the number of
vertices to set for each wire.
where: len(min_vertices) == number of edges in profile
**Example**
.. doctest::
import cadquery
from cqparts_fasteners.solidtypes.threads.base import profile_to_cross_section
from Helpers import show # doctest: +SKIP
profile = cadquery.Workplane("XZ") \
.moveTo(1, 0) \
.lineTo(2, 1).lineTo(1, 2) \
.wire()
cross_section = profile_to_cross_section(profile)
show(profile) # doctest: +SKIP
show(cross_section) # doctest: +SKIP
Will result in:
.. image:: /_static/img/solidtypes.threads.base.profile_to_cross_section.01.png
:param profile: workplane containing wire of thread profile.
:type profile: :class:`cadquery.Workplane`
:param lefthand: if True, cross-section is made backwards.
:type lefthand: :class:`bool`
:param start_count: profile is duplicated this many times.
:type start_count: :class:`int`
:param min_vertices: int or tuple of the desired resolution.
:type min_vertices: :class:`int` or :class:`tuple`
:return: workplane with a face ready to be swept into a thread.
:rtype: :class:`cadquery.Workplane`
:raises TypeError: if a problem is found with the given parameters.
:raises ValueError: if ``min_vertices`` is a list with elements not equal to the number of wire edges.
### Response:
def profile_to_cross_section(profile, lefthand=False, start_count=1, min_vertices=20):
r"""
Converts a thread profile to its equivalent cross-section.
**Profile:**
The thread profile contains a single wire along the XZ plane
(note: wire will be projected onto the XZ plane; Y-coords will be ignored).
The profile is expected to be of 1 thread rotation, so its height
(along the Z-axis) is the thread's "pitch".
If start_count > 1, then the profile will effectively be duplicated.
The resulting cross-section is designed to be swept along a helical path
with a pitch of the thread's "lead" (which is {the height of the given
profile} * start_count)
**Method:**
Each edge of the profile is converted to a bezier spline, approximating
its polar plot equivalent.
**Resolution:** (via `min_vertices` parameter)
Increasing the number of vertices used to define the bezier will
increase the resulting thread's accuracy, but cost more to render.
min_vertices may also be expressed as a list to set the number of
vertices to set for each wire.
where: len(min_vertices) == number of edges in profile
**Example**
.. doctest::
import cadquery
from cqparts_fasteners.solidtypes.threads.base import profile_to_cross_section
from Helpers import show # doctest: +SKIP
profile = cadquery.Workplane("XZ") \
.moveTo(1, 0) \
.lineTo(2, 1).lineTo(1, 2) \
.wire()
cross_section = profile_to_cross_section(profile)
show(profile) # doctest: +SKIP
show(cross_section) # doctest: +SKIP
Will result in:
.. image:: /_static/img/solidtypes.threads.base.profile_to_cross_section.01.png
:param profile: workplane containing wire of thread profile.
:type profile: :class:`cadquery.Workplane`
:param lefthand: if True, cross-section is made backwards.
:type lefthand: :class:`bool`
:param start_count: profile is duplicated this many times.
:type start_count: :class:`int`
:param min_vertices: int or tuple of the desired resolution.
:type min_vertices: :class:`int` or :class:`tuple`
:return: workplane with a face ready to be swept into a thread.
:rtype: :class:`cadquery.Workplane`
:raises TypeError: if a problem is found with the given parameters.
:raises ValueError: if ``min_vertices`` is a list with elements not equal to the number of wire edges.
"""
# verify parameter(s)
if not isinstance(profile, cadquery.Workplane):
raise TypeError("profile %r must be a %s instance" % (profile, cadquery.Workplane))
if not isinstance(min_vertices, (int, list, tuple)):
raise TypeError("min_vertices %r must be an int, list, or tuple" % (min_vertices))
# get wire from Workplane
wire = profile.val() # cadquery.Wire
if not isinstance(wire, cadquery.Wire):
raise TypeError("a valid profile Wire type could not be found in the given Workplane")
profile_bb = wire.BoundingBox()
pitch = profile_bb.zmax - profile_bb.zmin
lead = pitch * start_count
# determine vertices count per edge
edges = wire.Edges()
vertices_count = None
if isinstance(min_vertices, int):
# evenly spread vertices count along profile wire
# (weighted by the edge's length)
vertices_count = [
int(ceil(round(e.Length() / wire.Length(), 7) * min_vertices))
for e in edges
]
# rounded for desired contrived results
# (trade-off: an error of 1 is of no great consequence)
else:
# min_vertices is defined per edge (already what we want)
if len(min_vertices) != len(edges):
raise ValueError(
"min_vertices list size does not match number of profile edges: "
"len(%r) != %i" % (min_vertices, len(edges))
)
vertices_count = min_vertices
# Utilities for building cross-section
def get_xz(vertex):
if isinstance(vertex, cadquery.Vector):
vertex = vertex.wrapped # TODO: remove this, it's messy
# where isinstance(vertex, FreeCAD.Base.Vector)
return (vertex.x, vertex.z)
def cart2polar(x, z, z_offset=0):
"""
Convert cartesian coordinates to polar coordinates.
Uses thread's lead height to give full 360deg translation.
"""
radius = x
angle = ((z + z_offset) / lead) * (2 * pi) # radians
if not lefthand:
angle = -angle
return (radius, angle)
def transform(vertex, z_offset=0):
# where isinstance(vertex, FreeCAD.Base.Vector)
"""
Transform profile vertex on the XZ plane to its equivalent on
the cross-section's XY plane
"""
(radius, angle) = cart2polar(*get_xz(vertex), z_offset=z_offset)
return (radius * cos(angle), radius * sin(angle))
# Conversion methods
def apply_spline(wp, edge, vert_count, z_offset=0):
"""
Trace along edge and create a spline from the transformed vertices.
"""
curve = edge.wrapped.Curve # FreeCADPart.Geom* (depending on type)
if edge.geomType() == 'CIRCLE':
iter_dist = edge.wrapped.ParameterRange[1] / vert_count
else:
iter_dist = edge.Length() / vert_count
points = []
for j in range(vert_count):
dist = (j + 1) * iter_dist
vert = curve.value(dist)
points.append(transform(vert, z_offset))
return wp.spline(points)
def apply_arc(wp, edge, z_offset=0):
"""
Create an arc using edge's midpoint and endpoint.
Only intended for use for vertical lines on the given profile.
"""
return wp.threePointArc(
point1=transform(edge.wrapped.valueAt(edge.Length() / 2), z_offset),
point2=transform(edge.wrapped.valueAt(edge.Length()), z_offset),
)
def apply_radial_line(wp, edge, z_offset=0):
"""
Create a straight radial line
"""
return wp.lineTo(*transform(edge.endPoint(), z_offset))
# Build cross-section
start_v = edges[0].startPoint().wrapped
cross_section = cadquery.Workplane("XY") \
.moveTo(*transform(start_v))
for i in range(start_count):
z_offset = i * pitch
for (j, edge) in enumerate(wire.Edges()):
# where: isinstance(edge, cadquery.Edge)
if (edge.geomType() == 'LINE') and (edge.startPoint().x == edge.endPoint().x):
# edge is a vertical line, plot a circular arc
cross_section = apply_arc(cross_section, edge, z_offset)
elif (edge.geomType() == 'LINE') and (edge.startPoint().z == edge.endPoint().z):
# edge is a horizontal line, plot a radial line
cross_section = apply_radial_line(cross_section, edge, z_offset)
else:
# create bezier spline along transformed points (default)
cross_section = apply_spline(cross_section, edge, vertices_count[j], z_offset)
return cross_section.close() |
def generate_covariance(ts, sigma, tau):
r"""Generates a covariance matrix according to an
squared-exponential autocovariance
.. math::
\left\langle x_i x_j \right\rangle = \sigma_0^2 \delta_{ij} + \sigma^2 \exp\left[ - \frac{\left| t_i - t_j\right|^2}{2 \tau^2} \right]
"""
ndim = ts.shape[0]
tis = ts[:, np.newaxis]
tjs = ts[np.newaxis, :]
return sigma*sigma*np.exp(-np.square(tis-tjs)/(2.0*tau*tau)) | r"""Generates a covariance matrix according to a
squared-exponential autocovariance
.. math::
\left\langle x_i x_j \right\rangle = \sigma_0^2 \delta_{ij} + \sigma^2 \exp\left[ - \frac{\left| t_i - t_j\right|^2}{2 \tau^2} \right] | Below is the instruction that describes the task:
### Input:
r"""Generates a covariance matrix according to an
squared-exponential autocovariance
.. math::
\left\langle x_i x_j \right\rangle = \sigma_0^2 \delta_{ij} + \sigma^2 \exp\left[ - \frac{\left| t_i - t_j\right|^2}{2 \tau^2} \right]
### Response:
def generate_covariance(ts, sigma, tau):
r"""Generates a covariance matrix according to an
squared-exponential autocovariance
.. math::
\left\langle x_i x_j \right\rangle = \sigma_0^2 \delta_{ij} + \sigma^2 \exp\left[ - \frac{\left| t_i - t_j\right|^2}{2 \tau^2} \right]
"""
ndim = ts.shape[0]
tis = ts[:, np.newaxis]
tjs = ts[np.newaxis, :]
return sigma*sigma*np.exp(-np.square(tis-tjs)/(2.0*tau*tau)) |
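Note that the code returns only the smooth squared-exponential term; the sigma_0^2 delta_ij white-noise term from the docstring is not added, and ndim is unused. Example:

import numpy as np

ts = np.linspace(0.0, 10.0, 5)
K = generate_covariance(ts, sigma=2.0, tau=3.0)
print(K.shape)                     # (5, 5)
print(np.allclose(np.diag(K), 4))  # True: the diagonal is sigma**2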
def json(self):
"""Returns the json-encoded content of the response, if any."""
if hasattr(self, '_json'):
return self._json
try:
self._json = json.loads(self.text or self.content)
except ValueError:
self._json = None
return self._json | Returns the json-encoded content of the response, if any. | Below is the instruction that describes the task:
### Input:
Returns the json-encoded content of the response, if any.
### Response:
def json(self):
"""Returns the json-encoded content of the response, if any."""
if hasattr(self, '_json'):
return self._json
try:
self._json = json.loads(self.text or self.content)
except ValueError:
self._json = None
return self._json |