code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def sample(args):
"""
%prog sample vcffile 0.9
Sample subset of vcf file.
"""
from random import random
p = OptionParser(sample.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, ratio = args
ratio = float(ratio)
fp = open(vcffile)
pf = vcffile.rsplit(".", 1)[0]
kept = pf + ".kept.vcf"
withheld = pf + ".withheld.vcf"
fwk = open(kept, "w")
fww = open(withheld, "w")
nkept = nwithheld = 0
for row in fp:
if row[0] == '#':
print(row.strip(), file=fwk)
continue
if random() < ratio:
nkept += 1
print(row.strip(), file=fwk)
else:
nwithheld += 1
print(row.strip(), file=fww)
logging.debug("{0} records kept to `{1}`".format(nkept, kept))
logging.debug("{0} records withheld to `{1}`".format(nwithheld, withheld)) | %prog sample vcffile 0.9
Sample subset of vcf file. | Below is the instruction that describes the task:
### Input:
%prog sample vcffile 0.9
Sample subset of vcf file.
### Response:
def sample(args):
"""
%prog sample vcffile 0.9
Sample subset of vcf file.
"""
from random import random
p = OptionParser(sample.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, ratio = args
ratio = float(ratio)
fp = open(vcffile)
pf = vcffile.rsplit(".", 1)[0]
kept = pf + ".kept.vcf"
withheld = pf + ".withheld.vcf"
fwk = open(kept, "w")
fww = open(withheld, "w")
nkept = nwithheld = 0
for row in fp:
if row[0] == '#':
print(row.strip(), file=fwk)
continue
if random() < ratio:
nkept += 1
print(row.strip(), file=fwk)
else:
nwithheld += 1
print(row.strip(), file=fww)
logging.debug("{0} records kept to `{1}`".format(nkept, kept))
logging.debug("{0} records withheld to `{1}`".format(nwithheld, withheld)) |
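The function above is a thin wrapper around a simple keep/withhold split. Below is a minimal, self-contained sketch of just that split, with the jcvi OptionParser, logging, and file handling omitted; the rows and ratio are made up for illustration.

```python
# Header lines always go to "kept"; every other line is kept with probability `ratio`.
from random import random, seed

seed(0)  # deterministic for the example
ratio = 0.9
kept, withheld = [], []
for row in ["#CHROM\tPOS", "chr1\t100", "chr1\t200", "chr1\t300"]:
    if row.startswith("#") or random() < ratio:
        kept.append(row)
    else:
        withheld.append(row)
print(len(kept), len(withheld))
```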
def _integrate_mpwrap(ts_and_pks, integrate, fopts):
"""
Take a zipped timeseries and peaks found in it
and integrate it to return peaks. Used to allow
multiprocessing support.
"""
ts, tpks = ts_and_pks
pks = integrate(ts, tpks, **fopts)
# for p in pks:
# p.info['mz'] = str(ts.name)
return pks | Take a zipped timeseries and peaks found in it
and integrate it to return peaks. Used to allow
multiprocessing support. | Below is the instruction that describes the task:
### Input:
Take a zipped timeseries and peaks found in it
and integrate it to return peaks. Used to allow
multiprocessing support.
### Response:
def _integrate_mpwrap(ts_and_pks, integrate, fopts):
"""
Take a zipped timeseries and peaks found in it
and integrate it to return peaks. Used to allow
multiprocessing support.
"""
ts, tpks = ts_and_pks
pks = integrate(ts, tpks, **fopts)
# for p in pks:
# p.info['mz'] = str(ts.name)
return pks |
def get_key(self, key):
"""
Return a rich object for the given key. For instance, if
a hash key is requested, then a :py:class:`Hash` will be
returned.
:param str key: Key to retrieve.
:returns: A hash, set, list, zset or array.
"""
return self.__mapping.get(self.type(key), self.__getitem__)(key) | Return a rich object for the given key. For instance, if
a hash key is requested, then a :py:class:`Hash` will be
returned.
:param str key: Key to retrieve.
:returns: A hash, set, list, zset or array. | Below is the instruction that describes the task:
### Input:
Return a rich object for the given key. For instance, if
a hash key is requested, then a :py:class:`Hash` will be
returned.
:param str key: Key to retrieve.
:returns: A hash, set, list, zset or array.
### Response:
def get_key(self, key):
"""
Return a rich object for the given key. For instance, if
a hash key is requested, then a :py:class:`Hash` will be
returned.
:param str key: Key to retrieve.
:returns: A hash, set, list, zset or array.
"""
return self.__mapping.get(self.type(key), self.__getitem__)(key) |
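The one-liner above is a dictionary dispatch with a fallback: look up a constructor by the key's reported type and default to plain item access. A framework-free sketch of the same pattern follows; the type names and handlers are invented for illustration and are not the original class's API.

```python
# Pick a handler by the key's reported type; fall back to a plain getter.
handlers = {
    "hash": lambda key: "Hash(%s)" % key,
    "list": lambda key: "List(%s)" % key,
}

def get_key(key, key_type):
    return handlers.get(key_type, lambda k: "Value(%s)" % k)(key)

print(get_key("user:1", "hash"))      # Hash(user:1)
print(get_key("greeting", "string"))  # Value(greeting)
```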
def send_handle_get_request(self, handle, indices=None):
'''
Send a HTTP GET request to the handle server to read either an entire
handle or to some specified values from a handle record, using the
requests module.
:param handle: The handle.
:param indices: Optional. A list of indices to retrieve. Defaults to
None (i.e. the entire handle record is retrieved). The list can contain
integers or strings.
:return: The server's response.
'''
# Assemble required info:
url = self.make_handle_URL(handle, indices)
LOGGER.debug('GET Request to '+url)
head = self.__get_headers('GET')
veri = self.__HTTPS_verify
# Send the request
if self.__cert_needed_for_get_request():
# If this is the first request and the connector uses client cert authentication, we need to send the cert along
# in the first request that builds the session.
resp = self.__session.get(url, headers=head, verify=veri, cert=self.__cert_object)
else:
# Normal case:
resp = self.__session.get(url, headers=head, verify=veri)
# Log and return
self.__log_request_response_to_file(
logger=REQUESTLOGGER,
op='GET',
handle=handle,
url=url,
headers=head,
verify=veri,
resp=resp
)
self.__first_request = False
return resp | Send a HTTP GET request to the handle server to read either an entire
handle or to some specified values from a handle record, using the
requests module.
:param handle: The handle.
:param indices: Optional. A list of indices to retrieve. Defaults to
None (i.e. the entire handle record is retrieved). The list can contain
integers or strings.
:return: The server's response. | Below is the instruction that describes the task:
### Input:
Send a HTTP GET request to the handle server to read either an entire
handle or to some specified values from a handle record, using the
requests module.
:param handle: The handle.
:param indices: Optional. A list of indices to retrieve. Defaults to
None (i.e. the entire handle record is retrieved). The list can contain
integers or strings.
:return: The server's response.
### Response:
def send_handle_get_request(self, handle, indices=None):
'''
Send a HTTP GET request to the handle server to read either an entire
handle or to some specified values from a handle record, using the
requests module.
:param handle: The handle.
:param indices: Optional. A list of indices to retrieve. Defaults to
None (i.e. the entire handle record is retrieved). The list can contain
integers or strings.
:return: The server's response.
'''
# Assemble required info:
url = self.make_handle_URL(handle, indices)
LOGGER.debug('GET Request to '+url)
head = self.__get_headers('GET')
veri = self.__HTTPS_verify
# Send the request
if self.__cert_needed_for_get_request():
# If this is the first request and the connector uses client cert authentication, we need to send the cert along
# in the first request that builds the session.
resp = self.__session.get(url, headers=head, verify=veri, cert=self.__cert_object)
else:
# Normal case:
resp = self.__session.get(url, headers=head, verify=veri)
# Log and return
self.__log_request_response_to_file(
logger=REQUESTLOGGER,
op='GET',
handle=handle,
url=url,
headers=head,
verify=veri,
resp=resp
)
self.__first_request = False
return resp |
def init(cls, *args, **kwargs):
"""Initialize the config as you would a regular dict."""
instance = cls()
instance._values.update(dict(*args, **kwargs))
return instance | Initialize the config as you would a regular dict. | Below is the instruction that describes the task:
### Input:
Initialize the config as you would a regular dict.
### Response:
def init(cls, *args, **kwargs):
"""Initialize the config as you would a regular dict."""
instance = cls()
instance._values.update(dict(*args, **kwargs))
return instance |
def is_installed(pkg_name):
"""
Check if a package is installed.
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = run("dpkg -s %(pkg_name)s" % locals())
for line in res.splitlines():
if line.startswith("Status: "):
status = line[8:]
if "installed" in status.split(' '):
return True
return False | Check if a package is installed. | Below is the instruction that describes the task:
### Input:
Check if a package is installed.
### Response:
def is_installed(pkg_name):
"""
Check if a package is installed.
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = run("dpkg -s %(pkg_name)s" % locals())
for line in res.splitlines():
if line.startswith("Status: "):
status = line[8:]
if "installed" in status.split(' '):
return True
return False |
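The check above runs `dpkg -s` on a remote host through Fabric's `run`. Here is a local, Fabric-free sketch of the same status parsing (a stdlib `subprocess` substitute for Debian/Ubuntu hosts; it is not the library's own API).

```python
import subprocess

def is_installed_local(pkg_name):
    """Local dpkg check mirroring the remote version above."""
    res = subprocess.run(["dpkg", "-s", pkg_name],
                         capture_output=True, text=True)
    for line in res.stdout.splitlines():
        if line.startswith("Status: "):
            status = line[8:]
            if "installed" in status.split(" "):
                return True
    return False

print(is_installed_local("coreutils"))  # True on a typical Debian/Ubuntu system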
async def terminateInstance(self, *args, **kwargs):
"""
Terminate an instance
Terminate an instance in a specified region
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["terminateInstance"], *args, **kwargs) | Terminate an instance
Terminate an instance in a specified region
This method is ``experimental`` | Below is the instruction that describes the task:
### Input:
Terminate an instance
Terminate an instance in a specified region
This method is ``experimental``
### Response:
async def terminateInstance(self, *args, **kwargs):
"""
Terminate an instance
Terminate an instance in a specified region
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["terminateInstance"], *args, **kwargs) |
def event_trigger(self, event_type):
"""Returns a callback that creates events.
Returned callback function will add an event of type event_type
to a queue which will be checked the next time an event is requested."""
def callback(**kwargs):
self.queued_events.append(event_type(**kwargs))
return callback | Returns a callback that creates events.
Returned callback function will add an event of type event_type
to a queue which will be checked the next time an event is requested. | Below is the instruction that describes the task:
### Input:
Returns a callback that creates events.
Returned callback function will add an event of type event_type
to a queue which will be checked the next time an event is requested.
### Response:
def event_trigger(self, event_type):
"""Returns a callback that creates events.
Returned callback function will add an event of type event_type
to a queue which will be checked the next time an event is requested."""
def callback(**kwargs):
self.queued_events.append(event_type(**kwargs))
return callback |
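The factory above returns a closure that remembers its event type and appends new events to a shared queue. A standalone sketch of that pattern follows; `dict` stands in for an event class and `queued_events` is a plain list rather than the object's attribute.

```python
queued_events = []

def event_trigger(event_type):
    def callback(**kwargs):
        # Build an event of the bound type and queue it for later processing.
        queued_events.append(event_type(**kwargs))
    return callback

on_click = event_trigger(dict)  # 'dict' stands in for an event class
on_click(x=3, y=7)
print(queued_events)            # [{'x': 3, 'y': 7}]
```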
def clear(self, asset_manager_id, book_ids=None):
""" This method deletes all the data for an asset_manager_id
and optional book_ids.
It should be used with extreme caution. In production it
is almost always better to Inactivate rather than delete. """
self.logger.info('Clear Transactions & Positions - Asset Manager: %s', asset_manager_id)
url = '%s/clear/%s' % (self.endpoint, asset_manager_id)
params = {'asset_manager_ids': ','.join(book_ids)} if book_ids else {}
response = self.session.delete(url, params=params)
if response.ok:
tran_count = response.json().get('transaction_count', 'Unknown')
self.logger.info('Deleted %s Transactions.', tran_count)
pos_count = response.json().get('position_count', 'Unknown')
self.logger.info('Deleted %s Positions.', pos_count)
return response.json()
else:
self.logger.error(response.text)
response.raise_for_status() | This method deletes all the data for an asset_manager_id
and optional book_ids.
It should be used with extreme caution. In production it
is almost always better to Inactivate rather than delete. | Below is the instruction that describes the task:
### Input:
This method deletes all the data for an asset_manager_id
and optional book_ids.
It should be used with extreme caution. In production it
is almost always better to Inactivate rather than delete.
### Response:
def clear(self, asset_manager_id, book_ids=None):
""" This method deletes all the data for an asset_manager_id
and optional book_ids.
It should be used with extreme caution. In production it
is almost always better to Inactivate rather than delete. """
self.logger.info('Clear Transactions & Positions - Asset Manager: %s', asset_manager_id)
url = '%s/clear/%s' % (self.endpoint, asset_manager_id)
params = {'asset_manager_ids': ','.join(book_ids)} if book_ids else {}
response = self.session.delete(url, params=params)
if response.ok:
tran_count = response.json().get('transaction_count', 'Unknown')
self.logger.info('Deleted %s Transactions.', tran_count)
pos_count = response.json().get('position_count', 'Unknown')
self.logger.info('Deleted %s Positions.', pos_count)
return response.json()
else:
self.logger.error(response.text)
response.raise_for_status() |
def fmt_latex_output(hyps: Sequence[Sequence[str]],
refs: Sequence[Sequence[str]],
prefixes: Sequence[str],
out_fn: Path,
) -> None:
""" Output the hypotheses and references to a LaTeX source file for
pretty printing.
"""
alignments_ = [min_edit_distance_align(ref, hyp)
for hyp, ref in zip(hyps, refs)]
with out_fn.open("w") as out_f:
print(latex_header(), file=out_f)
print("\\begin{document}\n"
"\\begin{longtable}{ll}", file=out_f)
print(r"\toprule", file=out_f)
for sent in zip(prefixes, alignments_):
prefix = sent[0]
alignments = sent[1:]
print("Utterance ID: &", prefix.strip().replace(r"_", r"\_"), r"\\", file=out_f)
for i, alignment in enumerate(alignments):
ref_list = []
hyp_list = []
for arrow in alignment:
if arrow[0] == arrow[1]:
# Then don't highlight it; it's correct.
ref_list.append(arrow[0])
hyp_list.append(arrow[1])
else:
# Then highlight the errors.
ref_list.append("\\hl{%s}" % arrow[0])
hyp_list.append("\\hl{%s}" % arrow[1])
print("Ref: &", "".join(ref_list), r"\\", file=out_f)
print("Hyp: &", "".join(hyp_list), r"\\", file=out_f)
print(r"\midrule", file=out_f)
print(r"\end{longtable}", file=out_f)
print(r"\end{document}", file=out_f) | Output the hypotheses and references to a LaTeX source file for
pretty printing. | Below is the instruction that describes the task:
### Input:
Output the hypotheses and references to a LaTeX source file for
pretty printing.
### Response:
def fmt_latex_output(hyps: Sequence[Sequence[str]],
refs: Sequence[Sequence[str]],
prefixes: Sequence[str],
out_fn: Path,
) -> None:
""" Output the hypotheses and references to a LaTeX source file for
pretty printing.
"""
alignments_ = [min_edit_distance_align(ref, hyp)
for hyp, ref in zip(hyps, refs)]
with out_fn.open("w") as out_f:
print(latex_header(), file=out_f)
print("\\begin{document}\n"
"\\begin{longtable}{ll}", file=out_f)
print(r"\toprule", file=out_f)
for sent in zip(prefixes, alignments_):
prefix = sent[0]
alignments = sent[1:]
print("Utterance ID: &", prefix.strip().replace(r"_", r"\_"), r"\\", file=out_f)
for i, alignment in enumerate(alignments):
ref_list = []
hyp_list = []
for arrow in alignment:
if arrow[0] == arrow[1]:
# Then don't highlight it; it's correct.
ref_list.append(arrow[0])
hyp_list.append(arrow[1])
else:
# Then highlight the errors.
ref_list.append("\\hl{%s}" % arrow[0])
hyp_list.append("\\hl{%s}" % arrow[1])
print("Ref: &", "".join(ref_list), r"\\", file=out_f)
print("Hyp: &", "".join(hyp_list), r"\\", file=out_f)
print(r"\midrule", file=out_f)
print(r"\end{longtable}", file=out_f)
print(r"\end{document}", file=out_f) |
def fetch_by_url(self, url):
"""
Gets service for given ``url`` from mongodb storage.
"""
service = self.collection.find_one({'url': url})
if not service:
raise ServiceNotFound
return Service(service) | Gets service for given ``url`` from mongodb storage. | Below is the instruction that describes the task:
### Input:
Gets service for given ``url`` from mongodb storage.
### Response:
def fetch_by_url(self, url):
"""
Gets service for given ``url`` from mongodb storage.
"""
service = self.collection.find_one({'url': url})
if not service:
raise ServiceNotFound
return Service(service) |
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
'''
Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
'''
delta_m = mmax - mag_value
a_3 = self._get_a3_value(bbar, dbar, slip / 10., beta, mmax)
central_term = np.exp(bbar * delta_m) - 1.0 - (bbar * delta_m)
return a_3 * central_term * (delta_m > 0.0) | Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983) | Below is the instruction that describes the task:
### Input:
Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
### Response:
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
'''
Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
'''
delta_m = mmax - mag_value
a_3 = self._get_a3_value(bbar, dbar, slip / 10., beta, mmax)
central_term = np.exp(bbar * delta_m) - 1.0 - (bbar * delta_m)
return a_3 * central_term * (delta_m > 0.0) |
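A small numerical sketch of the shape term in the rate above: the `a_3` scaling comes from `_get_a3_value`, which is not shown in the source, so only the bracketed term is evaluated here, with an assumed b-value of 1.0 and mmax of 7.5.

```python
import numpy as np

bbar = 1.0 * np.log(10.0)                  # \bar{b} = b * ln(10), assumed b = 1.0
mmax = 7.5
mag_value = np.array([6.0, 6.5, 7.0, 7.5])
delta_m = mmax - mag_value
central = (np.exp(bbar * delta_m) - 1.0 - bbar * delta_m) * (delta_m > 0.0)
print(central)  # zero at mmax, growing rapidly as the threshold magnitude drops
```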
def compute_precession(jd_tdb):
"""Return the rotation matrices for precessing to an array of epochs.
`jd_tdb` - array of TDB Julian dates
The array returned has the shape `(3, 3, n)` where `n` is the number
of dates that have been provided as input.
"""
eps0 = 84381.406
# 't' is time in TDB centuries.
t = (jd_tdb - T0) / 36525.0
# Numerical coefficients of psi_a, omega_a, and chi_a, along with
# epsilon_0, the obliquity at J2000.0, are 4-angle formulation from
# Capitaine et al. (2003), eqs. (4), (37), & (39).
psia = ((((- 0.0000000951 * t
+ 0.000132851 ) * t
- 0.00114045 ) * t
- 1.0790069 ) * t
+ 5038.481507 ) * t
omegaa = ((((+ 0.0000003337 * t
- 0.000000467 ) * t
- 0.00772503 ) * t
+ 0.0512623 ) * t
- 0.025754 ) * t + eps0
chia = ((((- 0.0000000560 * t
+ 0.000170663 ) * t
- 0.00121197 ) * t
- 2.3814292 ) * t
+ 10.556403 ) * t
eps0 = eps0 * ASEC2RAD
psia = psia * ASEC2RAD
omegaa = omegaa * ASEC2RAD
chia = chia * ASEC2RAD
sa = sin(eps0)
ca = cos(eps0)
sb = sin(-psia)
cb = cos(-psia)
sc = sin(-omegaa)
cc = cos(-omegaa)
sd = sin(chia)
cd = cos(chia)
# Compute elements of precession rotation matrix equivalent to
# R3(chi_a) R1(-omega_a) R3(-psi_a) R1(epsilon_0).
rot3 = array(((cd * cb - sb * sd * cc,
cd * sb * ca + sd * cc * cb * ca - sa * sd * sc,
cd * sb * sa + sd * cc * cb * sa + ca * sd * sc),
(-sd * cb - sb * cd * cc,
-sd * sb * ca + cd * cc * cb * ca - sa * cd * sc,
-sd * sb * sa + cd * cc * cb * sa + ca * cd * sc),
(sb * sc,
-sc * cb * ca - sa * cc,
-sc * cb * sa + cc * ca)))
return rot3 | Return the rotation matrices for precessing to an array of epochs.
`jd_tdb` - array of TDB Julian dates
The array returned has the shape `(3, 3, n)` where `n` is the number
of dates that have been provided as input. | Below is the instruction that describes the task:
### Input:
Return the rotation matrices for precessing to an array of epochs.
`jd_tdb` - array of TDB Julian dates
The array returned has the shape `(3, 3, n)` where `n` is the number
of dates that have been provided as input.
### Response:
def compute_precession(jd_tdb):
"""Return the rotation matrices for precessing to an array of epochs.
`jd_tdb` - array of TDB Julian dates
The array returned has the shape `(3, 3, n)` where `n` is the number
of dates that have been provided as input.
"""
eps0 = 84381.406
# 't' is time in TDB centuries.
t = (jd_tdb - T0) / 36525.0
# Numerical coefficients of psi_a, omega_a, and chi_a, along with
# epsilon_0, the obliquity at J2000.0, are 4-angle formulation from
# Capitaine et al. (2003), eqs. (4), (37), & (39).
psia = ((((- 0.0000000951 * t
+ 0.000132851 ) * t
- 0.00114045 ) * t
- 1.0790069 ) * t
+ 5038.481507 ) * t
omegaa = ((((+ 0.0000003337 * t
- 0.000000467 ) * t
- 0.00772503 ) * t
+ 0.0512623 ) * t
- 0.025754 ) * t + eps0
chia = ((((- 0.0000000560 * t
+ 0.000170663 ) * t
- 0.00121197 ) * t
- 2.3814292 ) * t
+ 10.556403 ) * t
eps0 = eps0 * ASEC2RAD
psia = psia * ASEC2RAD
omegaa = omegaa * ASEC2RAD
chia = chia * ASEC2RAD
sa = sin(eps0)
ca = cos(eps0)
sb = sin(-psia)
cb = cos(-psia)
sc = sin(-omegaa)
cc = cos(-omegaa)
sd = sin(chia)
cd = cos(chia)
# Compute elements of precession rotation matrix equivalent to
# R3(chi_a) R1(-omega_a) R3(-psi_a) R1(epsilon_0).
rot3 = array(((cd * cb - sb * sd * cc,
cd * sb * ca + sd * cc * cb * ca - sa * sd * sc,
cd * sb * sa + sd * cc * cb * sa + ca * sd * sc),
(-sd * cb - sb * cd * cc,
-sd * sb * ca + cd * cc * cb * ca - sa * cd * sc,
-sd * sb * sa + cd * cc * cb * sa + ca * cd * sc),
(sb * sc,
-sc * cb * ca - sa * cc,
-sc * cb * sa + cc * ca)))
return rot3 |
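A worked one-off of the nested (Horner-style) polynomials above: evaluating psi_a one TDB century after J2000.0 with the same Capitaine et al. (2003) coefficients recovers the familiar general-precession rate of roughly 50.4 arcseconds per year.

```python
t = 1.0  # one TDB century after J2000.0
psia_arcsec = ((((-0.0000000951 * t
                  + 0.000132851) * t
                 - 0.00114045) * t
                - 1.0790069) * t
               + 5038.481507) * t
print(psia_arcsec)  # ~5037.4 arcseconds per century, i.e. ~50.4"/yr
```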
def rhol(self):
r'''Liquid-phase mass density of the chemical at its current
temperature and pressure, in units of [kg/m^3]. For calculation of this
property at other temperatures and pressures, or specifying manually
the method used to calculate it, and more - see the object oriented
interface :obj:`thermo.volume.VolumeLiquid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('o-xylene', T=297).rhol
876.9946785618097
'''
Vml = self.Vml
if Vml:
return Vm_to_rho(Vml, self.MW)
return None | r'''Liquid-phase mass density of the chemical at its current
temperature and pressure, in units of [kg/m^3]. For calculation of this
property at other temperatures and pressures, or specifying manually
the method used to calculate it, and more - see the object oriented
interface :obj:`thermo.volume.VolumeLiquid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('o-xylene', T=297).rhol
876.9946785618097 | Below is the instruction that describes the task:
### Input:
r'''Liquid-phase mass density of the chemical at its current
temperature and pressure, in units of [kg/m^3]. For calculation of this
property at other temperatures and pressures, or specifying manually
the method used to calculate it, and more - see the object oriented
interface :obj:`thermo.volume.VolumeLiquid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('o-xylene', T=297).rhol
876.9946785618097
### Response:
def rhol(self):
r'''Liquid-phase mass density of the chemical at its current
temperature and pressure, in units of [kg/m^3]. For calculation of this
property at other temperatures and pressures, or specifying manually
the method used to calculate it, and more - see the object oriented
interface :obj:`thermo.volume.VolumeLiquid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('o-xylene', T=297).rhol
876.9946785618097
'''
Vml = self.Vml
if Vml:
return Vm_to_rho(Vml, self.MW)
return None |
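For context, `Vm_to_rho` is assumed here to be the usual molar-volume-to-mass-density conversion; a hand calculation with o-xylene's numbers reproduces the docstring's example to within rounding.

```python
def vm_to_rho(Vm, MW):
    """Assumed conversion: Vm in m^3/mol and MW in g/mol -> density in kg/m^3."""
    return (MW / 1000.0) / Vm

# o-xylene near 297 K: MW ~ 106.165 g/mol, Vm ~ 1.211e-4 m^3/mol
print(vm_to_rho(1.211e-4, 106.165))  # roughly 877 kg/m^3, in line with the 876.99 above
```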
def _filter(self, criteria: Q, db):
"""Recursive function to filter items from dictionary"""
# Filter the dictionary objects based on the filters
negated = criteria.negated
input_db = None
if criteria.connector == criteria.AND:
# Trim database records over successive iterations
# Whatever is left at the end satisfy all criteria (AND)
input_db = db
for child in criteria.children:
if isinstance(child, Q):
input_db = self._filter(child, input_db)
else:
input_db = self.provider._evaluate_lookup(child[0], child[1],
negated, input_db)
else:
# Grow database records over successive iterations
# Whatever is left at the end satisfy any criteria (OR)
input_db = {}
for child in criteria.children:
if isinstance(child, Q):
results = self._filter(child, db)
else:
results = self.provider._evaluate_lookup(child[0], child[1], negated, db)
input_db = {**input_db, **results}
return input_db | Recursive function to filter items from dictionary | Below is the instruction that describes the task:
### Input:
Recursive function to filter items from dictionary
### Response:
def _filter(self, criteria: Q, db):
"""Recursive function to filter items from dictionary"""
# Filter the dictionary objects based on the filters
negated = criteria.negated
input_db = None
if criteria.connector == criteria.AND:
# Trim database records over successive iterations
# Whatever is left at the end satisfy all criteria (AND)
input_db = db
for child in criteria.children:
if isinstance(child, Q):
input_db = self._filter(child, input_db)
else:
input_db = self.provider._evaluate_lookup(child[0], child[1],
negated, input_db)
else:
# Grow database records over successive iterations
# Whatever is left at the end satisfy any criteria (OR)
input_db = {}
for child in criteria.children:
if isinstance(child, Q):
results = self._filter(child, db)
else:
results = self.provider._evaluate_lookup(child[0], child[1], negated, db)
input_db = {**input_db, **results}
return input_db |
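A framework-free sketch of the two branches above: AND trims a working dict across successive lookups, while OR grows the result by merging per-criterion matches. The equality lookup here is a made-up stand-in for `_evaluate_lookup`.

```python
db = {1: {"age": 30, "name": "Ann"}, 2: {"age": 40, "name": "Bob"}}

def lookup(field, value, records):
    return {k: v for k, v in records.items() if v[field] == value}

# AND: trim the working set over successive iterations
and_result = lookup("name", "Ann", lookup("age", 30, db))
# OR: grow the result set by merging per-criterion matches
or_result = {**lookup("age", 40, db), **lookup("name", "Ann", db)}
print(and_result)  # {1: {'age': 30, 'name': 'Ann'}}
print(or_result)   # both records
```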
def to_dict(self):
"""
Returns a dict representation of this instance suitable for
conversion to YAML.
"""
return {
'model_type': 'segmented_regression',
'name': self.name,
'segmentation_col': self.segmentation_col,
'fit_filters': self.fit_filters,
'predict_filters': self.predict_filters,
'min_segment_size': self.min_segment_size,
'default_config': {
'model_expression': self.default_model_expr,
'ytransform': YTRANSFORM_MAPPING[self.default_ytransform]
},
'fitted': self.fitted,
'models': {
yamlio.to_scalar_safe(name):
self._process_model_dict(m.to_dict())
for name, m in self._group.models.items()}
} | Returns a dict representation of this instance suitable for
conversion to YAML. | Below is the instruction that describes the task:
### Input:
Returns a dict representation of this instance suitable for
conversion to YAML.
### Response:
def to_dict(self):
"""
Returns a dict representation of this instance suitable for
conversion to YAML.
"""
return {
'model_type': 'segmented_regression',
'name': self.name,
'segmentation_col': self.segmentation_col,
'fit_filters': self.fit_filters,
'predict_filters': self.predict_filters,
'min_segment_size': self.min_segment_size,
'default_config': {
'model_expression': self.default_model_expr,
'ytransform': YTRANSFORM_MAPPING[self.default_ytransform]
},
'fitted': self.fitted,
'models': {
yamlio.to_scalar_safe(name):
self._process_model_dict(m.to_dict())
for name, m in self._group.models.items()}
} |
def company_prefix(self):
"""Return the identifier's company prefix part."""
offset = self.EXTRA_DIGITS
return self._id[offset:self._ref_idx] | Return the identifier's company prefix part. | Below is the instruction that describes the task:
### Input:
Return the identifier's company prefix part.
### Response:
def company_prefix(self):
"""Return the identifier's company prefix part."""
offset = self.EXTRA_DIGITS
return self._id[offset:self._ref_idx] |
def str_dict(some_dict):
"""Convert dict of ascii str/unicode to dict of str, if necessary"""
return {str(k): str(v) for k, v in some_dict.items()} | Convert dict of ascii str/unicode to dict of str, if necessary | Below is the instruction that describes the task:
### Input:
Convert dict of ascii str/unicode to dict of str, if necessary
### Response:
def str_dict(some_dict):
"""Convert dict of ascii str/unicode to dict of str, if necessary"""
return {str(k): str(v) for k, v in some_dict.items()} |
def get_kwargs(self):
"""Return kwargs from attached attributes."""
return {k: v for k, v in vars(self).items() if k not in self._ignored} | Return kwargs from attached attributes. | Below is the instruction that describes the task:
### Input:
Return kwargs from attached attributes.
### Response:
def get_kwargs(self):
"""Return kwargs from attached attributes."""
return {k: v for k, v in vars(self).items() if k not in self._ignored} |
def wm_preferences(name,
user=None,
action_double_click_titlebar=None,
action_middle_click_titlebar=None,
action_right_click_titlebar=None,
application_based=None,
audible_bell=None,
auto_raise=None,
auto_raise_delay=None,
button_layout=None,
disable_workarounds=None,
focus_mode=None,
focus_new_windows=None,
mouse_button_modifier=None,
num_workspaces=None,
raise_on_click=None,
resize_with_right_button=None,
theme=None,
titlebar_font=None,
titlebar_uses_system_font=None,
visual_bell=None,
visual_bell_type=None,
workspace_names=None,
**kwargs):
'''
wm_preferences: sets values in the org.gnome.desktop.wm.preferences schema
'''
gnome_kwargs = {
'user': user,
'schema': 'org.gnome.desktop.wm.preferences'
}
preferences = ['action_double_click_titlebar',
'action_middle_click_titlebar', 'action_right_click_titlebar',
'application_based', 'audible_bell', 'auto_raise',
'auto_raise_delay', 'button_layout', 'disable_workarounds',
'focus_mode', 'focus_new_windows', 'mouse_button_modifier',
'num_workspaces', 'raise_on_click', 'resize_with_right_button',
'theme', 'titlebar_font', 'titlebar_uses_system_font',
'visual_bell', 'visual_bell_type', 'workspace_names']
preferences_hash = {}
for pref in preferences:
if pref in locals() and locals()[pref] is not None:
key = re.sub('_', '-', pref)
preferences_hash[key] = locals()[pref]
return _do(name, gnome_kwargs, preferences_hash) | wm_preferences: sets values in the org.gnome.desktop.wm.preferences schema | Below is the instruction that describes the task:
### Input:
wm_preferences: sets values in the org.gnome.desktop.wm.preferences schema
### Response:
def wm_preferences(name,
user=None,
action_double_click_titlebar=None,
action_middle_click_titlebar=None,
action_right_click_titlebar=None,
application_based=None,
audible_bell=None,
auto_raise=None,
auto_raise_delay=None,
button_layout=None,
disable_workarounds=None,
focus_mode=None,
focus_new_windows=None,
mouse_button_modifier=None,
num_workspaces=None,
raise_on_click=None,
resize_with_right_button=None,
theme=None,
titlebar_font=None,
titlebar_uses_system_font=None,
visual_bell=None,
visual_bell_type=None,
workspace_names=None,
**kwargs):
'''
wm_preferences: sets values in the org.gnome.desktop.wm.preferences schema
'''
gnome_kwargs = {
'user': user,
'schema': 'org.gnome.desktop.wm.preferences'
}
preferences = ['action_double_click_titlebar',
'action_middle_click_titlebar', 'action_right_click_titlebar',
'application_based', 'audible_bell', 'auto_raise',
'auto_raise_delay', 'button_layout', 'disable_workarounds',
'focus_mode', 'focus_new_windows', 'mouse_button_modifier',
'num_workspaces', 'raise_on_click', 'resize_with_right_button',
'theme', 'titlebar_font', 'titlebar_uses_system_font',
'visual_bell', 'visual_bell_type', 'workspace_names']
preferences_hash = {}
for pref in preferences:
if pref in locals() and locals()[pref] is not None:
key = re.sub('_', '-', pref)
preferences_hash[key] = locals()[pref]
return _do(name, gnome_kwargs, preferences_hash) |
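The loop above scans `locals()` for preferences that were actually supplied and rewrites their names as dashed GSettings keys. A compact sketch of that collection step with invented values:

```python
import re

def collect(pref_names, values):
    """Keep only the preferences that were provided, converting
    snake_case argument names to dashed GSettings-style keys."""
    return {re.sub("_", "-", p): values[p]
            for p in pref_names if values.get(p) is not None}

print(collect(["focus_mode", "num_workspaces", "audible_bell"],
              {"focus_mode": "click", "num_workspaces": 4, "audible_bell": None}))
# {'focus-mode': 'click', 'num-workspaces': 4}
```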
def _adaptive(self, gamma=1.0, relative_tolerance=1.0e-8, maximum_iterations=1000, verbose=True, print_warning=True):
"""
Determine dimensionless free energies by a combination of Newton-Raphson iteration and self-consistent iteration.
Picks whichever method gives the lowest gradient.
Is slower than NR (approximated, not calculated) since it calculates the log norms twice each iteration.
OPTIONAL ARGUMENTS
gamma (float between 0 and 1) - incrementor for NR iterations.
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-8)
maximum_iterations (int) - maximum number of Newton-Raphson iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
This method determines the dimensionless free energies by minimizing a convex function whose solution is the desired estimator.
The original idea came from the construction of a likelihood function that independently reproduced the work of Geyer (see [1]
and Section 6 of [2]).
This can alternatively be formulated as a root-finding algorithm for the Z-estimator.
More details of this procedure will follow in a subsequent paper.
Only those states with nonzero counts are included in the estimation procedure.
REFERENCES
See Appendix C.2 of [1].
"""
if verbose:
print("Determining dimensionless free energies by Newton-Raphson iteration.")
# keep track of Newton-Raphson and self-consistent iterations
nr_iter = 0
sci_iter = 0
N_k = self.N_k[self.states_with_samples]
K = len(N_k)
f_k_sci = np.zeros([K], dtype=np.float64)
f_k_new = np.zeros([K], dtype=np.float64)
# Perform Newton-Raphson iterations (with sci computed on the way)
for iteration in range(0, maximum_iterations):
# Store for new estimate of dimensionless relative free energies.
f_k = self.f_k[self.states_with_samples].copy()
# compute weights for gradients: the denominators and free energies are from the previous
# iteration in most cases.
(W_nk, f_k_sci) = self._computeWeights(
recalc_denom=(iteration == 0), return_f_k = True)
# Compute gradient and Hessian of last (K-1) states.
#
# gradient (defined by Eq. C6 of [1])
# g_i(theta) = N_i - \sum_n N_i W_ni
#
# Hessian (defined by Eq. C9 of [1])
# H_ii(theta) = - \sum_n N_i W_ni (1 - N_i W_ni)
# H_ij(theta) = \sum_n N_i W_ni N_j W_nj
#
"""
g = np.matrix(np.zeros([K-1,1], dtype=np.float64)) # gradient
H = np.matrix(np.zeros([K-1,K-1], dtype=np.float64)) # Hessian
for i in range(1,K):
g[i-1] = N_k[i] - N_k[i] * W_nk[:,i].sum()
H[i-1,i-1] = - (N_k[i] * W_nk[:,i] * (1.0 - N_k[i] * W_nk[:,i])).sum()
for j in range(1,i):
H[i-1,j-1] = (N_k[i] * W_nk[:,i] * N_k[j] * W_nk[:,j]).sum()
H[j-1,i-1] = H[i-1,j-1]
# Update the free energy estimate (Eq. C11 of [1]).
Hinvg = linalg.lstsq(H,g)[0] #
# Hinvg = linalg.solve(H,g) # This might be faster if we can guarantee full rank.
for k in range(0,K-1):
f_k_new[k+1] = f_k[k+1] - gamma*Hinvg[k]
"""
g = N_k - N_k * W_nk.sum(axis=0)
NW = N_k * W_nk
H = np.dot(NW.T, NW)
H += (g.T - N_k) * np.eye(K)
# Update the free energy estimate (Eq. C11 of [1]).
# will always have lower rank the way it is set up
Hinvg = linalg.lstsq(H, g)[0]
Hinvg -= Hinvg[0]
f_k_new = f_k - gamma * Hinvg
# self-consistent iteration gradient norm and saved log sums.
g_sci = self._gradientF(f_k_sci)
gnorm_sci = np.dot(g_sci, g_sci)
# save this so we can switch it back in if g_sci is lower.
log_weight_denom = self.log_weight_denom.copy()
# newton raphson gradient norm and saved log sums.
g_nr = self._gradientF(f_k_new)
gnorm_nr = np.dot(g_nr, g_nr)
# we could save the gradient, too, but it's not too expensive to
# compute since we are doing the Hessian anyway.
if verbose:
print("self consistent iteration gradient norm is %10.5g, Newton-Raphson gradient norm is %10.5g" % (gnorm_sci, gnorm_nr))
# decide which direction to go depending on size of gradient norm
if (gnorm_sci < gnorm_nr or sci_iter < 2):
sci_iter += 1
self.log_weight_denom = log_weight_denom.copy()
if verbose:
if sci_iter < 2:
print("Choosing self-consistent iteration on iteration %d" % iteration)
else:
print("Choosing self-consistent iteration for lower gradient on iteration %d" % iteration)
f_k_new = f_k_sci.copy()
else:
nr_iter += 1
if verbose:
print("Newton-Raphson used on iteration %d" % iteration)
# get rid of big matrices that are not used.
del(log_weight_denom, NW, W_nk)
# have to set the free energies back in self, since the gradient
# routine changes them.
self.f_k[self.states_with_samples] = f_k
if (self._amIdoneIterating(f_k_new, relative_tolerance, iteration, maximum_iterations, print_warning, verbose)):
if verbose:
print('Of %d iterations, %d were Newton-Raphson iterations and %d were self-consistent iterations' % (iteration + 1, nr_iter, sci_iter))
break
return | Determine dimensionless free energies by a combination of Newton-Raphson iteration and self-consistent iteration.
Picks whichever method gives the lowest gradient.
Is slower than NR (approximated, not calculated) since it calculates the log norms twice each iteration.
OPTIONAL ARGUMENTS
gamma (float between 0 and 1) - incrementor for NR iterations.
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-8)
maximum_iterations (int) - maximum number of Newton-Raphson iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
This method determines the dimensionless free energies by minimizing a convex function whose solution is the desired estimator.
The original idea came from the construction of a likelihood function that independently reproduced the work of Geyer (see [1]
and Section 6 of [2]).
This can alternatively be formulated as a root-finding algorithm for the Z-estimator.
More details of this procedure will follow in a subsequent paper.
Only those states with nonzero counts are included in the estimation procedure.
REFERENCES
See Appendix C.2 of [1]. | Below is the instruction that describes the task:
### Input:
Determine dimensionless free energies by a combination of Newton-Raphson iteration and self-consistent iteration.
Picks whichever method gives the lowest gradient.
Is slower than NR (approximated, not calculated) since it calculates the log norms twice each iteration.
OPTIONAL ARGUMENTS
gamma (float between 0 and 1) - incrementor for NR iterations.
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-8)
maximum_iterations (int) - maximum number of Newton-Raphson iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
This method determines the dimensionless free energies by minimizing a convex function whose solution is the desired estimator.
The original idea came from the construction of a likelihood function that independently reproduced the work of Geyer (see [1]
and Section 6 of [2]).
This can alternatively be formulated as a root-finding algorithm for the Z-estimator.
More details of this procedure will follow in a subsequent paper.
Only those states with nonzero counts are included in the estimation procedure.
REFERENCES
See Appendix C.2 of [1].
### Response:
def _adaptive(self, gamma=1.0, relative_tolerance=1.0e-8, maximum_iterations=1000, verbose=True, print_warning=True):
"""
Determine dimensionless free energies by a combination of Newton-Raphson iteration and self-consistent iteration.
Picks whichever method gives the lowest gradient.
Is slower than NR (approximated, not calculated) since it calculates the log norms twice each iteration.
OPTIONAL ARGUMENTS
gamma (float between 0 and 1) - incrementor for NR iterations.
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-8)
maximum_iterations (int) - maximum number of Newton-Raphson iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
This method determines the dimensionless free energies by minimizing a convex function whose solution is the desired estimator.
The original idea came from the construction of a likelihood function that independently reproduced the work of Geyer (see [1]
and Section 6 of [2]).
This can alternatively be formulated as a root-finding algorithm for the Z-estimator.
More details of this procedure will follow in a subsequent paper.
Only those states with nonzero counts are included in the estimation procedure.
REFERENCES
See Appendix C.2 of [1].
"""
if verbose:
print("Determining dimensionless free energies by Newton-Raphson iteration.")
# keep track of Newton-Raphson and self-consistent iterations
nr_iter = 0
sci_iter = 0
N_k = self.N_k[self.states_with_samples]
K = len(N_k)
f_k_sci = np.zeros([K], dtype=np.float64)
f_k_new = np.zeros([K], dtype=np.float64)
# Perform Newton-Raphson iterations (with sci computed on the way)
for iteration in range(0, maximum_iterations):
# Store for new estimate of dimensionless relative free energies.
f_k = self.f_k[self.states_with_samples].copy()
# compute weights for gradients: the denominators and free energies are from the previous
# iteration in most cases.
(W_nk, f_k_sci) = self._computeWeights(
recalc_denom=(iteration == 0), return_f_k = True)
# Compute gradient and Hessian of last (K-1) states.
#
# gradient (defined by Eq. C6 of [1])
# g_i(theta) = N_i - \sum_n N_i W_ni
#
# Hessian (defined by Eq. C9 of [1])
# H_ii(theta) = - \sum_n N_i W_ni (1 - N_i W_ni)
# H_ij(theta) = \sum_n N_i W_ni N_j W_nj
#
"""
g = np.matrix(np.zeros([K-1,1], dtype=np.float64)) # gradient
H = np.matrix(np.zeros([K-1,K-1], dtype=np.float64)) # Hessian
for i in range(1,K):
g[i-1] = N_k[i] - N_k[i] * W_nk[:,i].sum()
H[i-1,i-1] = - (N_k[i] * W_nk[:,i] * (1.0 - N_k[i] * W_nk[:,i])).sum()
for j in range(1,i):
H[i-1,j-1] = (N_k[i] * W_nk[:,i] * N_k[j] * W_nk[:,j]).sum()
H[j-1,i-1] = H[i-1,j-1]
# Update the free energy estimate (Eq. C11 of [1]).
Hinvg = linalg.lstsq(H,g)[0] #
# Hinvg = linalg.solve(H,g) # This might be faster if we can guarantee full rank.
for k in range(0,K-1):
f_k_new[k+1] = f_k[k+1] - gamma*Hinvg[k]
"""
g = N_k - N_k * W_nk.sum(axis=0)
NW = N_k * W_nk
H = np.dot(NW.T, NW)
H += (g.T - N_k) * np.eye(K)
# Update the free energy estimate (Eq. C11 of [1]).
# will always have lower rank the way it is set up
Hinvg = linalg.lstsq(H, g)[0]
Hinvg -= Hinvg[0]
f_k_new = f_k - gamma * Hinvg
# self-consistent iteration gradient norm and saved log sums.
g_sci = self._gradientF(f_k_sci)
gnorm_sci = np.dot(g_sci, g_sci)
# save this so we can switch it back in if g_sci is lower.
log_weight_denom = self.log_weight_denom.copy()
# newton raphson gradient norm and saved log sums.
g_nr = self._gradientF(f_k_new)
gnorm_nr = np.dot(g_nr, g_nr)
# we could save the gradient, too, but it's not too expensive to
# compute since we are doing the Hessian anyway.
if verbose:
print("self consistent iteration gradient norm is %10.5g, Newton-Raphson gradient norm is %10.5g" % (gnorm_sci, gnorm_nr))
# decide which direction to go depending on size of gradient norm
if (gnorm_sci < gnorm_nr or sci_iter < 2):
sci_iter += 1
self.log_weight_denom = log_weight_denom.copy()
if verbose:
if sci_iter < 2:
print("Choosing self-consistent iteration on iteration %d" % iteration)
else:
print("Choosing self-consistent iteration for lower gradient on iteration %d" % iteration)
f_k_new = f_k_sci.copy()
else:
nr_iter += 1
if verbose:
print("Newton-Raphson used on iteration %d" % iteration)
# get rid of big matrices that are not used.
del(log_weight_denom, NW, W_nk)
# have to set the free energies back in self, since the gradient
# routine changes them.
self.f_k[self.states_with_samples] = f_k
if (self._amIdoneIterating(f_k_new, relative_tolerance, iteration, maximum_iterations, print_warning, verbose)):
if verbose:
print('Of %d iterations, %d were Newton-Raphson iterations and %d were self-consistent iterations' % (iteration + 1, nr_iter, sci_iter))
break
return |
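The core of the Newton-Raphson branch above is a damped least-squares solve with the gauge pinned to the first state. Here is a tiny standalone sketch of that single update step; the Hessian and gradient are random stand-ins, not MBAR quantities.

```python
import numpy as np

rng = np.random.default_rng(0)
K = 4
g = rng.normal(size=K)          # stand-in gradient
A = rng.normal(size=(K, K))
H = A @ A.T                     # stand-in symmetric Hessian
f = np.zeros(K)
gamma = 1.0

Hinvg, *_ = np.linalg.lstsq(H, g, rcond=None)
Hinvg -= Hinvg[0]               # free energies are only defined up to a constant
f_new = f - gamma * Hinvg
print(f_new)                    # f_new[0] stays at the reference value 0.0
```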
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
return _dict |
def from_file(cls, filename, **kwargs):
"""Create a reddening law from file.
If filename has 'fits' or 'fit' suffix, it is read as FITS.
Otherwise, it is read as ASCII.
Parameters
----------
filename : str
Reddening law filename.
kwargs : dict
Keywords acceptable by
:func:`~synphot.specio.read_fits_spec` (if FITS) or
:func:`~synphot.specio.read_ascii_spec` (if ASCII).
Returns
-------
redlaw : `ReddeningLaw`
Empirical reddening law.
"""
if 'flux_unit' not in kwargs:
kwargs['flux_unit'] = cls._internal_flux_unit
if ((filename.endswith('fits') or filename.endswith('fit')) and
'flux_col' not in kwargs):
kwargs['flux_col'] = 'Av/E(B-V)'
header, wavelengths, rvs = specio.read_spec(filename, **kwargs)
return cls(Empirical1D, points=wavelengths, lookup_table=rvs,
meta={'header': header}) | Create a reddening law from file.
If filename has 'fits' or 'fit' suffix, it is read as FITS.
Otherwise, it is read as ASCII.
Parameters
----------
filename : str
Reddening law filename.
kwargs : dict
Keywords acceptable by
:func:`~synphot.specio.read_fits_spec` (if FITS) or
:func:`~synphot.specio.read_ascii_spec` (if ASCII).
Returns
-------
redlaw : `ReddeningLaw`
Empirical reddening law. | Below is the instruction that describes the task:
### Input:
Create a reddening law from file.
If filename has 'fits' or 'fit' suffix, it is read as FITS.
Otherwise, it is read as ASCII.
Parameters
----------
filename : str
Reddening law filename.
kwargs : dict
Keywords acceptable by
:func:`~synphot.specio.read_fits_spec` (if FITS) or
:func:`~synphot.specio.read_ascii_spec` (if ASCII).
Returns
-------
redlaw : `ReddeningLaw`
Empirical reddening law.
### Response:
def from_file(cls, filename, **kwargs):
"""Create a reddening law from file.
If filename has 'fits' or 'fit' suffix, it is read as FITS.
Otherwise, it is read as ASCII.
Parameters
----------
filename : str
Reddening law filename.
kwargs : dict
Keywords acceptable by
:func:`~synphot.specio.read_fits_spec` (if FITS) or
:func:`~synphot.specio.read_ascii_spec` (if ASCII).
Returns
-------
redlaw : `ReddeningLaw`
Empirical reddening law.
"""
if 'flux_unit' not in kwargs:
kwargs['flux_unit'] = cls._internal_flux_unit
if ((filename.endswith('fits') or filename.endswith('fit')) and
'flux_col' not in kwargs):
kwargs['flux_col'] = 'Av/E(B-V)'
header, wavelengths, rvs = specio.read_spec(filename, **kwargs)
return cls(Empirical1D, points=wavelengths, lookup_table=rvs,
meta={'header': header}) |
def task_done(self):
'''Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
'''
self._parent._check_closing()
with self._parent._all_tasks_done:
unfinished = self._parent._unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self._parent._all_tasks_done.notify_all()
self._parent._loop.call_soon_threadsafe(
self._parent._finished.set)
self._parent._unfinished_tasks = unfinished | Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue. | Below is the instruction that describes the task:
### Input:
Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
### Response:
def task_done(self):
'''Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
'''
self._parent._check_closing()
with self._parent._all_tasks_done:
unfinished = self._parent._unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self._parent._all_tasks_done.notify_all()
self._parent._loop.call_soon_threadsafe(
self._parent._finished.set)
self._parent._unfinished_tasks = unfinished |
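The task_done()/join() contract described above is the same one the standard library's `queue.Queue` documents; here is a minimal synchronous illustration with the stdlib class (not the async-aware queue this method belongs to).

```python
import queue
import threading

q = queue.Queue()
for item in range(3):
    q.put(item)

def worker():
    while True:
        item = q.get()
        # ... process item ...
        q.task_done()          # exactly one task_done() per get()

threading.Thread(target=worker, daemon=True).start()
q.join()                       # returns once every put() item was marked done
print("all tasks done")
```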
def _extract_path_parameters_from_paths(paths):
"""
from a list of paths, return back a list of the
arguments present in those paths.
the arguments available in all of the paths must match: if not,
an exception will be raised.
"""
params = set()
for path in paths:
parts = PART_REGEX.split(path)
for p in parts:
match = PARAM_REGEX.match(p)
if match:
params.add(match.group("name"))
return params | from a list of paths, return back a list of the
arguments present in those paths.
the arguments available in all of the paths must match: if not,
an exception will be raised. | Below is the instruction that describes the task:
### Input:
from a list of paths, return back a list of the
arguments present in those paths.
the arguments available in all of the paths must match: if not,
an exception will be raised.
### Response:
def _extract_path_parameters_from_paths(paths):
"""
from a list of paths, return back a list of the
arguments present in those paths.
the arguments available in all of the paths must match: if not,
an exception will be raised.
"""
params = set()
for path in paths:
parts = PART_REGEX.split(path)
for p in parts:
match = PARAM_REGEX.match(p)
if match:
params.add(match.group("name"))
return params |
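PART_REGEX and PARAM_REGEX are not shown in the source; a self-contained sketch with assumed patterns (split on '/', match `{name}` segments) behaves like the function above.

```python
import re

PART_REGEX = re.compile(r"/")                     # assumption: split path into segments
PARAM_REGEX = re.compile(r"^\{(?P<name>\w+)\}$")  # assumption: a segment like "{name}"

def extract_params(paths):
    params = set()
    for path in paths:
        for part in PART_REGEX.split(path):
            match = PARAM_REGEX.match(part)
            if match:
                params.add(match.group("name"))
    return params

print(extract_params(["/users/{user_id}/posts/{post_id}", "/users/{user_id}"]))
# {'user_id', 'post_id'} (set order may vary)
```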
def moveSpeed(self, location, seconds=0.3):
""" Moves cursor to specified ``Location`` over ``seconds``.
If ``seconds`` is 0, moves the cursor immediately. Used for smooth
somewhat-human-like motion.
"""
self._lock.acquire()
original_location = mouse.get_position()
mouse.move(location.x, location.y, duration=seconds)
if mouse.get_position() == original_location and original_location != location.getTuple():
raise IOError("""
Unable to move mouse cursor. This may happen if you're trying to automate a
program running as Administrator with a script running as a non-elevated user.
""")
self._lock.release() | Moves cursor to specified ``Location`` over ``seconds``.
If ``seconds`` is 0, moves the cursor immediately. Used for smooth
somewhat-human-like motion. | Below is the instruction that describes the task:
### Input:
Moves cursor to specified ``Location`` over ``seconds``.
If ``seconds`` is 0, moves the cursor immediately. Used for smooth
somewhat-human-like motion.
### Response:
def moveSpeed(self, location, seconds=0.3):
""" Moves cursor to specified ``Location`` over ``seconds``.
If ``seconds`` is 0, moves the cursor immediately. Used for smooth
somewhat-human-like motion.
"""
self._lock.acquire()
original_location = mouse.get_position()
mouse.move(location.x, location.y, duration=seconds)
if mouse.get_position() == original_location and original_location != location.getTuple():
raise IOError("""
Unable to move mouse cursor. This may happen if you're trying to automate a
program running as Administrator with a script running as a non-elevated user.
""")
self._lock.release() |
def _put_cluster(self, dic, params=None):
"""Change cluster attributes"""
cluster = self._put('', ApiCluster, data=dic, params=params)
self._update(cluster)
return self | Change cluster attributes | Below is the instruction that describes the task:
### Input:
Change cluster attributes
### Response:
def _put_cluster(self, dic, params=None):
"""Change cluster attributes"""
cluster = self._put('', ApiCluster, data=dic, params=params)
self._update(cluster)
return self |
def __mgmt(name, _type, action):
'''
Perform zone management
'''
# It's permanent because the four functions concerned need the permanent option; it's wrong without it
cmd = '--{0}-{1}={2} --permanent'.format(action, _type, name)
return __firewall_cmd(cmd) | Perform zone management | Below is the instruction that describes the task:
### Input:
Perform zone management
### Response:
def __mgmt(name, _type, action):
'''
Perform zone management
'''
# It's permanent because the four functions concerned need the permanent option; it's wrong without it
cmd = '--{0}-{1}={2} --permanent'.format(action, _type, name)
return __firewall_cmd(cmd) |
def __getitem_slice(self, slce):
"""Return a range which represents the requested slce
of the sequence represented by this range.
"""
scaled_indices = (self._step * n for n in slce.indices(self._len))
start_offset, stop_offset, new_step = scaled_indices
return newrange(self._start + start_offset,
self._start + stop_offset,
new_step) | Return a range which represents the requested slce
of the sequence represented by this range. | Below is the instruction that describes the task:
### Input:
Return a range which represents the requested slce
of the sequence represented by this range.
### Response:
def __getitem_slice(self, slce):
"""Return a range which represents the requested slce
of the sequence represented by this range.
"""
scaled_indices = (self._step * n for n in slce.indices(self._len))
start_offset, stop_offset, new_step = scaled_indices
return newrange(self._start + start_offset,
self._start + stop_offset,
new_step) |
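The scaling above leans on `slice.indices()`, which normalises a slice's start, stop, and step against a sequence length; a quick illustration, with the built-in range shown for comparison:

```python
s = slice(1, None, 2)
print(s.indices(10))     # (1, 10, 2): start, stop, step for a length-10 sequence

r = range(3, 23, 2)      # 3, 5, ..., 21 (length 10)
print(list(r[1::2]))     # [5, 9, 13, 17, 21]: start and step both scaled by r's step
```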
def keyPressEvent(self, event):
"""
Exits the modal window on an escape press.
:param event | <QtCore.QKeyPressEvent>
"""
if event.key() == QtCore.Qt.Key_Escape:
self.reject()
super(XOverlayWidget, self).keyPressEvent(event) | Exits the modal window on an escape press.
:param event | <QtCore.QKeyPressEvent> | Below is the instruction that describes the task:
### Input:
Exits the modal window on an escape press.
:param event | <QtCore.QKeyPressEvent>
### Response:
def keyPressEvent(self, event):
"""
Exits the modal window on an escape press.
:param event | <QtCore.QKeyPressEvent>
"""
if event.key() == QtCore.Qt.Key_Escape:
self.reject()
super(XOverlayWidget, self).keyPressEvent(event) |
def find_cycle(self):
"""greedy search for a cycle"""
for node in self.nodes:
cyc = self._follow_children(node)
if len(cyc) > 0:
return [self._nodes[x] for x in cyc]
return None | greedy search for a cycle | Below is the the instruction that describes the task:
### Input:
greedy search for a cycle
### Response:
def find_cycle(self):
"""greedy search for a cycle"""
for node in self.nodes:
cyc = self._follow_children(node)
if len(cyc) > 0:
return [self._nodes[x] for x in cyc]
return None |
def is_mouse_over(self, event):
"""
Check whether a MouseEvent is over this scroll bar.
:param event: The MouseEvent to check.
:returns: True if the mouse event is over the scroll bar.
"""
return event.x == self._x and self._y <= event.y < self._y + self._height | Check whether a MouseEvent is over this scroll bar.
:param event: The MouseEvent to check.
:returns: True if the mouse event is over the scroll bar. | Below is the instruction that describes the task:
### Input:
Check whether a MouseEvent is over this scroll bar.
:param event: The MouseEvent to check.
:returns: True if the mouse event is over the scroll bar.
### Response:
def is_mouse_over(self, event):
"""
Check whether a MouseEvent is over this scroll bar.
:param event: The MouseEvent to check.
:returns: True if the mouse event is over the scroll bar.
"""
return event.x == self._x and self._y <= event.y < self._y + self._height |
def margin_to_exchange(self, symbol, currency, amount):
"""
Transfer out of the margin (loan) account into the spot (exchange) account.
:param amount:
:param currency:
:param symbol:
:return:
"""
params = {'symbol': symbol, 'currency': currency, 'amount': amount}
path = '/v1/dw/transfer-out/margin'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper | Transfer out of the margin (loan) account into the spot (exchange) account.
:param amount:
:param currency:
:param symbol:
:return: | Below is the instruction that describes the task:
### Input:
Transfer out of the margin (loan) account into the spot (exchange) account.
:param amount:
:param currency:
:param symbol:
:return:
### Response:
def margin_to_exchange(self, symbol, currency, amount):
"""
Transfer out of the margin (loan) account into the spot (exchange) account.
:param amount:
:param currency:
:param symbol:
:return:
"""
params = {'symbol': symbol, 'currency': currency, 'amount': amount}
path = '/v1/dw/transfer-out/margin'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper |
def as_sql(self, qn, connection):
"""
This method identifies joined table aliases in order for
VersionedExtraWhere.as_sql() to be able to add time restrictions for
those tables based on the VersionedQuery's querytime value.
:param qn: In Django 1.7 & 1.8 this is a compiler
:param connection: A DB connection
:return: A tuple consisting of (sql_string, result_params)
"""
# self.children is an array of VersionedExtraWhere-objects
from versions.fields import VersionedExtraWhere
for child in self.children:
if isinstance(child, VersionedExtraWhere) and not child.params:
_query = qn.query
query_time = _query.querytime.time
apply_query_time = _query.querytime.active
alias_map = _query.alias_map
self._set_child_joined_alias(child, alias_map)
if apply_query_time:
# Add query parameters that have not been added till now
child.set_as_of(query_time)
else:
# Remove the restriction if it's not required
child.sqls = []
return super(VersionedWhereNode, self).as_sql(qn, connection) | This method identifies joined table aliases in order for
VersionedExtraWhere.as_sql() to be able to add time restrictions for
those tables based on the VersionedQuery's querytime value.
:param qn: In Django 1.7 & 1.8 this is a compiler
:param connection: A DB connection
:return: A tuple consisting of (sql_string, result_params) | Below is the instruction that describes the task:
### Input:
This method identifies joined table aliases in order for
VersionedExtraWhere.as_sql() to be able to add time restrictions for
those tables based on the VersionedQuery's querytime value.
:param qn: In Django 1.7 & 1.8 this is a compiler
:param connection: A DB connection
:return: A tuple consisting of (sql_string, result_params)
### Response:
def as_sql(self, qn, connection):
"""
This method identifies joined table aliases in order for
VersionedExtraWhere.as_sql() to be able to add time restrictions for
those tables based on the VersionedQuery's querytime value.
:param qn: In Django 1.7 & 1.8 this is a compiler
:param connection: A DB connection
:return: A tuple consisting of (sql_string, result_params)
"""
# self.children is an array of VersionedExtraWhere-objects
from versions.fields import VersionedExtraWhere
for child in self.children:
if isinstance(child, VersionedExtraWhere) and not child.params:
_query = qn.query
query_time = _query.querytime.time
apply_query_time = _query.querytime.active
alias_map = _query.alias_map
self._set_child_joined_alias(child, alias_map)
if apply_query_time:
# Add query parameters that have not been added till now
child.set_as_of(query_time)
else:
# Remove the restriction if it's not required
child.sqls = []
return super(VersionedWhereNode, self).as_sql(qn, connection) |
def blend(self, clr, factor=0.5):
"""
Returns a mix of two colors.
"""
r = self.r * (1 - factor) + clr.r * factor
g = self.g * (1 - factor) + clr.g * factor
b = self.b * (1 - factor) + clr.b * factor
a = self.a * (1 - factor) + clr.a * factor
return Color(r, g, b, a, mode="rgb") | Returns a mix of two colors. | Below is the instruction that describes the task:
### Input:
Returns a mix of two colors.
### Response:
def blend(self, clr, factor=0.5):
"""
Returns a mix of two colors.
"""
r = self.r * (1 - factor) + clr.r * factor
g = self.g * (1 - factor) + clr.g * factor
b = self.b * (1 - factor) + clr.b * factor
a = self.a * (1 - factor) + clr.a * factor
return Color(r, g, b, a, mode="rgb") |
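A minimal usage sketch for blend(); it assumes the Color class above stores r, g, b, a as floats in [0, 1] and accepts them positionally with mode="rgb", as its own constructor call suggests:
red = Color(1.0, 0.0, 0.0, 1.0, mode="rgb")
blue = Color(0.0, 0.0, 1.0, 1.0, mode="rgb")
purple = red.blend(blue, factor=0.5)
# every channel is linearly interpolated, so purple is roughly (0.5, 0.0, 0.5, 1.0)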
def expand_with_style(template, style, data, body_subtree='body'):
"""Expand a data dictionary with a template AND a style.
DEPRECATED -- Remove this entire function in favor of expand(d, style=style)
A style is a Template instance that factors out the common strings in several
"body" templates.
Args:
template: Template instance for the inner "page content"
style: Template instance for the outer "page style"
data: Data dictionary, with a 'body' key (or body_subtree
"""
if template.has_defines:
return template.expand(data, style=style)
else:
tokens = []
execute_with_style_LEGACY(template, style, data, tokens.append,
body_subtree=body_subtree)
return JoinTokens(tokens) | Expand a data dictionary with a template AND a style.
DEPRECATED -- Remove this entire function in favor of expand(d, style=style)
A style is a Template instance that factors out the common strings in several
"body" templates.
Args:
template: Template instance for the inner "page content"
style: Template instance for the outer "page style"
data: Data dictionary, with a 'body' key (or body_subtree) | Below is the instruction that describes the task:
### Input:
Expand a data dictionary with a template AND a style.
DEPRECATED -- Remove this entire function in favor of expand(d, style=style)
A style is a Template instance that factors out the common strings in several
"body" templates.
Args:
template: Template instance for the inner "page content"
style: Template instance for the outer "page style"
data: Data dictionary, with a 'body' key (or body_subtree)
### Response:
def expand_with_style(template, style, data, body_subtree='body'):
"""Expand a data dictionary with a template AND a style.
DEPRECATED -- Remove this entire function in favor of expand(d, style=style)
A style is a Template instance that factors out the common strings in several
"body" templates.
Args:
template: Template instance for the inner "page content"
style: Template instance for the outer "page style"
data: Data dictionary, with a 'body' key (or body_subtree)
"""
if template.has_defines:
return template.expand(data, style=style)
else:
tokens = []
execute_with_style_LEGACY(template, style, data, tokens.append,
body_subtree=body_subtree)
return JoinTokens(tokens) |
def t_t_eopen(self, t):
r'~"|~\''
if t.value[1] == '"':
t.lexer.push_state('escapequotes')
elif t.value[1] == '\'':
t.lexer.push_state('escapeapostrophe')
return t | r'~"|~\ | Below is the instruction that describes the task:
### Input:
r'~"|~\
### Response:
def t_t_eopen(self, t):
r'~"|~\''
if t.value[1] == '"':
t.lexer.push_state('escapequotes')
elif t.value[1] == '\'':
t.lexer.push_state('escapeapostrophe')
return t |
def _all_possible_indices(self, column_names):
"""
Create list of tuples containing all possible index groups
we might want to create over tables in this database.
If a set of genome annotations is missing some column we want
to index on, we have to drop any indices which use that column.
A specific table may later drop some of these indices if they're
missing values for that feature or are the same as the table's primary key.
"""
candidate_column_groups = [
['seqname', 'start', 'end'],
['gene_name'],
['gene_id'],
['transcript_id'],
['transcript_name'],
['exon_id'],
['protein_id'],
['ccds_id'],
]
indices = []
column_set = set(column_names)
# Since queries are often restricted by feature type
# we should include that column in combination with all
# other indices we anticipate might improve performance
for column_group in candidate_column_groups:
skip = False
for column_name in column_group:
# some columns, such as 'exon_id',
# are not available in all releases of Ensembl (or
# other GTFs)
if column_name not in column_set:
logger.info(
"Skipping database index for {%s}",
", ".join(column_group))
skip = True
if skip:
continue
indices.append(column_group)
return indices | Create list of tuples containing all possible index groups
we might want to create over tables in this database.
If a set of genome annotations is missing some column we want
to index on, we have to drop any indices which use that column.
A specific table may later drop some of these indices if they're
missing values for that feature or are the same as the table's primary key. | Below is the instruction that describes the task:
### Input:
Create list of tuples containing all possible index groups
we might want to create over tables in this database.
If a set of genome annotations is missing some column we want
to index on, we have to drop any indices which use that column.
A specific table may later drop some of these indices if they're
missing values for that feature or are the same as the table's primary key.
### Response:
def _all_possible_indices(self, column_names):
"""
Create list of tuples containing all possible index groups
we might want to create over tables in this database.
If a set of genome annotations is missing some column we want
to index on, we have to drop any indices which use that column.
A specific table may later drop some of these indices if they're
missing values for that feature or are the same as the table's primary key.
"""
candidate_column_groups = [
['seqname', 'start', 'end'],
['gene_name'],
['gene_id'],
['transcript_id'],
['transcript_name'],
['exon_id'],
['protein_id'],
['ccds_id'],
]
indices = []
column_set = set(column_names)
# Since queries are often restricted by feature type
# we should include that column in combination with all
# other indices we anticipate might improve performance
for column_group in candidate_column_groups:
skip = False
for column_name in column_group:
# some columns, such as 'exon_id',
# are not available in all releases of Ensembl (or
# other GTFs)
if column_name not in column_set:
logger.info(
"Skipping database index for {%s}",
", ".join(column_group))
skip = True
if skip:
continue
indices.append(column_group)
return indices |
def statements(self):
'''Return a list of statements
This is done by joining together any rows that
have continuations
'''
# FIXME: no need to do this every time; we should cache the
# result
if len(self.rows) == 0:
return []
current_statement = Statement(self.rows[0])
current_statement.startline = self.rows[0].linenumber
current_statement.endline = self.rows[0].linenumber
statements = []
for row in self.rows[1:]:
if len(row) > 0 and row[0] == "...":
# we found a continuation
current_statement += row[1:]
current_statement.endline = row.linenumber
else:
if len(current_statement) > 0:
# append current statement to the list of statements...
statements.append(current_statement)
# start a new statement
current_statement = Statement(row)
current_statement.startline = row.linenumber
current_statement.endline = row.linenumber
if len(current_statement) > 0:
statements.append(current_statement)
# trim trailing blank statements
while (len(statements[-1]) == 0 or
((len(statements[-1]) == 1) and len(statements[-1][0]) == 0)):
statements.pop()
return statements | Return a list of statements
This is done by joining together any rows that
have continuations | Below is the instruction that describes the task:
### Input:
Return a list of statements
This is done by joining together any rows that
have continuations
### Response:
def statements(self):
'''Return a list of statements
This is done by joining together any rows that
have continuations
'''
# FIXME: no need to do this every time; we should cache the
# result
if len(self.rows) == 0:
return []
current_statement = Statement(self.rows[0])
current_statement.startline = self.rows[0].linenumber
current_statement.endline = self.rows[0].linenumber
statements = []
for row in self.rows[1:]:
if len(row) > 0 and row[0] == "...":
# we found a continuation
current_statement += row[1:]
current_statement.endline = row.linenumber
else:
if len(current_statement) > 0:
# append current statement to the list of statements...
statements.append(current_statement)
# start a new statement
current_statement = Statement(row)
current_statement.startline = row.linenumber
current_statement.endline = row.linenumber
if len(current_statement) > 0:
statements.append(current_statement)
# trim trailing blank statements
while (len(statements[-1]) == 0 or
((len(statements[-1]) == 1) and len(statements[-1][0]) == 0)):
statements.pop()
return statements |
def cross_signal(s1, s2, continuous=0):
""" return a signal with the following
1 : when all values of s1 cross above all values of s2
-1 : when all values of s1 cross below all values of s2
0 : if s1 < max(s2) and s1 > min(s2)
np.nan : if s1 or s2 contains np.nan at position
s1: Series, DataFrame, float, int, or tuple(float|int)
s2: Series, DataFrame, float, int, or tuple(float|int)
continuous: bool, if true then once the signal starts it is always 1 or -1
"""
def _convert(src, other):
if isinstance(src, pd.DataFrame):
return src.min(axis=1, skipna=0), src.max(axis=1, skipna=0)
elif isinstance(src, pd.Series):
return src, src
elif isinstance(src, (int, float)):
s = pd.Series(src, index=other.index)
return s, s
elif isinstance(src, (tuple, list)):
l, u = min(src), max(src)
assert l <= u, 'lower bound must be less than upper bound'
lower, upper = pd.Series(l, index=other.index), pd.Series(u, index=other.index)
return lower, upper
else:
raise Exception('unable to handle type %s' % type(src))
lower1, upper1 = _convert(s1, s2)
lower2, upper2 = _convert(s2, s1)
df = pd.DataFrame({'upper1': upper1, 'lower1': lower1, 'upper2': upper2, 'lower2': lower2})
df.ffill(inplace=True)
signal = pd.Series(np.nan, index=df.index)
signal[df.upper1 > df.upper2] = 1
signal[df.lower1 < df.lower2] = -1
if continuous:
# Just roll with 1, -1
signal = signal.fillna(method='ffill')
m1, m2 = df.upper1.first_valid_index(), df.upper2.first_valid_index()
if m1 is not None or m2 is not None:
m1 = m2 if m1 is None else m1
m2 = m1 if m2 is None else m2
fv = max(m1, m2)
if np.isnan(signal[fv]):
signal[fv] = 0
signal.ffill(inplace=1)
else:
signal[(df.upper1 < df.upper2) & (df.lower1 > df.lower2)] = 0
# special handling when equal, determine where it previously was
eq = (df.upper1 == df.upper2)
if eq.any(): # Set to prior value
tmp = signal[eq]
for i in tmp.index:
loc = signal.index.get_loc(i)
if loc != 0:
u, l = df.upper2.iloc[loc], df.lower2.iloc[loc]
ps = signal.iloc[loc - 1]
if u == l or ps == 1.: # Line coming from above upper bound if ps == 1
signal[i] = ps
else:
signal[i] = 0
eq = (df.lower1 == df.lower2)
if eq.any(): # Set to prior value
tmp = signal[eq]
for i in tmp.index:
loc = signal.index.get_loc(i)
if loc != 0:
u, l = df.upper2.iloc[loc], df.lower2.iloc[loc]
ps = signal.iloc[loc - 1]
if u == l or ps == -1.: # Line coming from below lower bound if ps == -1
signal[i] = ps
else:
signal[i] = 0
return signal | return a signal with the following
1 : when all values of s1 cross above all values of s2
-1 : when all values of s1 cross below all values of s2
0 : if s1 < max(s2) and s1 > min(s2)
np.nan : if s1 or s2 contains np.nan at position
s1: Series, DataFrame, float, int, or tuple(float|int)
s2: Series, DataFrame, float, int, or tuple(float|int)
continuous: bool, if true then once the signal starts it is always 1 or -1 | Below is the instruction that describes the task:
### Input:
return a signal with the following
1 : when all values of s1 cross above all values of s2
-1 : when all values of s1 cross below all values of s2
0 : if s1 < max(s2) and s1 > min(s2)
np.nan : if s1 or s2 contains np.nan at position
s1: Series, DataFrame, float, int, or tuple(float|int)
s2: Series, DataFrame, float, int, or tuple(float|int)
continuous: bool, if true then once the signal starts it is always 1 or -1
### Response:
def cross_signal(s1, s2, continuous=0):
""" return a signal with the following
1 : when all values of s1 cross above all values of s2
-1 : when all values of s1 cross below all values of s2
0 : if s1 < max(s2) and s1 > min(s2)
np.nan : if s1 or s2 contains np.nan at position
s1: Series, DataFrame, float, int, or tuple(float|int)
s2: Series, DataFrame, float, int, or tuple(float|int)
continuous: bool, if true then once the signal starts it is always 1 or -1
"""
def _convert(src, other):
if isinstance(src, pd.DataFrame):
return src.min(axis=1, skipna=0), src.max(axis=1, skipna=0)
elif isinstance(src, pd.Series):
return src, src
elif isinstance(src, (int, float)):
s = pd.Series(src, index=other.index)
return s, s
elif isinstance(src, (tuple, list)):
l, u = min(src), max(src)
assert l <= u, 'lower bound must be less than upper bound'
lower, upper = pd.Series(l, index=other.index), pd.Series(u, index=other.index)
return lower, upper
else:
raise Exception('unable to handle type %s' % type(src))
lower1, upper1 = _convert(s1, s2)
lower2, upper2 = _convert(s2, s1)
df = pd.DataFrame({'upper1': upper1, 'lower1': lower1, 'upper2': upper2, 'lower2': lower2})
df.ffill(inplace=True)
signal = pd.Series(np.nan, index=df.index)
signal[df.upper1 > df.upper2] = 1
signal[df.lower1 < df.lower2] = -1
if continuous:
# Just roll with 1, -1
signal = signal.fillna(method='ffill')
m1, m2 = df.upper1.first_valid_index(), df.upper2.first_valid_index()
if m1 is not None or m2 is not None:
m1 = m2 if m1 is None else m1
m2 = m1 if m2 is None else m2
fv = max(m1, m2)
if np.isnan(signal[fv]):
signal[fv] = 0
signal.ffill(inplace=1)
else:
signal[(df.upper1 < df.upper2) & (df.lower1 > df.lower2)] = 0
# special handling when equal, determine where it previously was
eq = (df.upper1 == df.upper2)
if eq.any(): # Set to prior value
tmp = signal[eq]
for i in tmp.index:
loc = signal.index.get_loc(i)
if loc != 0:
u, l = df.upper2.iloc[loc], df.lower2.iloc[loc]
ps = signal.iloc[loc - 1]
if u == l or ps == 1.: # Line coming from above upper bound if ps == 1
signal[i] = ps
else:
signal[i] = 0
eq = (df.lower1 == df.lower2)
if eq.any(): # Set to prior value
tmp = signal[eq]
for i in tmp.index:
loc = signal.index.get_loc(i)
if loc != 0:
u, l = df.upper2.iloc[loc], df.lower2.iloc[loc]
ps = signal.iloc[loc - 1]
if u == l or ps == -1.: # Line coming from below lower bound if ps == -1
signal[i] = ps
else:
signal[i] = 0
return signal |
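A short illustrative call to cross_signal(); pandas is assumed importable and the values are invented to show a single upward cross:
import pandas as pd

idx = pd.date_range("2020-01-01", periods=5)
fast = pd.Series([1.0, 2.0, 3.0, 2.0, 1.0], index=idx)
slow = pd.Series(2.0, index=idx)
sig = cross_signal(fast, slow)
# -1 while fast sits below slow, +1 after it crosses above; ties keep the prior state
print(sig.tolist())  # expected: [-1.0, -1.0, 1.0, 1.0, -1.0]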
def wrap_star_digger(item, type_str, data_name='Value'):
"""
code used to extract data from Bing's wrap star
:param item: wrap star obj
:param type_str: target type string
:param data_name: target data label, might be "Entities", "Properties", 'Value'
:return: list of all matched targets, arranged in order of occurrence
"""
ret = []
if type(item) == dict:
if 'Type' in item and item['Type'] == type_str and data_name in item: # 'Business.Consumer_Product.Description'
if len(item[data_name]) > 1:
# print 'length error!!!!!!!!!!!'
pass
return item[data_name]
else:
for k in item:
sub_ret = wrap_star_digger(item[k], type_str, data_name)
if sub_ret:
ret.extend(sub_ret)
elif type(item) == list:
for i in item:
sub_ret = wrap_star_digger(i, type_str, data_name)
if sub_ret:
ret.extend(sub_ret)
return ret | code used to extract data from Bing's wrap star
:param item: wrap star obj
:param type_str: target type string
:param data_name: target data label, might be "Entities", "Properties", 'Value'
:return: list of all matched targets, arranged in order of occurrence | Below is the instruction that describes the task:
### Input:
code used to extract data from Bing's wrap star
:param item: wrap star obj
:param type_str: target type string
:param data_name: target data label, might be "Entities", "Properties", 'Value'
:return: list of all matched targets, arranged in order of occurrence
### Response:
def wrap_star_digger(item, type_str, data_name='Value'):
"""
code used to extract data from Bing's wrap star
:param item: wrap star obj
:param type_str: target type string
:param data_name: target data label, might be "Entities", "Properties", 'Value'
:return: list of all matched targets, arranged in order of occurrence
"""
ret = []
if type(item) == dict:
if 'Type' in item and item['Type'] == type_str and data_name in item: # 'Business.Consumer_Product.Description'
if len(item[data_name]) > 1:
# print 'length error!!!!!!!!!!!'
pass
return item[data_name]
else:
for k in item:
sub_ret = wrap_star_digger(item[k], type_str, data_name)
if sub_ret:
ret.extend(sub_ret)
elif type(item) == list:
for i in item:
sub_ret = wrap_star_digger(i, type_str, data_name)
if sub_ret:
ret.extend(sub_ret)
return ret |
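An invented payload showing how wrap_star_digger() walks nested dicts and lists; the type string mirrors the one mentioned in the comment above:
payload = {
    "Results": [
        {"Type": "Business.Consumer_Product.Description", "Value": ["A compact camera"]},
        {"Type": "Some.Other.Type", "Value": ["ignored"]},
        {"Nested": [{"Type": "Business.Consumer_Product.Description", "Value": ["A tripod"]}]},
    ]
}
print(wrap_star_digger(payload, "Business.Consumer_Product.Description"))
# ['A compact camera', 'A tripod']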
def list_(saltenv='base', test=None):
'''
List currently configured reactors
CLI Example:
.. code-block:: bash
salt-run reactor.list
'''
sevent = salt.utils.event.get_event(
'master',
__opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=True)
master_key = salt.utils.master.get_master_key('root', __opts__)
__jid_event__.fire_event({'key': master_key}, 'salt/reactors/manage/list')
results = sevent.get_event(wait=30, tag='salt/reactors/manage/list-results')
reactors = results['reactors']
return reactors | List currently configured reactors
CLI Example:
.. code-block:: bash
salt-run reactor.list | Below is the instruction that describes the task:
### Input:
List currently configured reactors
CLI Example:
.. code-block:: bash
salt-run reactor.list
### Response:
def list_(saltenv='base', test=None):
'''
List currently configured reactors
CLI Example:
.. code-block:: bash
salt-run reactor.list
'''
sevent = salt.utils.event.get_event(
'master',
__opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=True)
master_key = salt.utils.master.get_master_key('root', __opts__)
__jid_event__.fire_event({'key': master_key}, 'salt/reactors/manage/list')
results = sevent.get_event(wait=30, tag='salt/reactors/manage/list-results')
reactors = results['reactors']
return reactors |
def create_privkey(self):
"""
This is called by post_build() for key creation.
"""
if self.group in _tls_named_ffdh_groups:
params = _ffdh_groups[_tls_named_ffdh_groups[self.group]][0]
privkey = params.generate_private_key()
self.privkey = privkey
pubkey = privkey.public_key()
self.key_exchange = pubkey.public_numbers().y
elif self.group in _tls_named_curves:
if _tls_named_curves[self.group] == "x25519":
if conf.crypto_valid_advanced:
privkey = x25519.X25519PrivateKey.generate()
self.privkey = privkey
pubkey = privkey.public_key()
self.key_exchange = pubkey.public_bytes()
elif _tls_named_curves[self.group] != "x448":
curve = ec._CURVE_TYPES[_tls_named_curves[self.group]]()
privkey = ec.generate_private_key(curve, default_backend())
self.privkey = privkey
pubkey = privkey.public_key()
try:
# cryptography >= 2.5
self.key_exchange = pubkey.public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint
)
except TypeError:
# older versions
self.key_exchange = pubkey.public_numbers().encode_point() | This is called by post_build() for key creation. | Below is the instruction that describes the task:
### Input:
This is called by post_build() for key creation.
### Response:
def create_privkey(self):
"""
This is called by post_build() for key creation.
"""
if self.group in _tls_named_ffdh_groups:
params = _ffdh_groups[_tls_named_ffdh_groups[self.group]][0]
privkey = params.generate_private_key()
self.privkey = privkey
pubkey = privkey.public_key()
self.key_exchange = pubkey.public_numbers().y
elif self.group in _tls_named_curves:
if _tls_named_curves[self.group] == "x25519":
if conf.crypto_valid_advanced:
privkey = x25519.X25519PrivateKey.generate()
self.privkey = privkey
pubkey = privkey.public_key()
self.key_exchange = pubkey.public_bytes()
elif _tls_named_curves[self.group] != "x448":
curve = ec._CURVE_TYPES[_tls_named_curves[self.group]]()
privkey = ec.generate_private_key(curve, default_backend())
self.privkey = privkey
pubkey = privkey.public_key()
try:
# cryptography >= 2.5
self.key_exchange = pubkey.public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint
)
except TypeError:
# older versions
self.key_exchange = pubkey.public_numbers().encode_point() |
def prepare_feature(best_processed_path, option='train'):
"""
Transform processed path into feature matrix and output array
Input
=====
best_processed_path: str, path to processed BEST dataset
option: str, 'train' or 'test'
"""
# padding for training and testing set
n_pad = 21
n_pad_2 = int((n_pad - 1)/2)
pad = [{'char': ' ', 'type': 'p', 'target': True}]
df_pad = pd.DataFrame(pad * n_pad_2)
df = []
for article_type in article_types:
df.append(pd.read_csv(os.path.join(best_processed_path, option, 'df_best_{}_{}.csv'.format(article_type, option))))
df = pd.concat(df)
df = pd.concat((df_pad, df, df_pad)) # pad with empty string feature
df['char'] = df['char'].map(lambda x: CHARS_MAP.get(x, 80))
df['type'] = df['type'].map(lambda x: CHAR_TYPES_MAP.get(x, 4))
df_pad = create_n_gram_df(df, n_pad=n_pad)
char_row = ['char' + str(i + 1) for i in range(n_pad_2)] + \
['char-' + str(i + 1) for i in range(n_pad_2)] + ['char']
type_row = ['type' + str(i + 1) for i in range(n_pad_2)] + \
['type-' + str(i + 1) for i in range(n_pad_2)] + ['type']
x_char = df_pad[char_row].as_matrix()
x_type = df_pad[type_row].as_matrix()
y = df_pad['target'].astype(int).as_matrix()
return x_char, x_type, y | Transform processed path into feature matrix and output array
Input
=====
best_processed_path: str, path to processed BEST dataset
option: str, 'train' or 'test' | Below is the instruction that describes the task:
### Input:
Transform processed path into feature matrix and output array
Input
=====
best_processed_path: str, path to processed BEST dataset
option: str, 'train' or 'test'
### Response:
def prepare_feature(best_processed_path, option='train'):
"""
Transform processed path into feature matrix and output array
Input
=====
best_processed_path: str, path to processed BEST dataset
option: str, 'train' or 'test'
"""
# padding for training and testing set
n_pad = 21
n_pad_2 = int((n_pad - 1)/2)
pad = [{'char': ' ', 'type': 'p', 'target': True}]
df_pad = pd.DataFrame(pad * n_pad_2)
df = []
for article_type in article_types:
df.append(pd.read_csv(os.path.join(best_processed_path, option, 'df_best_{}_{}.csv'.format(article_type, option))))
df = pd.concat(df)
df = pd.concat((df_pad, df, df_pad)) # pad with empty string feature
df['char'] = df['char'].map(lambda x: CHARS_MAP.get(x, 80))
df['type'] = df['type'].map(lambda x: CHAR_TYPES_MAP.get(x, 4))
df_pad = create_n_gram_df(df, n_pad=n_pad)
char_row = ['char' + str(i + 1) for i in range(n_pad_2)] + \
['char-' + str(i + 1) for i in range(n_pad_2)] + ['char']
type_row = ['type' + str(i + 1) for i in range(n_pad_2)] + \
['type-' + str(i + 1) for i in range(n_pad_2)] + ['type']
x_char = df_pad[char_row].as_matrix()
x_type = df_pad[type_row].as_matrix()
y = df_pad['target'].astype(int).as_matrix()
return x_char, x_type, y |
def __bind(self):
'''
Binds the reply server
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
# Wait for kill should be less than parent's ProcessManager.
self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager',
wait_for_kill=1)
req_channels = []
tcp_only = True
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
if transport != 'tcp':
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1:
log.warning('TCP transport supports only 1 worker on Windows '
'when using Python 2.')
self.opts['worker_threads'] = 1
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts['worker_threads'])):
name = 'MWorker-{0}'.format(ind)
self.process_manager.add_process(MWorker,
args=(self.opts,
self.master_key,
self.key,
req_channels,
name),
kwargs=kwargs,
name=name)
self.process_manager.run() | Binds the reply server | Below is the instruction that describes the task:
### Input:
Binds the reply server
### Response:
def __bind(self):
'''
Binds the reply server
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
# Wait for kill should be less than parent's ProcessManager.
self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager',
wait_for_kill=1)
req_channels = []
tcp_only = True
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
if transport != 'tcp':
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1:
log.warning('TCP transport supports only 1 worker on Windows '
'when using Python 2.')
self.opts['worker_threads'] = 1
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts['worker_threads'])):
name = 'MWorker-{0}'.format(ind)
self.process_manager.add_process(MWorker,
args=(self.opts,
self.master_key,
self.key,
req_channels,
name),
kwargs=kwargs,
name=name)
self.process_manager.run() |
def generateL2Sequences(nL1Patterns=10, l1Hubs=[2,6], l1SeqLength=[5,6,7],
nL1SimpleSequences=50, nL1HubSequences=50,
l1Pooling=4, perfectStability=False, spHysteresisFactor=1.0,
patternLen=500, patternActivity=50):
"""
Generate the simulated output from a spatial pooler that's sitting
on top of another spatial pooler / temporal memory pair. The average on-time
of the outputs from the simulated TM is given by the l1Pooling argument.
In this routine, L1 refers to the first spatial and temporal memory and L2
refers to the spatial pooler above that.
Parameters:
-----------------------------------------------
nL1Patterns: the number of patterns to use in the L1 sequences.
l1Hubs: which of the elements will be used as hubs.
l1SeqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nL1SimpleSequences: The number of simple sequences to generate for L1
nL1HubSequences: The number of hub sequences to generate for L1
l1Pooling: The number of time steps to pool over in the L1 temporal
pooler
perfectStability: If true, then the input patterns represented by the
sequences generated will have perfect stability over
l1Pooling time steps. This is the best case ideal input
to a TM. In actual situations, with an actual SP
providing input, the stability will always be less than
this.
spHysteresisFactor: The hysteresisFactor to use in the L2 spatial pooler.
Only used when perfectStability is False
patternLen: The number of elements in each pattern output by L2
patternActivity: The number of elements that should be active in
each pattern
@retval: (seqList, patterns)
seqList: a list of sequences output from L2. Each sequence is
itself a list containing the input pattern indices for that
sequence.
patterns: the input patterns used in the L2 seqList.
"""
# First, generate the L1 sequences
l1SeqList = generateSimpleSequences(nCoinc=nL1Patterns, seqLength=l1SeqLength,
nSeq=nL1SimpleSequences) + \
generateHubSequences(nCoinc=nL1Patterns, hubs=l1Hubs,
seqLength=l1SeqLength, nSeq=nL1HubSequences)
# Generate the L2 SP output from those
spOutput = generateSlowSPOutput(seqListBelow = l1SeqList,
poolingTimeBelow=l1Pooling, outputWidth=patternLen,
activity=patternActivity, perfectStability=perfectStability,
spHysteresisFactor=spHysteresisFactor)
# Map the spOutput patterns into indices into a pattern matrix which we
# generate now.
outSeq = None
outSeqList = []
outPatterns = SM32(0, patternLen)
for pattern in spOutput:
# If we have a reset vector start a new sequence
if pattern.sum() == 0:
if outSeq is not None:
outSeqList.append(outSeq)
outSeq = []
continue
# See if this vector matches a pattern we've already seen before
patternIdx = None
if outPatterns.nRows() > 0:
# Find most matching 1's.
matches = outPatterns.rightVecSumAtNZ(pattern)
outCoinc = matches.argmax().astype('uint32')
# See if its number of 1's is the same in the pattern and in the
# coincidence row. If so, it is an exact match
numOnes = pattern.sum()
if matches[outCoinc] == numOnes \
and outPatterns.getRow(int(outCoinc)).sum() == numOnes:
patternIdx = outCoinc
# If no match, add this pattern to our matrix
if patternIdx is None:
outPatterns.addRow(pattern)
patternIdx = outPatterns.nRows() - 1
# Store the pattern index into the sequence
outSeq.append(patternIdx)
# Put in last finished sequence
if outSeq is not None:
outSeqList.append(outSeq)
# Return with the seqList and patterns matrix
return (outSeqList, outPatterns) | Generate the simulated output from a spatial pooler that's sitting
on top of another spatial pooler / temporal memory pair. The average on-time
of the outputs from the simulated TM is given by the l1Pooling argument.
In this routine, L1 refers to the first spatial and temporal memory and L2
refers to the spatial pooler above that.
Parameters:
-----------------------------------------------
nL1Patterns: the number of patterns to use in the L1 sequences.
l1Hubs: which of the elements will be used as hubs.
l1SeqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nL1SimpleSequences: The number of simple sequences to generate for L1
nL1HubSequences: The number of hub sequences to generate for L1
l1Pooling: The number of time steps to pool over in the L1 temporal
pooler
perfectStability: If true, then the input patterns represented by the
sequences generated will have perfect stability over
l1Pooling time steps. This is the best case ideal input
to a TM. In actual situations, with an actual SP
providing input, the stability will always be less than
this.
spHysteresisFactor: The hysteresisFactor to use in the L2 spatial pooler.
Only used when perfectStability is False
patternLen: The number of elements in each pattern output by L2
patternActivity: The number of elements that should be active in
each pattern
@retval: (seqList, patterns)
seqList: a list of sequences output from L2. Each sequence is
itself a list containing the input pattern indices for that
sequence.
patterns: the input patterns used in the L2 seqList. | Below is the instruction that describes the task:
### Input:
Generate the simulated output from a spatial pooler that's sitting
on top of another spatial pooler / temporal memory pair. The average on-time
of the outputs from the simulated TM is given by the l1Pooling argument.
In this routine, L1 refers to the first spatial and temporal memory and L2
refers to the spatial pooler above that.
Parameters:
-----------------------------------------------
nL1Patterns: the number of patterns to use in the L1 sequences.
l1Hubs: which of the elements will be used as hubs.
l1SeqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nL1SimpleSequences: The number of simple sequences to generate for L1
nL1HubSequences: The number of hub sequences to generate for L1
l1Pooling: The number of time steps to pool over in the L1 temporal
pooler
perfectStability: If true, then the input patterns represented by the
sequences generated will have perfect stability over
l1Pooling time steps. This is the best case ideal input
to a TM. In actual situations, with an actual SP
providing input, the stability will always be less than
this.
spHysteresisFactor: The hysteresisFactor to use in the L2 spatial pooler.
Only used when perfectStability is False
patternLen: The number of elements in each pattern output by L2
patternActivity: The number of elements that should be active in
each pattern
@retval: (seqList, patterns)
seqList: a list of sequences output from L2. Each sequence is
itself a list containing the input pattern indices for that
sequence.
patterns: the input patterns used in the L2 seqList.
### Response:
def generateL2Sequences(nL1Patterns=10, l1Hubs=[2,6], l1SeqLength=[5,6,7],
nL1SimpleSequences=50, nL1HubSequences=50,
l1Pooling=4, perfectStability=False, spHysteresisFactor=1.0,
patternLen=500, patternActivity=50):
"""
Generate the simulated output from a spatial pooler that's sitting
on top of another spatial pooler / temporal memory pair. The average on-time
of the outputs from the simulated TM is given by the l1Pooling argument.
In this routine, L1 refers to the first spatial and temporal memory and L2
refers to the spatial pooler above that.
Parameters:
-----------------------------------------------
nL1Patterns: the number of patterns to use in the L1 sequences.
l1Hubs: which of the elements will be used as hubs.
l1SeqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nL1SimpleSequences: The number of simple sequences to generate for L1
nL1HubSequences: The number of hub sequences to generate for L1
l1Pooling: The number of time steps to pool over in the L1 temporal
pooler
perfectStability: If true, then the input patterns represented by the
sequences generated will have perfect stability over
l1Pooling time steps. This is the best case ideal input
to a TM. In actual situations, with an actual SP
providing input, the stability will always be less than
this.
spHysteresisFactor: The hysteresisFactor to use in the L2 spatial pooler.
Only used when perfectStability is False
patternLen: The number of elements in each pattern output by L2
patternActivity: The number of elements that should be active in
each pattern
@retval: (seqList, patterns)
seqList: a list of sequences output from L2. Each sequence is
itself a list containing the input pattern indices for that
sequence.
patterns: the input patterns used in the L2 seqList.
"""
# First, generate the L1 sequences
l1SeqList = generateSimpleSequences(nCoinc=nL1Patterns, seqLength=l1SeqLength,
nSeq=nL1SimpleSequences) + \
generateHubSequences(nCoinc=nL1Patterns, hubs=l1Hubs,
seqLength=l1SeqLength, nSeq=nL1HubSequences)
# Generate the L2 SP output from those
spOutput = generateSlowSPOutput(seqListBelow = l1SeqList,
poolingTimeBelow=l1Pooling, outputWidth=patternLen,
activity=patternActivity, perfectStability=perfectStability,
spHysteresisFactor=spHysteresisFactor)
# Map the spOutput patterns into indices into a pattern matrix which we
# generate now.
outSeq = None
outSeqList = []
outPatterns = SM32(0, patternLen)
for pattern in spOutput:
# If we have a reset vector start a new sequence
if pattern.sum() == 0:
if outSeq is not None:
outSeqList.append(outSeq)
outSeq = []
continue
# See if this vector matches a pattern we've already seen before
patternIdx = None
if outPatterns.nRows() > 0:
# Find most matching 1's.
matches = outPatterns.rightVecSumAtNZ(pattern)
outCoinc = matches.argmax().astype('uint32')
# See if its number of 1's is the same in the pattern and in the
# coincidence row. If so, it is an exact match
numOnes = pattern.sum()
if matches[outCoinc] == numOnes \
and outPatterns.getRow(int(outCoinc)).sum() == numOnes:
patternIdx = outCoinc
# If no match, add this pattern to our matrix
if patternIdx is None:
outPatterns.addRow(pattern)
patternIdx = outPatterns.nRows() - 1
# Store the pattern index into the sequence
outSeq.append(patternIdx)
# Put in last finished sequence
if outSeq is not None:
outSeqList.append(outSeq)
# Return with the seqList and patterns matrix
return (outSeqList, outPatterns) |
def RegisterDefinition(self, data_type_definition):
"""Registers a data type definition.
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definitions.
Raises:
KeyError: if data type definition is already set for the corresponding
name.
"""
name_lower = data_type_definition.name.lower()
if name_lower in self._definitions:
raise KeyError('Definition already set for name: {0:s}.'.format(
data_type_definition.name))
if data_type_definition.name in self._aliases:
raise KeyError('Alias already set for name: {0:s}.'.format(
data_type_definition.name))
for alias in data_type_definition.aliases:
if alias in self._aliases:
raise KeyError('Alias already set for name: {0:s}.'.format(alias))
self._definitions[name_lower] = data_type_definition
for alias in data_type_definition.aliases:
self._aliases[alias] = name_lower
if data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT:
self._format_definitions.append(name_lower) | Registers a data type definition.
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definitions.
Raises:
KeyError: if data type definition is already set for the corresponding
name. | Below is the instruction that describes the task:
### Input:
Registers a data type definition.
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definitions.
Raises:
KeyError: if data type definition is already set for the corresponding
name.
### Response:
def RegisterDefinition(self, data_type_definition):
"""Registers a data type definition.
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definitions.
Raises:
KeyError: if data type definition is already set for the corresponding
name.
"""
name_lower = data_type_definition.name.lower()
if name_lower in self._definitions:
raise KeyError('Definition already set for name: {0:s}.'.format(
data_type_definition.name))
if data_type_definition.name in self._aliases:
raise KeyError('Alias already set for name: {0:s}.'.format(
data_type_definition.name))
for alias in data_type_definition.aliases:
if alias in self._aliases:
raise KeyError('Alias already set for name: {0:s}.'.format(alias))
self._definitions[name_lower] = data_type_definition
for alias in data_type_definition.aliases:
self._aliases[alias] = name_lower
if data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT:
self._format_definitions.append(name_lower) |
def launch_shell(username, hostname, password, port=22):
"""
Launches an ssh shell
"""
if not username or not hostname or not password:
return False
with tempfile.NamedTemporaryFile() as tmpFile:
os.system(sshCmdLine.format(password, tmpFile.name, username, hostname,
port))
return True | Launches an ssh shell | Below is the instruction that describes the task:
### Input:
Launches an ssh shell
### Response:
def launch_shell(username, hostname, password, port=22):
"""
Launches an ssh shell
"""
if not username or not hostname or not password:
return False
with tempfile.NamedTemporaryFile() as tmpFile:
os.system(sshCmdLine.format(password, tmpFile.name, username, hostname,
port))
return True |
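A hypothetical call to launch_shell(); the host and credentials are placeholders, and sshCmdLine is the module-level command template the function interpolates:
ok = launch_shell("deploy", "203.0.113.10", "s3cret-password", port=2222)
if not ok:
    print("username, hostname and password are all required")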
def start(self, start_offset):
"""
Starts fetching messages from Kafka and delivering them to the
:attr:`.processor` function.
:param int start_offset:
The offset within the partition from which to start fetching.
Special values include: :const:`OFFSET_EARLIEST`,
:const:`OFFSET_LATEST`, and :const:`OFFSET_COMMITTED`. If the
supplied offset is :const:`OFFSET_EARLIEST` or
:const:`OFFSET_LATEST` the :class:`Consumer` will use the
OffsetRequest Kafka API to retrieve the actual offset used for
fetching. In the case :const:`OFFSET_COMMITTED` is used,
`commit_policy` MUST be set on the Consumer, and the Consumer
will use the OffsetFetchRequest Kafka API to retrieve the actual
offset used for fetching.
:returns:
A :class:`~twisted.internet.defer.Deferred` which will resolve
successfully when the consumer is cleanly stopped, or with
a failure if the :class:`Consumer` encounters an error from which
it is unable to recover.
:raises: :exc:`RestartError` if already running.
"""
# Have we been started already, and not stopped?
if self._start_d is not None:
raise RestartError("Start called on already-started consumer")
# Keep track of state for debugging
self._state = '[started]'
# Create and return a deferred for alerting on errors/stoppage
start_d = self._start_d = Deferred()
# Start a new fetch request, possibly just for the starting offset
self._fetch_offset = start_offset
self._do_fetch()
# Set up the auto-commit timer, if needed
if self.consumer_group and self.auto_commit_every_s:
self._commit_looper = LoopingCall(self._auto_commit)
self._commit_looper.clock = self.client.reactor
self._commit_looper_d = self._commit_looper.start(
self.auto_commit_every_s, now=False)
self._commit_looper_d.addCallbacks(self._commit_timer_stopped,
self._commit_timer_failed)
return start_d | Starts fetching messages from Kafka and delivering them to the
:attr:`.processor` function.
:param int start_offset:
The offset within the partition from which to start fetching.
Special values include: :const:`OFFSET_EARLIEST`,
:const:`OFFSET_LATEST`, and :const:`OFFSET_COMMITTED`. If the
supplied offset is :const:`OFFSET_EARLIEST` or
:const:`OFFSET_LATEST` the :class:`Consumer` will use the
OffsetRequest Kafka API to retrieve the actual offset used for
fetching. In the case :const:`OFFSET_COMMITTED` is used,
`commit_policy` MUST be set on the Consumer, and the Consumer
will use the OffsetFetchRequest Kafka API to retrieve the actual
offset used for fetching.
:returns:
A :class:`~twisted.internet.defer.Deferred` which will resolve
successfully when the consumer is cleanly stopped, or with
a failure if the :class:`Consumer` encounters an error from which
it is unable to recover.
:raises: :exc:`RestartError` if already running. | Below is the instruction that describes the task:
### Input:
Starts fetching messages from Kafka and delivering them to the
:attr:`.processor` function.
:param int start_offset:
The offset within the partition from which to start fetching.
Special values include: :const:`OFFSET_EARLIEST`,
:const:`OFFSET_LATEST`, and :const:`OFFSET_COMMITTED`. If the
supplied offset is :const:`OFFSET_EARLIEST` or
:const:`OFFSET_LATEST` the :class:`Consumer` will use the
OffsetRequest Kafka API to retrieve the actual offset used for
fetching. In the case :const:`OFFSET_COMMITTED` is used,
`commit_policy` MUST be set on the Consumer, and the Consumer
will use the OffsetFetchRequest Kafka API to retrieve the actual
offset used for fetching.
:returns:
A :class:`~twisted.internet.defer.Deferred` which will resolve
successfully when the consumer is cleanly stopped, or with
a failure if the :class:`Consumer` encounters an error from which
it is unable to recover.
:raises: :exc:`RestartError` if already running.
### Response:
def start(self, start_offset):
"""
Starts fetching messages from Kafka and delivering them to the
:attr:`.processor` function.
:param int start_offset:
The offset within the partition from which to start fetching.
Special values include: :const:`OFFSET_EARLIEST`,
:const:`OFFSET_LATEST`, and :const:`OFFSET_COMMITTED`. If the
supplied offset is :const:`OFFSET_EARLIEST` or
:const:`OFFSET_LATEST` the :class:`Consumer` will use the
OffsetRequest Kafka API to retrieve the actual offset used for
fetching. In the case :const:`OFFSET_COMMITTED` is used,
`commit_policy` MUST be set on the Consumer, and the Consumer
will use the OffsetFetchRequest Kafka API to retrieve the actual
offset used for fetching.
:returns:
A :class:`~twisted.internet.defer.Deferred` which will resolve
successfully when the consumer is cleanly stopped, or with
a failure if the :class:`Consumer` encounters an error from which
it is unable to recover.
:raises: :exc:`RestartError` if already running.
"""
# Have we been started already, and not stopped?
if self._start_d is not None:
raise RestartError("Start called on already-started consumer")
# Keep track of state for debugging
self._state = '[started]'
# Create and return a deferred for alerting on errors/stoppage
start_d = self._start_d = Deferred()
# Start a new fetch request, possibly just for the starting offset
self._fetch_offset = start_offset
self._do_fetch()
# Set up the auto-commit timer, if needed
if self.consumer_group and self.auto_commit_every_s:
self._commit_looper = LoopingCall(self._auto_commit)
self._commit_looper.clock = self.client.reactor
self._commit_looper_d = self._commit_looper.start(
self.auto_commit_every_s, now=False)
self._commit_looper_d.addCallbacks(self._commit_timer_stopped,
self._commit_timer_failed)
return start_d |
def _pindel_options(items, config, out_file, region, tmp_path):
"""parse pindel options. Add region to cmd.
:param items: (dict) information from yaml
:param config: (dict) information from yaml (items[0]['config'])
:param region: (str or tuple) region to analyze
:param tmp_path: (str) temporary folder
:returns: (list) options for pindel
"""
variant_regions = utils.get_in(config, ("algorithm", "variant_regions"))
target = subset_variant_regions(variant_regions, region, out_file, items)
opts = ""
if target:
if isinstance(target, six.string_types) and os.path.isfile(target):
target_bed = target
else:
target_bed = os.path.join(tmp_path, "tmp.bed")
with file_transaction(config, target_bed) as tx_tmp_bed:
if not isinstance(region, (list, tuple)):
message = ("Region must be a tuple - something odd just happened")
raise ValueError(message)
chrom, start, end = region
with open(tx_tmp_bed, "w") as out_handle:
print("%s\t%s\t%s" % (chrom, start, end), file=out_handle)
opts = "-j " + remove_lcr_regions(target_bed, items)
return opts | parse pindel options. Add region to cmd.
:param items: (dict) information from yaml
:param config: (dict) information from yaml (items[0]['config'])
:param region: (str or tuple) region to analyze
:param tmp_path: (str) temporary folder
:returns: (list) options for pindel | Below is the instruction that describes the task:
### Input:
parse pindel options. Add region to cmd.
:param items: (dict) information from yaml
:param config: (dict) information from yaml (items[0]['config'])
:param region: (str or tuple) region to analyze
:param tmp_path: (str) temporary folder
:returns: (list) options for pindel
### Response:
def _pindel_options(items, config, out_file, region, tmp_path):
"""parse pindel options. Add region to cmd.
:param items: (dict) information from yaml
:param config: (dict) information from yaml (items[0]['config'])
:param region: (str or tuple) region to analyze
:param tmp_path: (str) temporary folder
:returns: (list) options for pindel
"""
variant_regions = utils.get_in(config, ("algorithm", "variant_regions"))
target = subset_variant_regions(variant_regions, region, out_file, items)
opts = ""
if target:
if isinstance(target, six.string_types) and os.path.isfile(target):
target_bed = target
else:
target_bed = os.path.join(tmp_path, "tmp.bed")
with file_transaction(config, target_bed) as tx_tmp_bed:
if not isinstance(region, (list, tuple)):
message = ("Region must be a tuple - something odd just happened")
raise ValueError(message)
chrom, start, end = region
with open(tx_tmp_bed, "w") as out_handle:
print("%s\t%s\t%s" % (chrom, start, end), file=out_handle)
opts = "-j " + remove_lcr_regions(target_bed, items)
return opts |
def decode_schedule(string):
"""Decodes a string into a schedule tuple.
Args:
string: The string encoding of a schedule tuple.
Returns:
A schedule tuple, see encode_schedule for details.
"""
splits = string.split()
steps = [int(x[1:]) for x in splits[1:] if x[0] == '@']
pmfs = np.reshape(
[float(x) for x in splits[1:] if x[0] != '@'], [len(steps), -1])
return splits[0], tuplize(steps), tuplize(pmfs) | Decodes a string into a schedule tuple.
Args:
string: The string encoding of a schedule tuple.
Returns:
A schedule tuple, see encode_schedule for details. | Below is the instruction that describes the task:
### Input:
Decodes a string into a schedule tuple.
Args:
string: The string encoding of a schedule tuple.
Returns:
A schedule tuple, see encode_schedule for details.
### Response:
def decode_schedule(string):
"""Decodes a string into a schedule tuple.
Args:
string: The string encoding of a schedule tuple.
Returns:
A schedule tuple, see encode_schedule for details.
"""
splits = string.split()
steps = [int(x[1:]) for x in splits[1:] if x[0] == '@']
pmfs = np.reshape(
[float(x) for x in splits[1:] if x[0] != '@'], [len(steps), -1])
return splits[0], tuplize(steps), tuplize(pmfs) |
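An illustrative decode; the schedule name is made up, and tuplize() is assumed to be the module helper that turns nested arrays into nested tuples:
name, steps, pmfs = decode_schedule("my_schedule @0 1.0 0.0 @1000 0.0 1.0")
print(name)   # 'my_schedule'
print(steps)  # (0, 1000)
print(pmfs)   # ((1.0, 0.0), (0.0, 1.0))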
def _submit_task_with_template(self, task_ids):
'''Submit tasks by interpolating a shell script defined in job_template'''
runtime = self.config
runtime.update({
'workdir': os.getcwd(),
'cur_dir': os.getcwd(), # for backward compatibility
'verbosity': env.verbosity,
'sig_mode': env.config.get('sig_mode', 'default'),
'run_mode': env.config.get('run_mode', 'run'),
'home_dir': os.path.expanduser('~')
})
if '_runtime' in env.sos_dict:
runtime.update({
x: env.sos_dict['_runtime'][x]
for x in ('nodes', 'cores', 'workdir', 'mem', 'walltime')
if x in env.sos_dict['_runtime']
})
if 'nodes' not in runtime:
runtime['nodes'] = 1
if 'cores' not in runtime:
runtime['cores'] = 1
# let us first prepare a task file
job_text = ''
for task_id in task_ids:
runtime['task'] = task_id
try:
job_text += cfg_interpolate(self.job_template, runtime)
job_text += '\n'
except Exception as e:
raise ValueError(
f'Failed to generate job file for task {task_id}: {e}')
filename = task_ids[0] + ('.sh' if len(task_ids) == 1 else
f'-{task_ids[-1]}.sh')
# now we need to write a job file
job_file = os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', filename)
# do not translate newline under windows because the script will be executed
# under linux/mac
with open(job_file, 'w', newline='') as job:
job.write(job_text)
# then copy the job file to remote host if necessary
self.agent.send_task_file(job_file)
try:
cmd = f'bash ~/.sos/tasks/{filename}'
self.agent.run_command(cmd, wait_for_task=self.wait_for_task)
except Exception as e:
raise RuntimeError(f'Failed to submit task {task_ids}: {e}')
return True | Submit tasks by interpolating a shell script defined in job_template | Below is the instruction that describes the task:
### Input:
Submit tasks by interpolating a shell script defined in job_template
### Response:
def _submit_task_with_template(self, task_ids):
'''Submit tasks by interpolating a shell script defined in job_template'''
runtime = self.config
runtime.update({
'workdir': os.getcwd(),
'cur_dir': os.getcwd(), # for backward compatibility
'verbosity': env.verbosity,
'sig_mode': env.config.get('sig_mode', 'default'),
'run_mode': env.config.get('run_mode', 'run'),
'home_dir': os.path.expanduser('~')
})
if '_runtime' in env.sos_dict:
runtime.update({
x: env.sos_dict['_runtime'][x]
for x in ('nodes', 'cores', 'workdir', 'mem', 'walltime')
if x in env.sos_dict['_runtime']
})
if 'nodes' not in runtime:
runtime['nodes'] = 1
if 'cores' not in runtime:
runtime['cores'] = 1
# let us first prepare a task file
job_text = ''
for task_id in task_ids:
runtime['task'] = task_id
try:
job_text += cfg_interpolate(self.job_template, runtime)
job_text += '\n'
except Exception as e:
raise ValueError(
f'Failed to generate job file for task {task_id}: {e}')
filename = task_ids[0] + ('.sh' if len(task_ids) == 1 else
f'-{task_ids[-1]}.sh')
# now we need to write a job file
job_file = os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', filename)
# do not translate newline under windows because the script will be executed
# under linux/mac
with open(job_file, 'w', newline='') as job:
job.write(job_text)
# then copy the job file to remote host if necessary
self.agent.send_task_file(job_file)
try:
cmd = f'bash ~/.sos/tasks/{filename}'
self.agent.run_command(cmd, wait_for_task=self.wait_for_task)
except Exception as e:
raise RuntimeError(f'Failed to submit task {task_ids}: {e}')
return True |
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
"""Returns the file_set for the given interval in."""
try:
return self.file_map[intvl_in]
except KeyError:
raise KeyError('File set does not exist for the specified'
                           ' intvl_in {0}'.format(intvl_in)) | Returns the file_set for the given interval in. | Below is the instruction that describes the task:
### Input:
Returns the file_set for the given interval in.
### Response:
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
"""Returns the file_set for the given interval in."""
try:
return self.file_map[intvl_in]
except KeyError:
raise KeyError('File set does not exist for the specified'
' intvl_in {0}'.format(intvl_in)) |
def cut(sentence, HMM=True):
"""
Global `cut` function that supports parallel processing.
Note that this only works using dt, custom POSTokenizer
instances are not supported.
"""
global dt
if jieba.pool is None:
for w in dt.cut(sentence, HMM=HMM):
yield w
else:
parts = strdecode(sentence).splitlines(True)
if HMM:
result = jieba.pool.map(_lcut_internal, parts)
else:
result = jieba.pool.map(_lcut_internal_no_hmm, parts)
for r in result:
for w in r:
yield w | Global `cut` function that supports parallel processing.
Note that this only works using dt, custom POSTokenizer
    instances are not supported. | Below is the instruction that describes the task:
### Input:
Global `cut` function that supports parallel processing.
Note that this only works using dt, custom POSTokenizer
instances are not supported.
### Response:
def cut(sentence, HMM=True):
"""
Global `cut` function that supports parallel processing.
Note that this only works using dt, custom POSTokenizer
instances are not supported.
"""
global dt
if jieba.pool is None:
for w in dt.cut(sentence, HMM=HMM):
yield w
else:
parts = strdecode(sentence).splitlines(True)
if HMM:
result = jieba.pool.map(_lcut_internal, parts)
else:
result = jieba.pool.map(_lcut_internal_no_hmm, parts)
for r in result:
for w in r:
yield w |
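A minimal usage sketch for the parallel-aware POS cut above, assuming jieba's bundled posseg module on a POSIX system (the sample sentence is arbitrary):
import jieba
import jieba.posseg as pseg

jieba.enable_parallel(4)                    # start the process pool; not supported on Windows
for pair in pseg.cut("我来到北京清华大学"):   # goes through the jieba.pool branch above
    print(pair.word, pair.flag)
jieba.disable_parallel()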
def algorithm(self):
"""
:return:
A unicode string of "rsa", "dsa" or "ec"
"""
if self._algorithm is None:
self._algorithm = self['private_key_algorithm']['algorithm'].native
return self._algorithm | :return:
A unicode string of "rsa", "dsa" or "ec" | Below is the the instruction that describes the task:
### Input:
:return:
A unicode string of "rsa", "dsa" or "ec"
### Response:
def algorithm(self):
"""
:return:
A unicode string of "rsa", "dsa" or "ec"
"""
if self._algorithm is None:
self._algorithm = self['private_key_algorithm']['algorithm'].native
return self._algorithm |
def call_script(self, script_dict, keys=None, args=None):
"""Call a redis script with keys and args
The first time we call a script, we register it to speed up later calls.
We expect a dict with a ``lua`` key having the script, and the dict will be
updated with a ``script_object`` key, with the content returned by the
the redis-py ``register_script`` command.
Parameters
----------
script_dict: dict
A dict with a ``lua`` entry containing the lua code. A new key, ``script_object``
will be added after that.
keys: list of str
List of the keys that will be read/updated by the lua script
args: list of str
List of all the args expected by the script.
Returns
-------
Anything that will be returned by the script
"""
if keys is None:
keys = []
if args is None:
args = []
if 'script_object' not in script_dict:
script_dict['script_object'] = self.connection.register_script(script_dict['lua'])
return script_dict['script_object'](keys=keys, args=args, client=self.connection) | Call a redis script with keys and args
The first time we call a script, we register it to speed up later calls.
We expect a dict with a ``lua`` key having the script, and the dict will be
updated with a ``script_object`` key, with the content returned by the
the redis-py ``register_script`` command.
Parameters
----------
script_dict: dict
A dict with a ``lua`` entry containing the lua code. A new key, ``script_object``
will be added after that.
keys: list of str
List of the keys that will be read/updated by the lua script
args: list of str
List of all the args expected by the script.
Returns
-------
        Anything that will be returned by the script | Below is the instruction that describes the task:
### Input:
Call a redis script with keys and args
The first time we call a script, we register it to speed up later calls.
We expect a dict with a ``lua`` key having the script, and the dict will be
updated with a ``script_object`` key, with the content returned by the
the redis-py ``register_script`` command.
Parameters
----------
script_dict: dict
A dict with a ``lua`` entry containing the lua code. A new key, ``script_object``
will be added after that.
keys: list of str
List of the keys that will be read/updated by the lua script
args: list of str
List of all the args expected by the script.
Returns
-------
Anything that will be returned by the script
### Response:
def call_script(self, script_dict, keys=None, args=None):
"""Call a redis script with keys and args
The first time we call a script, we register it to speed up later calls.
We expect a dict with a ``lua`` key having the script, and the dict will be
updated with a ``script_object`` key, with the content returned by the
the redis-py ``register_script`` command.
Parameters
----------
script_dict: dict
A dict with a ``lua`` entry containing the lua code. A new key, ``script_object``
will be added after that.
keys: list of str
List of the keys that will be read/updated by the lua script
args: list of str
List of all the args expected by the script.
Returns
-------
Anything that will be returned by the script
"""
if keys is None:
keys = []
if args is None:
args = []
if 'script_object' not in script_dict:
script_dict['script_object'] = self.connection.register_script(script_dict['lua'])
return script_dict['script_object'](keys=keys, args=args, client=self.connection) |
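A standalone sketch of the same register-once pattern with the synchronous redis-py client; the local connection and the INCRBY script are illustrative, not part of the original class:
import redis

INCR_BY = {"lua": "return redis.call('INCRBY', KEYS[1], ARGV[1])"}

def call_lua(connection, script_dict, keys=(), args=()):
    # Register the script on first use, then reuse the cached Script object.
    if 'script_object' not in script_dict:
        script_dict['script_object'] = connection.register_script(script_dict['lua'])
    return script_dict['script_object'](keys=list(keys), args=list(args), client=connection)

r = redis.Redis()  # assumes a Redis server on localhost:6379
print(call_lua(r, INCR_BY, keys=['counter'], args=[5]))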
def int_option(string, options):
""" Requires values (int) to be in `args`
:param string: Value to validate
:type string: str
"""
i = int(string)
if i in options:
return i
raise ValueError('Not in allowed options') | Requires values (int) to be in `args`
:param string: Value to validate
    :type string: str | Below is the instruction that describes the task:
### Input:
Requires values (int) to be in `args`
:param string: Value to validate
:type string: str
### Response:
def int_option(string, options):
""" Requires values (int) to be in `args`
:param string: Value to validate
:type string: str
"""
i = int(string)
if i in options:
return i
raise ValueError('Not in allowed options') |
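A small usage sketch pairing the validator with argparse via functools.partial, assuming the int_option above is in scope (the option values are made up):
import argparse
from functools import partial

parser = argparse.ArgumentParser()
# argparse calls the partial as type(value) -> int_option(value, options={1, 2, 3})
parser.add_argument('--level', type=partial(int_option, options={1, 2, 3}))
print(parser.parse_args(['--level', '2']).level)   # -> 2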
async def jsk_hide(self, ctx: commands.Context):
"""
Hides Jishaku from the help command.
"""
if self.jsk.hidden:
return await ctx.send("Jishaku is already hidden.")
self.jsk.hidden = True
await ctx.send("Jishaku is now hidden.") | Hides Jishaku from the help command. | Below is the the instruction that describes the task:
### Input:
Hides Jishaku from the help command.
### Response:
async def jsk_hide(self, ctx: commands.Context):
"""
Hides Jishaku from the help command.
"""
if self.jsk.hidden:
return await ctx.send("Jishaku is already hidden.")
self.jsk.hidden = True
await ctx.send("Jishaku is now hidden.") |
def to_CAG(self):
""" Export to a Causal Analysis Graph (CAG) PyGraphviz AGraph object.
The CAG shows the influence relationships between the variables and
elides the function nodes."""
G = nx.DiGraph()
for (name, attrs) in self.nodes(data=True):
if attrs["type"] == "variable":
for pred_fn in self.predecessors(name):
if not any(
fn_type in pred_fn
for fn_type in ("condition", "decision")
):
for pred_var in self.predecessors(pred_fn):
G.add_node(
self.nodes[pred_var]["basename"],
**self.nodes[pred_var],
)
G.add_node(attrs["basename"], **attrs)
G.add_edge(
self.nodes[pred_var]["basename"],
attrs["basename"],
)
if attrs["is_loop_index"]:
G.add_edge(attrs["basename"], attrs["basename"])
return G | Export to a Causal Analysis Graph (CAG) PyGraphviz AGraph object.
The CAG shows the influence relationships between the variables and
        elides the function nodes. | Below is the instruction that describes the task:
### Input:
Export to a Causal Analysis Graph (CAG) PyGraphviz AGraph object.
The CAG shows the influence relationships between the variables and
elides the function nodes.
### Response:
def to_CAG(self):
""" Export to a Causal Analysis Graph (CAG) PyGraphviz AGraph object.
The CAG shows the influence relationships between the variables and
elides the function nodes."""
G = nx.DiGraph()
for (name, attrs) in self.nodes(data=True):
if attrs["type"] == "variable":
for pred_fn in self.predecessors(name):
if not any(
fn_type in pred_fn
for fn_type in ("condition", "decision")
):
for pred_var in self.predecessors(pred_fn):
G.add_node(
self.nodes[pred_var]["basename"],
**self.nodes[pred_var],
)
G.add_node(attrs["basename"], **attrs)
G.add_edge(
self.nodes[pred_var]["basename"],
attrs["basename"],
)
if attrs["is_loop_index"]:
G.add_edge(attrs["basename"], attrs["basename"])
return G |
def is_package(self, fullname):
"""Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'."""
filename = os.path.split(self.get_filename(fullname))[1]
filename_base = filename.rsplit('.', 1)[0]
tail_name = fullname.rpartition('.')[2]
return filename_base == '__init__' and tail_name != '__init__' | Concrete implementation of InspectLoader.is_package by checking if
        the path returned by get_filename has a filename of '__init__.py'. | Below is the instruction that describes the task:
### Input:
Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'.
### Response:
def is_package(self, fullname):
"""Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'."""
filename = os.path.split(self.get_filename(fullname))[1]
filename_base = filename.rsplit('.', 1)[0]
tail_name = fullname.rpartition('.')[2]
return filename_base == '__init__' and tail_name != '__init__' |
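The same check reduced to plain strings, as a quick self-contained illustration (the paths and module names are hypothetical):
import os

def looks_like_package(path, fullname):
    # An '__init__.py' backing a module whose tail name is not '__init__' marks a package.
    filename_base = os.path.split(path)[1].rsplit('.', 1)[0]
    tail_name = fullname.rpartition('.')[2]
    return filename_base == '__init__' and tail_name != '__init__'

print(looks_like_package('/site-packages/pkg/__init__.py', 'pkg'))   # True
print(looks_like_package('/site-packages/pkg/mod.py', 'pkg.mod'))    # False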
def list_images(self, tag_values=None):
'''
a method to retrieve the list of images of account on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of image AWS ids
'''
title = '%s.list_images' % self.__class__.__name__
# validate inputs
input_fields = {
'tag_values': tag_values
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# add tags to method arguments
kw_args = { 'Owners': [ self.iam.owner_id ] }
tag_text = ''
if tag_values:
kw_args = {
'Filters': [ { 'Name': 'tag-value', 'Values': tag_values } ]
}
from labpack.parsing.grammar import join_words
plural_value = ''
if len(tag_values) > 1:
plural_value = 's'
tag_text = ' with tag value%s %s' % (plural_value, join_words(tag_values))
# request image details from AWS
self.iam.printer('Querying AWS region %s for images%s.' % (self.iam.region_name, tag_text))
image_list = []
try:
response = self.connection.describe_images(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response['Images']
# repeat request
if not response_list:
from time import sleep
from timeit import default_timer as timer
self.iam.printer('No images found initially. Checking again', flush=True)
state_timeout = 0
delay = 3
while not response_list and state_timeout < 12:
self.iam.printer('.', flush=True)
sleep(delay)
t3 = timer()
try:
response = self.connection.describe_images(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response['Images']
t4 = timer()
state_timeout += 1
response_time = t4 - t3
if 3 - response_time > 0:
delay = 3 - response_time
else:
delay = 0
self.iam.printer(' done.')
# wait until all images are no longer pending
for image in response_list:
image_list.append(image['ImageId'])
# report outcome and return results
if image_list:
print_out = 'Found image'
if len(image_list) > 1:
print_out += 's'
from labpack.parsing.grammar import join_words
print_out += ' %s.' % join_words(image_list)
self.iam.printer(print_out)
else:
self.iam.printer('No images found.')
return image_list | a method to retrieve the list of images of account on AWS EC2
:param tag_values: [optional] list of tag values
            :return: list of image AWS ids | Below is the instruction that describes the task:
### Input:
a method to retrieve the list of images of account on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of image AWS ids
### Response:
def list_images(self, tag_values=None):
'''
a method to retrieve the list of images of account on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of image AWS ids
'''
title = '%s.list_images' % self.__class__.__name__
# validate inputs
input_fields = {
'tag_values': tag_values
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# add tags to method arguments
kw_args = { 'Owners': [ self.iam.owner_id ] }
tag_text = ''
if tag_values:
kw_args = {
'Filters': [ { 'Name': 'tag-value', 'Values': tag_values } ]
}
from labpack.parsing.grammar import join_words
plural_value = ''
if len(tag_values) > 1:
plural_value = 's'
tag_text = ' with tag value%s %s' % (plural_value, join_words(tag_values))
# request image details from AWS
self.iam.printer('Querying AWS region %s for images%s.' % (self.iam.region_name, tag_text))
image_list = []
try:
response = self.connection.describe_images(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response['Images']
# repeat request
if not response_list:
from time import sleep
from timeit import default_timer as timer
self.iam.printer('No images found initially. Checking again', flush=True)
state_timeout = 0
delay = 3
while not response_list and state_timeout < 12:
self.iam.printer('.', flush=True)
sleep(delay)
t3 = timer()
try:
response = self.connection.describe_images(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response['Images']
t4 = timer()
state_timeout += 1
response_time = t4 - t3
if 3 - response_time > 0:
delay = 3 - response_time
else:
delay = 0
self.iam.printer(' done.')
# wait until all images are no longer pending
for image in response_list:
image_list.append(image['ImageId'])
# report outcome and return results
if image_list:
print_out = 'Found image'
if len(image_list) > 1:
print_out += 's'
from labpack.parsing.grammar import join_words
print_out += ' %s.' % join_words(image_list)
self.iam.printer(print_out)
else:
self.iam.printer('No images found.')
return image_list |
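For reference, a bare boto3 sketch of the describe_images call this wrapper builds; the region, owner and tag value are placeholders:
import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')
response = ec2.describe_images(
    Owners=['self'],
    Filters=[{'Name': 'tag-value', 'Values': ['my-build']}],
)
print([image['ImageId'] for image in response['Images']])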
def setCenter(self, loc):
""" Move this region so it is centered on ``loc`` """
offset = self.getCenter().getOffset(loc) # Calculate offset from current center
        return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset | Move this region so it is centered on ``loc`` | Below is the instruction that describes the task:
### Input:
Move this region so it is centered on ``loc``
### Response:
def setCenter(self, loc):
""" Move this region so it is centered on ``loc`` """
offset = self.getCenter().getOffset(loc) # Calculate offset from current center
return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset |
def _try_instantiate(self, ipopo, factory, component):
# type: (Any, str, str) -> None
"""
Tries to instantiate a component from the queue. Hides all exceptions.
:param ipopo: The iPOPO service
:param factory: Component factory
:param component: Component name
"""
try:
# Get component properties
with self.__lock:
properties = self.__queue[factory][component]
except KeyError:
# Component not in queue
return
else:
try:
# Try instantiation
ipopo.instantiate(factory, component, properties)
except TypeError:
# Unknown factory: try later
pass
except ValueError as ex:
# Already known component
_logger.error("Component already running: %s", ex)
except Exception as ex:
# Other error
_logger.exception("Error instantiating component: %s", ex) | Tries to instantiate a component from the queue. Hides all exceptions.
:param ipopo: The iPOPO service
:param factory: Component factory
    :param component: Component name | Below is the instruction that describes the task:
### Input:
Tries to instantiate a component from the queue. Hides all exceptions.
:param ipopo: The iPOPO service
:param factory: Component factory
:param component: Component name
### Response:
def _try_instantiate(self, ipopo, factory, component):
# type: (Any, str, str) -> None
"""
Tries to instantiate a component from the queue. Hides all exceptions.
:param ipopo: The iPOPO service
:param factory: Component factory
:param component: Component name
"""
try:
# Get component properties
with self.__lock:
properties = self.__queue[factory][component]
except KeyError:
# Component not in queue
return
else:
try:
# Try instantiation
ipopo.instantiate(factory, component, properties)
except TypeError:
# Unknown factory: try later
pass
except ValueError as ex:
# Already known component
_logger.error("Component already running: %s", ex)
except Exception as ex:
# Other error
_logger.exception("Error instantiating component: %s", ex) |
def make_order_and_cancel(api_svr_ip, api_svr_port, unlock_password, test_code, trade_env, acc_id):
"""
使用请先配置正确参数:
:param api_svr_ip: (string) ip
:param api_svr_port: (string) ip
:param unlock_password: (string) 交易解锁密码, 必需修改!
:param test_code: (string) 股票
:param trade_env: 参见 ft.TrdEnv的定义
:param acc_id: 交易子账号id
"""
if unlock_password == "":
raise Exception("请先配置交易解锁密码!")
quote_ctx = ft.OpenQuoteContext(host=api_svr_ip, port=api_svr_port) # 创建行情api
quote_ctx.subscribe(test_code, ft.SubType.ORDER_BOOK) # 定阅摆盘
# 创建交易api
is_hk_trade = 'HK.' in test_code
if is_hk_trade:
trade_ctx = ft.OpenHKTradeContext(host=api_svr_ip, port=api_svr_port)
else:
trade_ctx = ft.OpenUSTradeContext(host=api_svr_ip, port=api_svr_port)
# 每手股数
lot_size = 0
is_unlock_trade = False
is_fire_trade = False
while not is_fire_trade:
sleep(2)
# 解锁交易
if not is_unlock_trade and trade_env == ft.TrdEnv.REAL:
print("unlocking trade...")
ret_code, ret_data = trade_ctx.unlock_trade(unlock_password)
is_unlock_trade = (ret_code == ft.RET_OK)
if not is_unlock_trade:
print("请求交易解锁失败:{}".format(ret_data))
break
if lot_size == 0:
print("get lotsize...")
ret, data = quote_ctx.get_market_snapshot(test_code)
lot_size = data.iloc[0]['lot_size'] if ret == ft.RET_OK else 0
if ret != ft.RET_OK:
print("取不到每手信息,重试中: {}".format(data))
continue
elif lot_size <= 0:
raise BaseException("该股票每手信息错误,可能不支持交易 code ={}".format(test_code))
print("get order book...")
ret, data = quote_ctx.get_order_book(test_code) # 得到第十档数据
if ret != ft.RET_OK:
continue
# 计算交易价格
bid_order_arr = data['Bid']
if is_hk_trade:
if len(bid_order_arr) != 10:
continue
# 港股下单: 价格定为第十档
price, _, _ = bid_order_arr[9]
else:
if len(bid_order_arr) == 0:
continue
# 美股下单: 价格定为一档降10%
price, _, _ = bid_order_arr[0]
price = round(price * 0.9, 2)
qty = lot_size
# 价格和数量判断
if qty == 0 or price == 0.0:
continue
# 下单
order_id = 0
print("place order : price={} qty={} code={}".format(price, qty, test_code))
ret_code, ret_data = trade_ctx.place_order(price=price, qty=qty, code=test_code, trd_side=ft.TrdSide.BUY,
order_type=ft.OrderType.NORMAL, trd_env=trade_env, acc_id=acc_id)
is_fire_trade = True
print('下单ret={} data={}'.format(ret_code, ret_data))
if ret_code == ft.RET_OK:
row = ret_data.iloc[0]
order_id = row['order_id']
# 循环撤单
sleep(2)
if order_id:
while True:
ret_code, ret_data = trade_ctx.order_list_query(order_id=order_id, status_filter_list=[], code='',
start='', end='', trd_env=trade_env, acc_id=acc_id)
if ret_code != ft.RET_OK:
sleep(2)
continue
order_status = ret_data.iloc[0]['order_status']
if order_status in [ft.OrderStatus.SUBMIT_FAILED, ft.OrderStatus.TIMEOUT, ft.OrderStatus.FILLED_ALL,
ft.OrderStatus.FAILED, ft.OrderStatus.DELETED]:
break
print("cancel order...")
ret_code, ret_data = trade_ctx.modify_order(modify_order_op=ft.ModifyOrderOp.CANCEL, order_id=order_id,
price=price, qty=qty, adjust_limit=0, trd_env=trade_env, acc_id=acc_id)
print("撤单ret={} data={}".format(ret_code, ret_data))
if ret_code == ft.RET_OK:
break
else:
sleep(2)
# destroy object
quote_ctx.close()
trade_ctx.close() | 使用请先配置正确参数:
:param api_svr_ip: (string) ip
:param api_svr_port: (string) ip
:param unlock_password: (string) 交易解锁密码, 必需修改!
:param test_code: (string) 股票
:param trade_env: 参见 ft.TrdEnv的定义
    :param acc_id: 交易子账号id | Below is the instruction that describes the task:
### Input:
使用请先配置正确参数:
:param api_svr_ip: (string) ip
:param api_svr_port: (string) ip
:param unlock_password: (string) 交易解锁密码, 必需修改!
:param test_code: (string) 股票
:param trade_env: 参见 ft.TrdEnv的定义
:param acc_id: 交易子账号id
### Response:
def make_order_and_cancel(api_svr_ip, api_svr_port, unlock_password, test_code, trade_env, acc_id):
"""
使用请先配置正确参数:
:param api_svr_ip: (string) ip
:param api_svr_port: (string) ip
:param unlock_password: (string) 交易解锁密码, 必需修改!
:param test_code: (string) 股票
:param trade_env: 参见 ft.TrdEnv的定义
:param acc_id: 交易子账号id
"""
if unlock_password == "":
raise Exception("请先配置交易解锁密码!")
quote_ctx = ft.OpenQuoteContext(host=api_svr_ip, port=api_svr_port) # 创建行情api
quote_ctx.subscribe(test_code, ft.SubType.ORDER_BOOK) # 定阅摆盘
# 创建交易api
is_hk_trade = 'HK.' in test_code
if is_hk_trade:
trade_ctx = ft.OpenHKTradeContext(host=api_svr_ip, port=api_svr_port)
else:
trade_ctx = ft.OpenUSTradeContext(host=api_svr_ip, port=api_svr_port)
# 每手股数
lot_size = 0
is_unlock_trade = False
is_fire_trade = False
while not is_fire_trade:
sleep(2)
# 解锁交易
if not is_unlock_trade and trade_env == ft.TrdEnv.REAL:
print("unlocking trade...")
ret_code, ret_data = trade_ctx.unlock_trade(unlock_password)
is_unlock_trade = (ret_code == ft.RET_OK)
if not is_unlock_trade:
print("请求交易解锁失败:{}".format(ret_data))
break
if lot_size == 0:
print("get lotsize...")
ret, data = quote_ctx.get_market_snapshot(test_code)
lot_size = data.iloc[0]['lot_size'] if ret == ft.RET_OK else 0
if ret != ft.RET_OK:
print("取不到每手信息,重试中: {}".format(data))
continue
elif lot_size <= 0:
raise BaseException("该股票每手信息错误,可能不支持交易 code ={}".format(test_code))
print("get order book...")
ret, data = quote_ctx.get_order_book(test_code) # 得到第十档数据
if ret != ft.RET_OK:
continue
# 计算交易价格
bid_order_arr = data['Bid']
if is_hk_trade:
if len(bid_order_arr) != 10:
continue
# 港股下单: 价格定为第十档
price, _, _ = bid_order_arr[9]
else:
if len(bid_order_arr) == 0:
continue
# 美股下单: 价格定为一档降10%
price, _, _ = bid_order_arr[0]
price = round(price * 0.9, 2)
qty = lot_size
# 价格和数量判断
if qty == 0 or price == 0.0:
continue
# 下单
order_id = 0
print("place order : price={} qty={} code={}".format(price, qty, test_code))
ret_code, ret_data = trade_ctx.place_order(price=price, qty=qty, code=test_code, trd_side=ft.TrdSide.BUY,
order_type=ft.OrderType.NORMAL, trd_env=trade_env, acc_id=acc_id)
is_fire_trade = True
print('下单ret={} data={}'.format(ret_code, ret_data))
if ret_code == ft.RET_OK:
row = ret_data.iloc[0]
order_id = row['order_id']
# 循环撤单
sleep(2)
if order_id:
while True:
ret_code, ret_data = trade_ctx.order_list_query(order_id=order_id, status_filter_list=[], code='',
start='', end='', trd_env=trade_env, acc_id=acc_id)
if ret_code != ft.RET_OK:
sleep(2)
continue
order_status = ret_data.iloc[0]['order_status']
if order_status in [ft.OrderStatus.SUBMIT_FAILED, ft.OrderStatus.TIMEOUT, ft.OrderStatus.FILLED_ALL,
ft.OrderStatus.FAILED, ft.OrderStatus.DELETED]:
break
print("cancel order...")
ret_code, ret_data = trade_ctx.modify_order(modify_order_op=ft.ModifyOrderOp.CANCEL, order_id=order_id,
price=price, qty=qty, adjust_limit=0, trd_env=trade_env, acc_id=acc_id)
print("撤单ret={} data={}".format(ret_code, ret_data))
if ret_code == ft.RET_OK:
break
else:
sleep(2)
# destroy object
quote_ctx.close()
trade_ctx.close() |
def _print_help(self):
"""Custom help message to group commands by functionality."""
msg = """Commands (type help <command> for details)
CLI: help history exit quit
Session, General: set load save reset
Session, Access Control: allowaccess denyaccess clearaccess
Session, Replication: allowrep denyrep preferrep blockrep
removerep numberrep clearrep
Read Operations: get meta list log resolve
Write Operations: update create package archive
updateaccess updatereplication
Utilities: listformats listnodes search ping
Write Operation Queue: queue run edit clearqueue
Command History: Arrow Up, Arrow Down
Command Editing: Arrow Left, Arrow Right, Delete
"""
if platform.system() != "Windows":
msg += """Command Completion: Single Tab: Complete unique command
Double Tab: Display possible commands
"""
        d1_cli.impl.util.print_info(msg) | Custom help message to group commands by functionality. | Below is the instruction that describes the task:
### Input:
Custom help message to group commands by functionality.
### Response:
def _print_help(self):
"""Custom help message to group commands by functionality."""
msg = """Commands (type help <command> for details)
CLI: help history exit quit
Session, General: set load save reset
Session, Access Control: allowaccess denyaccess clearaccess
Session, Replication: allowrep denyrep preferrep blockrep
removerep numberrep clearrep
Read Operations: get meta list log resolve
Write Operations: update create package archive
updateaccess updatereplication
Utilities: listformats listnodes search ping
Write Operation Queue: queue run edit clearqueue
Command History: Arrow Up, Arrow Down
Command Editing: Arrow Left, Arrow Right, Delete
"""
if platform.system() != "Windows":
msg += """Command Completion: Single Tab: Complete unique command
Double Tab: Display possible commands
"""
d1_cli.impl.util.print_info(msg) |
def check_error(self):
"""Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: status_code, error_msg, payload
:rtype: tuple
"""
if not self.is_done:
raise CloudUnhandledError("Need to check if request is done, before checking for error")
response = self.db[self.async_id]
error_msg = response["error"]
status_code = int(response["status_code"])
payload = response["payload"]
return status_code, error_msg, payload | Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: status_code, error_msg, payload
    :rtype: tuple | Below is the instruction that describes the task:
### Input:
Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: status_code, error_msg, payload
:rtype: tuple
### Response:
def check_error(self):
"""Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: status_code, error_msg, payload
:rtype: tuple
"""
if not self.is_done:
raise CloudUnhandledError("Need to check if request is done, before checking for error")
response = self.db[self.async_id]
error_msg = response["error"]
status_code = int(response["status_code"])
payload = response["payload"]
return status_code, error_msg, payload |
def band_names(self):
"""Raster affine."""
if self._band_names is None:
self._populate_from_rasterio_object(read_image=False)
        return self._band_names | Raster affine. | Below is the instruction that describes the task:
### Input:
Raster affine.
### Response:
def band_names(self):
"""Raster affine."""
if self._band_names is None:
self._populate_from_rasterio_object(read_image=False)
return self._band_names |
def visualize_explanation(explanation, label=None):
"""
Given the output of the explain() endpoint, produces a terminal visual that plots response strength over a sequence
"""
if not sys.version_info[:2] >= (3, 5):
raise IndicoError("Python >= 3.5+ is required for explanation visualization")
try:
from colr import Colr as C
except ImportError:
raise IndicoError("Package colr >= 0.8.1 is required for explanation visualization.")
cursor = 0
text = explanation['text']
for token in explanation.get('token_predictions'):
try:
class_confidence = token.get('prediction')[label]
except KeyError:
raise IndicoError("Invalid label: {}".format(label))
if class_confidence > 0.5:
fg_color = (255, 255, 255)
else:
fg_color = (0, 0, 0)
rg_value = 255 - int(class_confidence * 255)
token_end = token.get('token').get('end')
token_text = text[cursor:token_end]
cursor = token_end
sys.stdout.write(
str(C().b_rgb(
rg_value, rg_value, 255
).rgb(
fg_color[0], fg_color[1], fg_color[2], token_text
))
)
sys.stdout.write("\n")
    sys.stdout.flush() | Given the output of the explain() endpoint, produces a terminal visual that plots response strength over a sequence | Below is the instruction that describes the task:
### Input:
Given the output of the explain() endpoint, produces a terminal visual that plots response strength over a sequence
### Response:
def visualize_explanation(explanation, label=None):
"""
Given the output of the explain() endpoint, produces a terminal visual that plots response strength over a sequence
"""
if not sys.version_info[:2] >= (3, 5):
raise IndicoError("Python >= 3.5+ is required for explanation visualization")
try:
from colr import Colr as C
except ImportError:
raise IndicoError("Package colr >= 0.8.1 is required for explanation visualization.")
cursor = 0
text = explanation['text']
for token in explanation.get('token_predictions'):
try:
class_confidence = token.get('prediction')[label]
except KeyError:
raise IndicoError("Invalid label: {}".format(label))
if class_confidence > 0.5:
fg_color = (255, 255, 255)
else:
fg_color = (0, 0, 0)
rg_value = 255 - int(class_confidence * 255)
token_end = token.get('token').get('end')
token_text = text[cursor:token_end]
cursor = token_end
sys.stdout.write(
str(C().b_rgb(
rg_value, rg_value, 255
).rgb(
fg_color[0], fg_color[1], fg_color[2], token_text
))
)
sys.stdout.write("\n")
sys.stdout.flush() |
def description(self):
"""Returns the full docstring information for the element suggested
as a completion."""
result = ""
if isinstance(self._element, ValueElement):
if self._element.kind is not None:
result = "{}({}) | {}".format(self._element.dtype, self._element.kind,
self._element.summary)
else:
result = "{} | {}".format(self._element.dtype,
self._element.summary)
elif isinstance(self._element, Executable):
result = "({})".format(self._element.parameters_as_string())
elif isinstance(self._element, str):
result = "Intrinsic Fortran Symbol"
elif isinstance(self._element, TypeExecutable):
result = self._type_description()
#Clean off any line breaks from the XML and excessive whitespace.
cleaned = re.sub("\s+", " ", result.replace("\n", " "))
return cleaned | Returns the full docstring information for the element suggested
        as a completion. | Below is the instruction that describes the task:
### Input:
Returns the full docstring information for the element suggested
as a completion.
### Response:
def description(self):
"""Returns the full docstring information for the element suggested
as a completion."""
result = ""
if isinstance(self._element, ValueElement):
if self._element.kind is not None:
result = "{}({}) | {}".format(self._element.dtype, self._element.kind,
self._element.summary)
else:
result = "{} | {}".format(self._element.dtype,
self._element.summary)
elif isinstance(self._element, Executable):
result = "({})".format(self._element.parameters_as_string())
elif isinstance(self._element, str):
result = "Intrinsic Fortran Symbol"
elif isinstance(self._element, TypeExecutable):
result = self._type_description()
#Clean off any line breaks from the XML and excessive whitespace.
cleaned = re.sub("\s+", " ", result.replace("\n", " "))
return cleaned |
def process_dimensions(kdims, vdims):
"""Converts kdims and vdims to Dimension objects.
Args:
kdims: List or single key dimension(s) specified as strings,
tuples dicts or Dimension objects.
vdims: List or single value dimension(s) specified as strings,
tuples dicts or Dimension objects.
Returns:
Dictionary containing kdims and vdims converted to Dimension
objects:
{'kdims': [Dimension('x')], 'vdims': [Dimension('y')]
"""
dimensions = {}
for group, dims in [('kdims', kdims), ('vdims', vdims)]:
if dims is None:
continue
elif isinstance(dims, (tuple, basestring, Dimension, dict)):
dims = [dims]
elif not isinstance(dims, list):
raise ValueError("%s argument expects a Dimension or list of dimensions, "
"specified as tuples, strings, dictionaries or Dimension "
"instances, not a %s type. Ensure you passed the data as the "
"first argument." % (group, type(dims).__name__))
for dim in dims:
if not isinstance(dim, (tuple, basestring, Dimension, dict)):
raise ValueError('Dimensions must be defined as a tuple, '
'string, dictionary or Dimension instance, '
'found a %s type.' % type(dim).__name__)
dimensions[group] = [asdim(d) for d in dims]
return dimensions | Converts kdims and vdims to Dimension objects.
Args:
kdims: List or single key dimension(s) specified as strings,
tuples dicts or Dimension objects.
vdims: List or single value dimension(s) specified as strings,
tuples dicts or Dimension objects.
Returns:
Dictionary containing kdims and vdims converted to Dimension
objects:
            {'kdims': [Dimension('x')], 'vdims': [Dimension('y')] | Below is the instruction that describes the task:
### Input:
Converts kdims and vdims to Dimension objects.
Args:
kdims: List or single key dimension(s) specified as strings,
tuples dicts or Dimension objects.
vdims: List or single value dimension(s) specified as strings,
tuples dicts or Dimension objects.
Returns:
Dictionary containing kdims and vdims converted to Dimension
objects:
{'kdims': [Dimension('x')], 'vdims': [Dimension('y')]
### Response:
def process_dimensions(kdims, vdims):
"""Converts kdims and vdims to Dimension objects.
Args:
kdims: List or single key dimension(s) specified as strings,
tuples dicts or Dimension objects.
vdims: List or single value dimension(s) specified as strings,
tuples dicts or Dimension objects.
Returns:
Dictionary containing kdims and vdims converted to Dimension
objects:
{'kdims': [Dimension('x')], 'vdims': [Dimension('y')]
"""
dimensions = {}
for group, dims in [('kdims', kdims), ('vdims', vdims)]:
if dims is None:
continue
elif isinstance(dims, (tuple, basestring, Dimension, dict)):
dims = [dims]
elif not isinstance(dims, list):
raise ValueError("%s argument expects a Dimension or list of dimensions, "
"specified as tuples, strings, dictionaries or Dimension "
"instances, not a %s type. Ensure you passed the data as the "
"first argument." % (group, type(dims).__name__))
for dim in dims:
if not isinstance(dim, (tuple, basestring, Dimension, dict)):
raise ValueError('Dimensions must be defined as a tuple, '
'string, dictionary or Dimension instance, '
'found a %s type.' % type(dim).__name__)
dimensions[group] = [asdim(d) for d in dims]
return dimensions |
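A hedged HoloViews sketch showing the spec forms this helper accepts (strings, tuples, Dimension objects) passed to an element; the data and labels are arbitrary:
import holoviews as hv

curve = hv.Curve([(0, 0), (1, 1), (2, 4)],
                 kdims=['x'],                    # plain string spec
                 vdims=[('y', 'Amplitude')])     # (name, label) tuple spec
print(curve.kdims, curve.vdims)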
def get_root(self):
"""Return the furthest ancestor of this node."""
if self.is_root_node():
return self
return self.get_ancestors().order_by(
"-%s__depth" % self._closure_parentref()
        )[0] | Return the furthest ancestor of this node. | Below is the instruction that describes the task:
### Input:
Return the furthest ancestor of this node.
### Response:
def get_root(self):
"""Return the furthest ancestor of this node."""
if self.is_root_node():
return self
return self.get_ancestors().order_by(
"-%s__depth" % self._closure_parentref()
)[0] |
def merge(self, imgs):
"""Merge image channels.
Parameters
----------
imgs : `list` of `PIL.Image.Image`
Returns
-------
`PIL.Image.Image`
Raises
------
ValueError
If image channel list is empty.
"""
if not imgs:
raise ValueError('empty channel list')
if len(imgs) == 1:
return imgs[0]
return Image.merge(self.mode, imgs) | Merge image channels.
Parameters
----------
imgs : `list` of `PIL.Image.Image`
Returns
-------
`PIL.Image.Image`
Raises
------
ValueError
            If image channel list is empty. | Below is the instruction that describes the task:
### Input:
Merge image channels.
Parameters
----------
imgs : `list` of `PIL.Image.Image`
Returns
-------
`PIL.Image.Image`
Raises
------
ValueError
If image channel list is empty.
### Response:
def merge(self, imgs):
"""Merge image channels.
Parameters
----------
imgs : `list` of `PIL.Image.Image`
Returns
-------
`PIL.Image.Image`
Raises
------
ValueError
If image channel list is empty.
"""
if not imgs:
raise ValueError('empty channel list')
if len(imgs) == 1:
return imgs[0]
return Image.merge(self.mode, imgs) |
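A quick Pillow check of the channel merge the fallback relies on (the solid colours are arbitrary):
from PIL import Image

r = Image.new('L', (4, 4), 255)
g = Image.new('L', (4, 4), 128)
b = Image.new('L', (4, 4), 0)
merged = Image.merge('RGB', [r, g, b])
print(merged.getpixel((0, 0)))   # -> (255, 128, 0)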
def density_profile(self,ixaxis='mass',ifig=None,colour=None,label=None,fname=None):
'''
Plot density as a function of either mass coordiate or radius.
Parameters
----------
ixaxis : string
'mass' or 'radius'
The default value is 'mass'
ifig : integer or string
The figure label
The default value is None
colour : string
What colour the line should be
The default value is None
label : string
Label for the line
The default value is None
fname : integer
What cycle to plot from (if SE output)
The default value is None
'''
pT=self._classTest()
# Class-specific things:
if pT is 'mesa_profile':
x = self.get(ixaxis)
if ixaxis is 'radius':
x = x*ast.rsun_cm
y = self.get('logRho')
elif pT is 'se':
if fname is None:
raise IOError("Please provide the cycle number fname")
x = self.se.get(fname,ixaxis)
y = np.log10(self.se.get(fname,'rho'))
else:
raise IOError("Sorry. the density_profile method is not available \
for this class")
# Plot-specific things:
if ixaxis is 'radius':
x = np.log10(x)
xlab='$\log_{10}(r\,/\,{\\rm cm})$'
else:
xlab='${\\rm Mass}\,/\,M_\odot$'
if ifig is not None:
pl.figure(ifig)
if label is not None:
if colour is not None:
pl.plot(x,y,color=colour,label=label)
else:
pl.plot(x,y,label=label)
pl.legend(loc='best').draw_frame(False)
else:
if colour is not None:
pl.plot(x,y,color=colour)
else:
pl.plot(x,y)
pl.xlabel(xlab)
pl.ylabel('$\log_{10}(\\rho\,/\,{\\rm g\,cm}^{-3})$') | Plot density as a function of either mass coordiate or radius.
Parameters
----------
ixaxis : string
'mass' or 'radius'
The default value is 'mass'
ifig : integer or string
The figure label
The default value is None
colour : string
What colour the line should be
The default value is None
label : string
Label for the line
The default value is None
fname : integer
What cycle to plot from (if SE output)
            The default value is None | Below is the instruction that describes the task:
### Input:
Plot density as a function of either mass coordiate or radius.
Parameters
----------
ixaxis : string
'mass' or 'radius'
The default value is 'mass'
ifig : integer or string
The figure label
The default value is None
colour : string
What colour the line should be
The default value is None
label : string
Label for the line
The default value is None
fname : integer
What cycle to plot from (if SE output)
The default value is None
### Response:
def density_profile(self,ixaxis='mass',ifig=None,colour=None,label=None,fname=None):
'''
Plot density as a function of either mass coordiate or radius.
Parameters
----------
ixaxis : string
'mass' or 'radius'
The default value is 'mass'
ifig : integer or string
The figure label
The default value is None
colour : string
What colour the line should be
The default value is None
label : string
Label for the line
The default value is None
fname : integer
What cycle to plot from (if SE output)
The default value is None
'''
pT=self._classTest()
# Class-specific things:
if pT is 'mesa_profile':
x = self.get(ixaxis)
if ixaxis is 'radius':
x = x*ast.rsun_cm
y = self.get('logRho')
elif pT is 'se':
if fname is None:
raise IOError("Please provide the cycle number fname")
x = self.se.get(fname,ixaxis)
y = np.log10(self.se.get(fname,'rho'))
else:
raise IOError("Sorry. the density_profile method is not available \
for this class")
# Plot-specific things:
if ixaxis is 'radius':
x = np.log10(x)
xlab='$\log_{10}(r\,/\,{\\rm cm})$'
else:
xlab='${\\rm Mass}\,/\,M_\odot$'
if ifig is not None:
pl.figure(ifig)
if label is not None:
if colour is not None:
pl.plot(x,y,color=colour,label=label)
else:
pl.plot(x,y,label=label)
pl.legend(loc='best').draw_frame(False)
else:
if colour is not None:
pl.plot(x,y,color=colour)
else:
pl.plot(x,y)
pl.xlabel(xlab)
pl.ylabel('$\log_{10}(\\rho\,/\,{\\rm g\,cm}^{-3})$') |
def circle_radii(params, xedge, yedge):
"""Compute the distance to the center from cartesian coordinates
This method is used for fitting a circle to a set of contour
points.
Parameters
----------
params: lmfit.Parameters
Must contain the keys:
- "cx": origin of x coordinate [px]
- "cy": origin of y coordinate [px]
xedge: 1D np.ndarray
Edge coordinates x [px]
yedge: 1D np.ndarray
Edge coordinates y [px]
Returns
-------
radii: 1D np.ndarray
Radii corresponding to edge coordinates relative to origin
"""
cx = params["cx"].value
cy = params["cy"].value
radii = np.sqrt((cx - xedge)**2 + (cy - yedge)**2)
return radii | Compute the distance to the center from cartesian coordinates
This method is used for fitting a circle to a set of contour
points.
Parameters
----------
params: lmfit.Parameters
Must contain the keys:
- "cx": origin of x coordinate [px]
- "cy": origin of y coordinate [px]
xedge: 1D np.ndarray
Edge coordinates x [px]
yedge: 1D np.ndarray
Edge coordinates y [px]
Returns
-------
radii: 1D np.ndarray
        Radii corresponding to edge coordinates relative to origin | Below is the instruction that describes the task:
### Input:
Compute the distance to the center from cartesian coordinates
This method is used for fitting a circle to a set of contour
points.
Parameters
----------
params: lmfit.Parameters
Must contain the keys:
- "cx": origin of x coordinate [px]
- "cy": origin of y coordinate [px]
xedge: 1D np.ndarray
Edge coordinates x [px]
yedge: 1D np.ndarray
Edge coordinates y [px]
Returns
-------
radii: 1D np.ndarray
Radii corresponding to edge coordinates relative to origin
### Response:
def circle_radii(params, xedge, yedge):
"""Compute the distance to the center from cartesian coordinates
This method is used for fitting a circle to a set of contour
points.
Parameters
----------
params: lmfit.Parameters
Must contain the keys:
- "cx": origin of x coordinate [px]
- "cy": origin of y coordinate [px]
xedge: 1D np.ndarray
Edge coordinates x [px]
yedge: 1D np.ndarray
Edge coordinates y [px]
Returns
-------
radii: 1D np.ndarray
Radii corresponding to edge coordinates relative to origin
"""
cx = params["cx"].value
cy = params["cy"].value
radii = np.sqrt((cx - xedge)**2 + (cy - yedge)**2)
return radii |
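A hedged lmfit sketch that uses circle_radii (assumed in scope from above) as the residual backbone of a centre fit; the synthetic contour is made up:
import numpy as np
import lmfit

def residual(params, xedge, yedge):
    # At the true centre all radii are equal, so radius minus its mean vanishes.
    radii = circle_radii(params, xedge, yedge)
    return radii - radii.mean()

theta = np.linspace(0, 2 * np.pi, 100)
xedge = 3.0 + 10.0 * np.cos(theta)
yedge = -2.0 + 10.0 * np.sin(theta)

params = lmfit.Parameters()
params.add('cx', value=0.0)
params.add('cy', value=0.0)
result = lmfit.minimize(residual, params, args=(xedge, yedge))
print(result.params['cx'].value, result.params['cy'].value)   # close to 3.0 and -2.0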
def convert_all(self):
'''Convert all links in URL table.'''
for url_record in self._url_table.get_all():
if url_record.status != Status.done:
continue
        self.convert_by_record(url_record) | Convert all links in URL table. | Below is the instruction that describes the task:
### Input:
Convert all links in URL table.
### Response:
def convert_all(self):
'''Convert all links in URL table.'''
for url_record in self._url_table.get_all():
if url_record.status != Status.done:
continue
self.convert_by_record(url_record) |
def missing_pids(self):
"""Filter persistent identifiers."""
missing = []
for p in self.pids:
try:
PersistentIdentifier.get(p.pid_type, p.pid_value)
except PIDDoesNotExistError:
missing.append(p)
        return missing | Filter persistent identifiers. | Below is the instruction that describes the task:
### Input:
Filter persistent identifiers.
### Response:
def missing_pids(self):
"""Filter persistent identifiers."""
missing = []
for p in self.pids:
try:
PersistentIdentifier.get(p.pid_type, p.pid_value)
except PIDDoesNotExistError:
missing.append(p)
return missing |
def which(software, strip_newline=True):
'''get_install will return the path to where an executable is installed.
'''
if software is None:
software = "singularity"
cmd = ['which', software ]
try:
result = run_command(cmd)
if strip_newline is True:
result['message'] = result['message'].strip('\n')
return result
except: # FileNotFoundError
        return None | get_install will return the path to where an executable is installed. | Below is the instruction that describes the task:
### Input:
get_install will return the path to where an executable is installed.
### Response:
def which(software, strip_newline=True):
'''get_install will return the path to where an executable is installed.
'''
if software is None:
software = "singularity"
cmd = ['which', software ]
try:
result = run_command(cmd)
if strip_newline is True:
result['message'] = result['message'].strip('\n')
return result
except: # FileNotFoundError
return None |
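For comparison, the standard library performs the same lookup without spawning a subprocess:
import shutil

print(shutil.which('singularity'))   # absolute path, or None if not on PATH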
def migrate_keys(self, host, port, keys, dest_db, timeout, *,
copy=False, replace=False):
"""Atomically transfer keys from one Redis instance to another one.
Keys argument must be list/tuple of keys to migrate.
"""
if not isinstance(host, str):
raise TypeError("host argument must be str")
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int")
if not isinstance(dest_db, int):
raise TypeError("dest_db argument must be int")
if not isinstance(keys, (list, tuple)):
raise TypeError("keys argument must be list or tuple")
if not host:
raise ValueError("Got empty host")
if dest_db < 0:
raise ValueError("dest_db must be greater equal 0")
if timeout < 0:
raise ValueError("timeout must be greater equal 0")
if not keys:
raise ValueError("keys must not be empty")
flags = []
if copy:
flags.append(b'COPY')
if replace:
flags.append(b'REPLACE')
flags.append(b'KEYS')
flags.extend(keys)
fut = self.execute(b'MIGRATE', host, port,
"", dest_db, timeout, *flags)
return wait_ok(fut) | Atomically transfer keys from one Redis instance to another one.
    Keys argument must be list/tuple of keys to migrate. | Below is the instruction that describes the task:
### Input:
Atomically transfer keys from one Redis instance to another one.
Keys argument must be list/tuple of keys to migrate.
### Response:
def migrate_keys(self, host, port, keys, dest_db, timeout, *,
copy=False, replace=False):
"""Atomically transfer keys from one Redis instance to another one.
Keys argument must be list/tuple of keys to migrate.
"""
if not isinstance(host, str):
raise TypeError("host argument must be str")
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int")
if not isinstance(dest_db, int):
raise TypeError("dest_db argument must be int")
if not isinstance(keys, (list, tuple)):
raise TypeError("keys argument must be list or tuple")
if not host:
raise ValueError("Got empty host")
if dest_db < 0:
raise ValueError("dest_db must be greater equal 0")
if timeout < 0:
raise ValueError("timeout must be greater equal 0")
if not keys:
raise ValueError("keys must not be empty")
flags = []
if copy:
flags.append(b'COPY')
if replace:
flags.append(b'REPLACE')
flags.append(b'KEYS')
flags.extend(keys)
fut = self.execute(b'MIGRATE', host, port,
"", dest_db, timeout, *flags)
return wait_ok(fut) |
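A hedged usage sketch, assuming an aioredis-1.x style client exposes this mixin method; the hosts, ports and key names are placeholders:
import asyncio
import aioredis

async def main():
    redis = await aioredis.create_redis('redis://localhost:6379')
    await redis.migrate_keys('10.0.0.2', 6380, ['k1', 'k2'],
                             dest_db=0, timeout=1000, copy=True, replace=True)
    redis.close()
    await redis.wait_closed()

asyncio.run(main())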
def is_admin(controller, client, actor):
"""Used to determine whether someone issuing a command is an admin.
By default, checks to see if there's a line of the type nick=host that
matches the command's actor in the [admins] section of the config file,
or a key that matches the entire mask (e.g. "foo@bar" or "foo@bar=1").
"""
config = controller.config
if not config.has_section("admins"):
logging.debug("Ignoring is_admin check - no [admins] config found.")
return False
for key,val in config.items("admins"):
if actor == User(key):
logging.debug("is_admin: %r matches admin %r", actor, key)
return True
if actor.nick.lower() == key.lower() and actor.host.lower() == val.lower():
logging.debug("is_admin: %r matches admin %r=%r", actor, key, val)
return True
logging.debug("is_admin: %r is not an admin.", actor)
return False | Used to determine whether someone issuing a command is an admin.
By default, checks to see if there's a line of the type nick=host that
matches the command's actor in the [admins] section of the config file,
    or a key that matches the entire mask (e.g. "foo@bar" or "foo@bar=1"). | Below is the instruction that describes the task:
### Input:
Used to determine whether someone issuing a command is an admin.
By default, checks to see if there's a line of the type nick=host that
matches the command's actor in the [admins] section of the config file,
or a key that matches the entire mask (e.g. "foo@bar" or "foo@bar=1").
### Response:
def is_admin(controller, client, actor):
"""Used to determine whether someone issuing a command is an admin.
By default, checks to see if there's a line of the type nick=host that
matches the command's actor in the [admins] section of the config file,
or a key that matches the entire mask (e.g. "foo@bar" or "foo@bar=1").
"""
config = controller.config
if not config.has_section("admins"):
logging.debug("Ignoring is_admin check - no [admins] config found.")
return False
for key,val in config.items("admins"):
if actor == User(key):
logging.debug("is_admin: %r matches admin %r", actor, key)
return True
if actor.nick.lower() == key.lower() and actor.host.lower() == val.lower():
logging.debug("is_admin: %r matches admin %r=%r", actor, key, val)
return True
logging.debug("is_admin: %r is not an admin.", actor)
return False |
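A self-contained look at just the [admins] lookup with configparser; the nick/host pair is made up and the User-mask matching of the real helper is omitted:
import configparser

config = configparser.ConfigParser()
config.read_string("""
[admins]
alice = irc.example.com
""")

actor_nick, actor_host = 'Alice', 'irc.example.com'
for key, val in config.items('admins'):
    # configparser lower-cases option names, so compare case-insensitively.
    if actor_nick.lower() == key.lower() and actor_host.lower() == val.lower():
        print('admin match:', key)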
def get(self, username):
"""
If using the remember option and KitsuAuth is storing your tokens, this function will retrieve one.
:param username: The username whose token we are retrieving
:return: A token, NotFound or NotSaving error
"""
if not self.remember:
raise NotSaving
if username not in self.token_storage:
raise UserNotFound
if self.token_storage[username]['expiration'] < time.time():
new_token = self.refresh(self.token_storage[username]['refresh'])
self.token_storage[username]['token'] = new_token[0]
self.token_storage[username]['expiration'] = new_token[1]
return new_token[0]
else:
return self.token_storage[username]['token'] | If using the remember option and KitsuAuth is storing your tokens, this function will retrieve one.
:param username: The username whose token we are retrieving
    :return: A token, NotFound or NotSaving error | Below is the instruction that describes the task:
### Input:
If using the remember option and KitsuAuth is storing your tokens, this function will retrieve one.
:param username: The username whose token we are retrieving
:return: A token, NotFound or NotSaving error
### Response:
def get(self, username):
"""
If using the remember option and KitsuAuth is storing your tokens, this function will retrieve one.
:param username: The username whose token we are retrieving
:return: A token, NotFound or NotSaving error
"""
if not self.remember:
raise NotSaving
if username not in self.token_storage:
raise UserNotFound
if self.token_storage[username]['expiration'] < time.time():
new_token = self.refresh(self.token_storage[username]['refresh'])
self.token_storage[username]['token'] = new_token[0]
self.token_storage[username]['expiration'] = new_token[1]
return new_token[0]
else:
return self.token_storage[username]['token'] |
def new(cls, pn, comment="", email=""):
"""
Create a new User ID or photo.
:param pn: User ID name, or photo. If this is a ``bytearray``, it will be loaded as a photo.
Otherwise, it will be used as the name field for a User ID.
:type pn: ``bytearray``, ``str``, ``unicode``
:param comment: The comment field for a User ID. Ignored if this is a photo.
:type comment: ``str``, ``unicode``
:param email: The email address field for a User ID. Ignored if this is a photo.
:type email: ``str``, ``unicode``
:returns: :py:obj:`PGPUID`
"""
uid = PGPUID()
if isinstance(pn, bytearray):
uid._uid = UserAttribute()
uid._uid.image.image = pn
uid._uid.image.iencoding = ImageEncoding.encodingof(pn)
uid._uid.update_hlen()
else:
uid._uid = UserID()
uid._uid.name = pn
uid._uid.comment = comment
uid._uid.email = email
uid._uid.update_hlen()
return uid | Create a new User ID or photo.
:param pn: User ID name, or photo. If this is a ``bytearray``, it will be loaded as a photo.
Otherwise, it will be used as the name field for a User ID.
:type pn: ``bytearray``, ``str``, ``unicode``
:param comment: The comment field for a User ID. Ignored if this is a photo.
:type comment: ``str``, ``unicode``
:param email: The email address field for a User ID. Ignored if this is a photo.
:type email: ``str``, ``unicode``
        :returns: :py:obj:`PGPUID` | Below is the instruction that describes the task:
### Input:
Create a new User ID or photo.
:param pn: User ID name, or photo. If this is a ``bytearray``, it will be loaded as a photo.
Otherwise, it will be used as the name field for a User ID.
:type pn: ``bytearray``, ``str``, ``unicode``
:param comment: The comment field for a User ID. Ignored if this is a photo.
:type comment: ``str``, ``unicode``
:param email: The email address field for a User ID. Ignored if this is a photo.
:type email: ``str``, ``unicode``
:returns: :py:obj:`PGPUID`
### Response:
def new(cls, pn, comment="", email=""):
"""
Create a new User ID or photo.
:param pn: User ID name, or photo. If this is a ``bytearray``, it will be loaded as a photo.
Otherwise, it will be used as the name field for a User ID.
:type pn: ``bytearray``, ``str``, ``unicode``
:param comment: The comment field for a User ID. Ignored if this is a photo.
:type comment: ``str``, ``unicode``
:param email: The email address field for a User ID. Ignored if this is a photo.
:type email: ``str``, ``unicode``
:returns: :py:obj:`PGPUID`
"""
uid = PGPUID()
if isinstance(pn, bytearray):
uid._uid = UserAttribute()
uid._uid.image.image = pn
uid._uid.image.iencoding = ImageEncoding.encodingof(pn)
uid._uid.update_hlen()
else:
uid._uid = UserID()
uid._uid.name = pn
uid._uid.comment = comment
uid._uid.email = email
uid._uid.update_hlen()
return uid |
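A hedged pgpy sketch attaching a freshly created User ID to a new key; the key size and preference lists follow the pgpy documentation and the identity strings are made up:
import pgpy
from pgpy.constants import (PubKeyAlgorithm, KeyFlags, HashAlgorithm,
                            SymmetricKeyAlgorithm, CompressionAlgorithm)

key = pgpy.PGPKey.new(PubKeyAlgorithm.RSAEncryptOrSign, 4096)
uid = pgpy.PGPUID.new('Alice Example', comment='demo key', email='[email protected]')
key.add_uid(uid,
            usage={KeyFlags.Sign, KeyFlags.EncryptCommunications},
            hashes=[HashAlgorithm.SHA256],
            ciphers=[SymmetricKeyAlgorithm.AES256],
            compression=[CompressionAlgorithm.ZLIB])
print(key.userids[0].name, key.userids[0].email)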
def ExpandPath(path, opts=None):
"""Applies all expansion mechanisms to the given path.
Args:
path: A path to expand.
opts: A `PathOpts` object.
Yields:
All paths possible to obtain from a given path by performing expansions.
"""
precondition.AssertType(path, Text)
for grouped_path in ExpandGroups(path):
for globbed_path in ExpandGlobs(grouped_path, opts):
yield globbed_path | Applies all expansion mechanisms to the given path.
Args:
path: A path to expand.
opts: A `PathOpts` object.
Yields:
    All paths possible to obtain from a given path by performing expansions. | Below is the instruction that describes the task:
### Input:
Applies all expansion mechanisms to the given path.
Args:
path: A path to expand.
opts: A `PathOpts` object.
Yields:
All paths possible to obtain from a given path by performing expansions.
### Response:
def ExpandPath(path, opts=None):
"""Applies all expansion mechanisms to the given path.
Args:
path: A path to expand.
opts: A `PathOpts` object.
Yields:
All paths possible to obtain from a given path by performing expansions.
"""
precondition.AssertType(path, Text)
for grouped_path in ExpandGroups(path):
for globbed_path in ExpandGlobs(grouped_path, opts):
yield globbed_path |
def _set_ca(self, v, load=False):
"""
Setter method for ca, mapped from YANG variable /rbridge_id/crypto/ca (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ca is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ca() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("trustpoint",ca.ca, yang_name="ca", rest_name="ca", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='trustpoint', extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}), is_container='list', yang_name="ca", rest_name="ca", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-crypto', defining_module='brocade-crypto', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ca must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("trustpoint",ca.ca, yang_name="ca", rest_name="ca", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='trustpoint', extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}), is_container='list', yang_name="ca", rest_name="ca", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-crypto', defining_module='brocade-crypto', yang_type='list', is_config=True)""",
})
self.__ca = t
if hasattr(self, '_set'):
self._set() | Setter method for ca, mapped from YANG variable /rbridge_id/crypto/ca (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ca is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ca() directly. | Below is the instruction that describes the task:
### Input:
Setter method for ca, mapped from YANG variable /rbridge_id/crypto/ca (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ca is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ca() directly.
### Response:
def _set_ca(self, v, load=False):
"""
Setter method for ca, mapped from YANG variable /rbridge_id/crypto/ca (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ca is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ca() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("trustpoint",ca.ca, yang_name="ca", rest_name="ca", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='trustpoint', extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}), is_container='list', yang_name="ca", rest_name="ca", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-crypto', defining_module='brocade-crypto', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ca must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("trustpoint",ca.ca, yang_name="ca", rest_name="ca", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='trustpoint', extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}), is_container='list', yang_name="ca", rest_name="ca", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure TrustpointCA', u'cli-suppress-list-no': None, u'callpoint': u'crypto_ca_cp', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-crypto', defining_module='brocade-crypto', yang_type='list', is_config=True)""",
})
self.__ca = t
if hasattr(self, '_set'):
self._set() |
def generate(variables, templates_path, main_template):
"""
:Parameters:
variables : dict
Template parameters, passed through.
templates_path : str
Root directory for transclusions.
main_template : str
Contents of the main template.
Returns the rendered output.
"""
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(templates_path),
lstrip_blocks=True,
trim_blocks=True
)
def norm_alg_filename(alg_name):
if alg_name in variables['globals']['algorithm_filename_parts']:
return variables['globals']['algorithm_filename_parts'][alg_name]
else:
raise KeyError("{0} not found in globals.algorithm_filename_parts"
.format(alg_name))
env.globals.update(norm_alg_filename=norm_alg_filename)
template = env.from_string(main_template)
return template.render(variables) + "\n" | :Parameters:
variables : dict
Template parameters, passed through.
templates_path : str
Root directory for transclusions.
main_template : str
Contents of the main template.
Returns the rendered output. | Below is the instruction that describes the task:
### Input:
:Parameters:
variables : dict
Template parameters, passed through.
templates_path : str
Root directory for transclusions.
main_template : str
Contents of the main template.
Returns the rendered output.
### Response:
def generate(variables, templates_path, main_template):
"""
:Parameters:
variables : dict
Template parameters, passed through.
templates_path : str
Root directory for transclusions.
main_template : str
Contents of the main template.
Returns the rendered output.
"""
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(templates_path),
lstrip_blocks=True,
trim_blocks=True
)
def norm_alg_filename(alg_name):
if alg_name in variables['globals']['algorithm_filename_parts']:
return variables['globals']['algorithm_filename_parts'][alg_name]
else:
raise KeyError("{0} not found in globals.algorithm_filename_parts"
.format(alg_name))
env.globals.update(norm_alg_filename=norm_alg_filename)
template = env.from_string(main_template)
return template.render(variables) + "\n" |
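A small self-contained sketch of the renderer above; the template string and variable names are invented, and the current directory is passed as `templates_path` since no transclusions are used:
>>> main_template = "Known algorithms: {{ algorithms | join(', ') }}"
>>> variables = {"algorithms": ["pbkdf2", "bcrypt"],
...              "globals": {"algorithm_filename_parts": {}}}  # only needed if the template calls norm_alg_filename
>>> generate(variables, ".", main_template)
'Known algorithms: pbkdf2, bcrypt\n'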
def parameters_from_model(parameters_model):
"""
Get the tool parameters model from dictionaries
:param parameters_model: The parameters as a mongoengine model
:return: The tool parameters as a dictionary
"""
parameters = {}
for p in parameters_model:
if p.is_function:
code, defaults, closure = pickle.loads(p.value)
parameters[p.key] = func_load(code, defaults, closure, globs=globals())
elif p.is_set:
parameters[p.key] = set(p.value)
else:
parameters[p.key] = p.value
return parameters | Get the tool parameters model from dictionaries
:param parameters_model: The parameters as a mongoengine model
:return: The tool parameters as a dictionary | Below is the instruction that describes the task:
### Input:
Get the tool parameters model from dictionaries
:param parameters_model: The parameters as a mongoengine model
:return: The tool parameters as a dictionary
### Response:
def parameters_from_model(parameters_model):
"""
Get the tool parameters model from dictionaries
:param parameters_model: The parameters as a mongoengine model
:return: The tool parameters as a dictionary
"""
parameters = {}
for p in parameters_model:
if p.is_function:
code, defaults, closure = pickle.loads(p.value)
parameters[p.key] = func_load(code, defaults, closure, globs=globals())
elif p.is_set:
parameters[p.key] = set(p.value)
else:
parameters[p.key] = p.value
return parameters |
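An illustrative sketch only: the real argument is a mongoengine model, but any iterable of objects exposing `key`, `value`, `is_function` and `is_set` exercises the same branches. The `SimpleNamespace` stand-ins below are hypothetical.
>>> from types import SimpleNamespace
>>> model = [
...     SimpleNamespace(key="threshold", value=0.5, is_function=False, is_set=False),
...     SimpleNamespace(key="labels", value=[1, 2], is_function=False, is_set=True),
... ]
>>> parameters_from_model(model)
{'threshold': 0.5, 'labels': {1, 2}}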
def add_scope(self, scope_type, scope_name, scope_start, is_method=False):
"""we identified a scope and add it to positions."""
if self._curr is not None:
self._curr['end'] = scope_start - 1 # close last scope
self._curr = {
'type': scope_type, 'name': scope_name,
'start': scope_start, 'end': scope_start
}
if is_method and self._positions:
last = self._positions[-1]
if not 'methods' in last:
last['methods'] = []
last['methods'].append(self._curr)
else:
self._positions.append(self._curr) | we identified a scope and add it to positions. | Below is the instruction that describes the task:
### Input:
we identified a scope and add it to positions.
### Response:
def add_scope(self, scope_type, scope_name, scope_start, is_method=False):
"""we identified a scope and add it to positions."""
if self._curr is not None:
self._curr['end'] = scope_start - 1 # close last scope
self._curr = {
'type': scope_type, 'name': scope_name,
'start': scope_start, 'end': scope_start
}
if is_method and self._positions:
last = self._positions[-1]
if not 'methods' in last:
last['methods'] = []
last['methods'].append(self._curr)
else:
self._positions.append(self._curr) |
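The enclosing class is not shown in this record, so the sketch below assumes an instance `tracker` of it with `_curr = None` and `_positions = []` already initialised; the names and line numbers are invented.
>>> tracker.add_scope("class", "Parser", scope_start=1)                 # opens a top-level scope
>>> tracker.add_scope("def", "parse", scope_start=10, is_method=True)   # attached to Parser's 'methods'
>>> tracker.add_scope("def", "helper", scope_start=42)                  # closes 'parse' at line 41
>>> [p["name"] for p in tracker._positions]
['Parser', 'helper']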
def iter_variants_by_names(self, names):
"""Iterates over the genotypes for variants using a list of names.
Args:
names (list): The list of names for variant extraction.
"""
for name in names:
for result in self.get_variant_by_name(name):
yield result | Iterates over the genotypes for variants using a list of names.
Args:
names (list): The list of names for variant extraction. | Below is the instruction that describes the task:
### Input:
Iterates over the genotypes for variants using a list of names.
Args:
names (list): The list of names for variant extraction.
### Response:
def iter_variants_by_names(self, names):
"""Iterates over the genotypes for variants using a list of names.
Args:
names (list): The list of names for variant extraction.
"""
for name in names:
for result in self.get_variant_by_name(name):
yield result |
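A usage sketch; `reader` stands for whatever genotype-reader object this method is defined on (not shown in this record), and the variant names are placeholders.
>>> results = list(reader.iter_variants_by_names(["rs12345", "rs98765"]))  # one entry per match from get_variant_by_name()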
def get_all_results_from_jobs(user, j_id):
"""Get all results from job.
"""
job = v1_utils.verify_existence_and_get(j_id, _TABLE)
if not user.is_in_team(job['team_id']) and not user.is_read_only_user():
raise dci_exc.Unauthorized()
# get testscases from tests_results
query = sql.select([models.TESTS_RESULTS]). \
where(models.TESTS_RESULTS.c.job_id == job['id'])
all_tests_results = flask.g.db_conn.execute(query).fetchall()
results = []
for test_result in all_tests_results:
test_result = dict(test_result)
results.append({'filename': test_result['name'],
'name': test_result['name'],
'total': test_result['total'],
'failures': test_result['failures'],
'errors': test_result['errors'],
'skips': test_result['skips'],
'time': test_result['time'],
'regressions': test_result['regressions'],
'successfixes': test_result['successfixes'],
'success': test_result['success'],
'file_id': test_result['file_id']})
return flask.jsonify({'results': results,
'_meta': {'count': len(results)}}) | Get all results from job. | Below is the instruction that describes the task:
### Input:
Get all results from job.
### Response:
def get_all_results_from_jobs(user, j_id):
"""Get all results from job.
"""
job = v1_utils.verify_existence_and_get(j_id, _TABLE)
if not user.is_in_team(job['team_id']) and not user.is_read_only_user():
raise dci_exc.Unauthorized()
# get testscases from tests_results
query = sql.select([models.TESTS_RESULTS]). \
where(models.TESTS_RESULTS.c.job_id == job['id'])
all_tests_results = flask.g.db_conn.execute(query).fetchall()
results = []
for test_result in all_tests_results:
test_result = dict(test_result)
results.append({'filename': test_result['name'],
'name': test_result['name'],
'total': test_result['total'],
'failures': test_result['failures'],
'errors': test_result['errors'],
'skips': test_result['skips'],
'time': test_result['time'],
'regressions': test_result['regressions'],
'successfixes': test_result['successfixes'],
'success': test_result['success'],
'file_id': test_result['file_id']})
return flask.jsonify({'results': results,
'_meta': {'count': len(results)}}) |
def get_version(filename, pattern=None):
"""Extract the __version__ from a file without importing it.
While you could get the __version__ by importing the module, the very act
of importing can cause unintended consequences. For example, Distribute's
automatic 2to3 support will break. Instead, this searches the file for a
line that starts with __version__, and extract the version number by
regular expression matching.
By default, two or three dot-separated digits are recognized, but by
passing a pattern parameter, you can recognize just about anything. Use
the `version` group name to specify the match group.
:param filename: The name of the file to search.
:type filename: string
:param pattern: Optional alternative regular expression pattern to use.
:type pattern: string
:return: The version that was extracted.
:rtype: string
"""
if pattern is None:
cre = DEFAULT_VERSION_RE
else:
cre = re.compile(pattern)
with open(filename) as fp:
for line in fp:
if line.startswith('__version__'):
mo = cre.search(line)
assert mo, 'No valid __version__ string found'
return mo.group('version')
raise AssertionError('No __version__ assignment found') | Extract the __version__ from a file without importing it.
While you could get the __version__ by importing the module, the very act
of importing can cause unintended consequences. For example, Distribute's
automatic 2to3 support will break. Instead, this searches the file for a
line that starts with __version__, and extract the version number by
regular expression matching.
By default, two or three dot-separated digits are recognized, but by
passing a pattern parameter, you can recognize just about anything. Use
the `version` group name to specify the match group.
:param filename: The name of the file to search.
:type filename: string
:param pattern: Optional alternative regular expression pattern to use.
:type pattern: string
:return: The version that was extracted.
:rtype: string | Below is the instruction that describes the task:
### Input:
Extract the __version__ from a file without importing it.
While you could get the __version__ by importing the module, the very act
of importing can cause unintended consequences. For example, Distribute's
automatic 2to3 support will break. Instead, this searches the file for a
line that starts with __version__, and extract the version number by
regular expression matching.
By default, two or three dot-separated digits are recognized, but by
passing a pattern parameter, you can recognize just about anything. Use
the `version` group name to specify the match group.
:param filename: The name of the file to search.
:type filename: string
:param pattern: Optional alternative regular expression pattern to use.
:type pattern: string
:return: The version that was extracted.
:rtype: string
### Response:
def get_version(filename, pattern=None):
"""Extract the __version__ from a file without importing it.
While you could get the __version__ by importing the module, the very act
of importing can cause unintended consequences. For example, Distribute's
automatic 2to3 support will break. Instead, this searches the file for a
line that starts with __version__, and extract the version number by
regular expression matching.
By default, two or three dot-separated digits are recognized, but by
passing a pattern parameter, you can recognize just about anything. Use
the `version` group name to specify the match group.
:param filename: The name of the file to search.
:type filename: string
:param pattern: Optional alternative regular expression pattern to use.
:type pattern: string
:return: The version that was extracted.
:rtype: string
"""
if pattern is None:
cre = DEFAULT_VERSION_RE
else:
cre = re.compile(pattern)
with open(filename) as fp:
for line in fp:
if line.startswith('__version__'):
mo = cre.search(line)
assert mo, 'No valid __version__ string found'
return mo.group('version')
raise AssertionError('No __version__ assignment found') |
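A self-contained check of the default behaviour, assuming the module above (with its `DEFAULT_VERSION_RE`) is importable; the temporary file exists only for the example.
>>> import os, tempfile
>>> with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as fp:
...     _ = fp.write("__version__ = '1.2.3'\n")
>>> get_version(fp.name)
'1.2.3'
>>> os.unlink(fp.name)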
def info (logname, msg, *args, **kwargs):
"""Log an informational message.
return: None
"""
log = logging.getLogger(logname)
if log.isEnabledFor(logging.INFO):
_log(log.info, msg, args, **kwargs) | Log an informational message.
return: None | Below is the instruction that describes the task:
### Input:
Log an informational message.
return: None
### Response:
def info (logname, msg, *args, **kwargs):
"""Log an informational message.
return: None
"""
log = logging.getLogger(logname)
if log.isEnabledFor(logging.INFO):
_log(log.info, msg, args, **kwargs) |
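A minimal sketch; `_log` and any handler configuration are assumed to come from the same module, and the logger name is arbitrary.
>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> info("myapp", "checked %d of %d URLs", 10, 50)   # emitted only if INFO is enabled for 'myapp'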
def cuboid(space, min_pt=None, max_pt=None):
"""Rectangular cuboid.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be created.
min_pt : array-like of shape ``(space.ndim,)``, optional
Lower left corner of the cuboid. If ``None`` is given, a quarter
of the extent from ``space.min_pt`` towards the inside is chosen.
max_pt : array-like of shape ``(space.ndim,)``, optional
Upper right corner of the cuboid. If ``None`` is given, ``min_pt``
plus half the extent is chosen.
Returns
-------
phantom : `DiscretizedSpaceElement`
The generated cuboid phantom in ``space``.
Examples
--------
If both ``min_pt`` and ``max_pt`` are omitted, the cuboid lies in the
middle of the space domain and extends halfway towards all sides:
>>> space = odl.uniform_discr([0, 0], [1, 1], [4, 6])
>>> odl.phantom.cuboid(space)
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 0., 0., 0., 0., 0.]]
)
By specifying the corners, the cuboid can be arbitrarily placed and
scaled:
>>> odl.phantom.cuboid(space, [0.25, 0], [0.75, 0.5])
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0.]]
)
"""
dom_min_pt = np.asarray(space.domain.min())
dom_max_pt = np.asarray(space.domain.max())
if min_pt is None:
min_pt = dom_min_pt * 0.75 + dom_max_pt * 0.25
if max_pt is None:
max_pt = dom_min_pt * 0.25 + dom_max_pt * 0.75
min_pt = np.atleast_1d(min_pt)
max_pt = np.atleast_1d(max_pt)
if min_pt.shape != (space.ndim,):
raise ValueError('shape of `min_pt` must be {}, got {}'
''.format((space.ndim,), min_pt.shape))
if max_pt.shape != (space.ndim,):
raise ValueError('shape of `max_pt` must be {}, got {}'
''.format((space.ndim,), max_pt.shape))
def phantom(x):
result = True
for xi, xmin, xmax in zip(x, min_pt, max_pt):
result = (result &
np.less_equal(xmin, xi) & np.less_equal(xi, xmax))
return result
return space.element(phantom) | Rectangular cuboid.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be created.
min_pt : array-like of shape ``(space.ndim,)``, optional
Lower left corner of the cuboid. If ``None`` is given, a quarter
of the extent from ``space.min_pt`` towards the inside is chosen.
max_pt : array-like of shape ``(space.ndim,)``, optional
Upper right corner of the cuboid. If ``None`` is given, ``min_pt``
plus half the extent is chosen.
Returns
-------
phantom : `DiscretizedSpaceElement`
The generated cuboid phantom in ``space``.
Examples
--------
If both ``min_pt`` and ``max_pt`` are omitted, the cuboid lies in the
middle of the space domain and extends halfway towards all sides:
>>> space = odl.uniform_discr([0, 0], [1, 1], [4, 6])
>>> odl.phantom.cuboid(space)
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 0., 0., 0., 0., 0.]]
)
By specifying the corners, the cuboid can be arbitrarily placed and
scaled:
>>> odl.phantom.cuboid(space, [0.25, 0], [0.75, 0.5])
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0.]]
) | Below is the instruction that describes the task:
### Input:
Rectangular cuboid.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be created.
min_pt : array-like of shape ``(space.ndim,)``, optional
Lower left corner of the cuboid. If ``None`` is given, a quarter
of the extent from ``space.min_pt`` towards the inside is chosen.
max_pt : array-like of shape ``(space.ndim,)``, optional
Upper right corner of the cuboid. If ``None`` is given, ``min_pt``
plus half the extent is chosen.
Returns
-------
phantom : `DiscretizedSpaceElement`
The generated cuboid phantom in ``space``.
Examples
--------
If both ``min_pt`` and ``max_pt`` are omitted, the cuboid lies in the
middle of the space domain and extends halfway towards all sides:
>>> space = odl.uniform_discr([0, 0], [1, 1], [4, 6])
>>> odl.phantom.cuboid(space)
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 0., 0., 0., 0., 0.]]
)
By specifying the corners, the cuboid can be arbitrarily placed and
scaled:
>>> odl.phantom.cuboid(space, [0.25, 0], [0.75, 0.5])
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0.]]
)
### Response:
def cuboid(space, min_pt=None, max_pt=None):
"""Rectangular cuboid.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be created.
min_pt : array-like of shape ``(space.ndim,)``, optional
Lower left corner of the cuboid. If ``None`` is given, a quarter
of the extent from ``space.min_pt`` towards the inside is chosen.
max_pt : array-like of shape ``(space.ndim,)``, optional
Upper right corner of the cuboid. If ``None`` is given, ``min_pt``
plus half the extent is chosen.
Returns
-------
phantom : `DiscretizedSpaceElement`
The generated cuboid phantom in ``space``.
Examples
--------
If both ``min_pt`` and ``max_pt`` are omitted, the cuboid lies in the
middle of the space domain and extends halfway towards all sides:
>>> space = odl.uniform_discr([0, 0], [1, 1], [4, 6])
>>> odl.phantom.cuboid(space)
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 0.],
[ 0., 0., 0., 0., 0., 0.]]
)
By specifying the corners, the cuboid can be arbitrarily placed and
scaled:
>>> odl.phantom.cuboid(space, [0.25, 0], [0.75, 0.5])
uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element(
[[ 0., 0., 0., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 1., 1., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0.]]
)
"""
dom_min_pt = np.asarray(space.domain.min())
dom_max_pt = np.asarray(space.domain.max())
if min_pt is None:
min_pt = dom_min_pt * 0.75 + dom_max_pt * 0.25
if max_pt is None:
max_pt = dom_min_pt * 0.25 + dom_max_pt * 0.75
min_pt = np.atleast_1d(min_pt)
max_pt = np.atleast_1d(max_pt)
if min_pt.shape != (space.ndim,):
raise ValueError('shape of `min_pt` must be {}, got {}'
''.format((space.ndim,), min_pt.shape))
if max_pt.shape != (space.ndim,):
raise ValueError('shape of `max_pt` must be {}, got {}'
''.format((space.ndim,), max_pt.shape))
def phantom(x):
result = True
for xi, xmin, xmax in zip(x, min_pt, max_pt):
result = (result &
np.less_equal(xmin, xi) & np.less_equal(xi, xmax))
return result
return space.element(phantom) |
def _get_spaces(self):
"""
Get the marketplace services.
"""
guid = self.api.config.get_organization_guid()
uri = '/v2/organizations/%s/spaces' % (guid)
return self.api.get(uri) | Get the marketplace services. | Below is the the instruction that describes the task:
### Input:
Get the marketplace services.
### Response:
def _get_spaces(self):
"""
Get the marketplace services.
"""
guid = self.api.config.get_organization_guid()
uri = '/v2/organizations/%s/spaces' % (guid)
return self.api.get(uri) |
def jackknife_connectivity(measures, data, var, nfft=512, leaveout=1, n_jobs=1,
verbose=0):
"""Calculate jackknife estimates of connectivity.
For each jackknife estimate a block of trials is left out. This is repeated
until each trial was left out exactly once. The number of estimates depends
on the number of trials and the value of `leaveout`. It is calculated by
repeats = `n_trials` // `leaveout`.
.. note:: Parameter `var` will be modified by the function. Treat as
undefined after the function returns.
Parameters
----------
measures : str or list of str
Name(s) of the connectivity measure(s) to calculate. See
:class:`Connectivity` for supported measures.
data : array, shape (trials, channels, samples)
Time series data (multiple trials).
var : VARBase-like object
Instance of a VAR model.
nfft : int, optional
Number of frequency bins to calculate. Note that these points cover the
range between 0 and half the sampling rate.
leaveout : int, optional
Number of trials to leave out in each estimate.
n_jobs : int | None, optional
Number of jobs to run in parallel. If set to None, joblib is not used
at all. See `joblib.Parallel` for details.
verbose : int, optional
Verbosity level passed to joblib.
Returns
-------
result : array, shape (`repeats`, n_channels, n_channels, nfft)
Values of the connectivity measure for each surrogate. If
`measure_names` is a list of strings a dictionary is returned, where
each key is the name of the measure, and the corresponding values are
arrays of shape (`repeats`, n_channels, n_channels, nfft).
"""
data = atleast_3d(data)
t, m, n = data.shape
assert(t > 1)
if leaveout < 1:
leaveout = int(leaveout * t)
num_blocks = t // leaveout
mask = lambda block: [i for i in range(t) if i < block*leaveout or
i >= (block + 1) * leaveout]
par, func = parallel_loop(_calc_jackknife, n_jobs=n_jobs, verbose=verbose)
output = par(func(data[mask(b), :, :], var, measures, nfft)
for b in range(num_blocks))
return convert_output_(output, measures) | Calculate jackknife estimates of connectivity.
For each jackknife estimate a block of trials is left out. This is repeated
until each trial was left out exactly once. The number of estimates depends
on the number of trials and the value of `leaveout`. It is calculated by
repeats = `n_trials` // `leaveout`.
.. note:: Parameter `var` will be modified by the function. Treat as
undefined after the function returns.
Parameters
----------
measures : str or list of str
Name(s) of the connectivity measure(s) to calculate. See
:class:`Connectivity` for supported measures.
data : array, shape (trials, channels, samples)
Time series data (multiple trials).
var : VARBase-like object
Instance of a VAR model.
nfft : int, optional
Number of frequency bins to calculate. Note that these points cover the
range between 0 and half the sampling rate.
leaveout : int, optional
Number of trials to leave out in each estimate.
n_jobs : int | None, optional
Number of jobs to run in parallel. If set to None, joblib is not used
at all. See `joblib.Parallel` for details.
verbose : int, optional
Verbosity level passed to joblib.
Returns
-------
result : array, shape (`repeats`, n_channels, n_channels, nfft)
Values of the connectivity measure for each surrogate. If
`measure_names` is a list of strings a dictionary is returned, where
each key is the name of the measure, and the corresponding values are
arrays of shape (`repeats`, n_channels, n_channels, nfft). | Below is the instruction that describes the task:
### Input:
Calculate jackknife estimates of connectivity.
For each jackknife estimate a block of trials is left out. This is repeated
until each trial was left out exactly once. The number of estimates depends
on the number of trials and the value of `leaveout`. It is calculated by
repeats = `n_trials` // `leaveout`.
.. note:: Parameter `var` will be modified by the function. Treat as
undefined after the function returns.
Parameters
----------
measures : str or list of str
Name(s) of the connectivity measure(s) to calculate. See
:class:`Connectivity` for supported measures.
data : array, shape (trials, channels, samples)
Time series data (multiple trials).
var : VARBase-like object
Instance of a VAR model.
nfft : int, optional
Number of frequency bins to calculate. Note that these points cover the
range between 0 and half the sampling rate.
leaveout : int, optional
Number of trials to leave out in each estimate.
n_jobs : int | None, optional
Number of jobs to run in parallel. If set to None, joblib is not used
at all. See `joblib.Parallel` for details.
verbose : int, optional
Verbosity level passed to joblib.
Returns
-------
result : array, shape (`repeats`, n_channels, n_channels, nfft)
Values of the connectivity measure for each surrogate. If
`measure_names` is a list of strings a dictionary is returned, where
each key is the name of the measure, and the corresponding values are
arrays of shape (`repeats`, n_channels, n_channels, nfft).
### Response:
def jackknife_connectivity(measures, data, var, nfft=512, leaveout=1, n_jobs=1,
verbose=0):
"""Calculate jackknife estimates of connectivity.
For each jackknife estimate a block of trials is left out. This is repeated
until each trial was left out exactly once. The number of estimates depends
on the number of trials and the value of `leaveout`. It is calculated by
repeats = `n_trials` // `leaveout`.
.. note:: Parameter `var` will be modified by the function. Treat as
undefined after the function returns.
Parameters
----------
measures : str or list of str
Name(s) of the connectivity measure(s) to calculate. See
:class:`Connectivity` for supported measures.
data : array, shape (trials, channels, samples)
Time series data (multiple trials).
var : VARBase-like object
Instance of a VAR model.
nfft : int, optional
Number of frequency bins to calculate. Note that these points cover the
range between 0 and half the sampling rate.
leaveout : int, optional
Number of trials to leave out in each estimate.
n_jobs : int | None, optional
Number of jobs to run in parallel. If set to None, joblib is not used
at all. See `joblib.Parallel` for details.
verbose : int, optional
Verbosity level passed to joblib.
Returns
-------
result : array, shape (`repeats`, n_channels, n_channels, nfft)
Values of the connectivity measure for each surrogate. If
`measure_names` is a list of strings a dictionary is returned, where
each key is the name of the measure, and the corresponding values are
arrays of shape (`repeats`, n_channels, n_channels, nfft).
"""
data = atleast_3d(data)
t, m, n = data.shape
assert(t > 1)
if leaveout < 1:
leaveout = int(leaveout * t)
num_blocks = t // leaveout
mask = lambda block: [i for i in range(t) if i < block*leaveout or
i >= (block + 1) * leaveout]
par, func = parallel_loop(_calc_jackknife, n_jobs=n_jobs, verbose=verbose)
output = par(func(data[mask(b), :, :], var, measures, nfft)
for b in range(num_blocks))
return convert_output_(output, measures) |
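A rough sketch of the calling convention; the data shape is invented, `var` must be a VARBase-like model from the same package (its construction is not shown in this record), and "PDC" is used only as an example of a supported measure name.
>>> import numpy as np
>>> data = np.random.randn(20, 4, 256)   # 20 trials, 4 channels, 256 samples
>>> pdc = jackknife_connectivity("PDC", data, var, nfft=128, leaveout=1)
>>> # pdc has shape (n_trials // leaveout, 4, 4, 128) == (20, 4, 4, 128)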
def get_directly_accessible_stops_within_distance(self, stop, distance):
"""
Returns stops that are accessible without transfer from the stops that are within a specific walking distance
:param stop: int
:param distance: int
:return:
"""
query = """SELECT stop.* FROM
(SELECT st2.* FROM
(SELECT * FROM stop_distances
WHERE from_stop_I = %s) sd,
(SELECT * FROM stop_times) st1,
(SELECT * FROM stop_times) st2
WHERE sd.d < %s AND sd.to_stop_I = st1.stop_I AND st1.trip_I = st2.trip_I
GROUP BY st2.stop_I) sq,
(SELECT * FROM stops) stop
WHERE sq.stop_I = stop.stop_I""" % (stop, distance)
return pd.read_sql_query(query, self.conn) | Returns stops that are accessible without transfer from the stops that are within a specific walking distance
:param stop: int
:param distance: int
:return: | Below is the instruction that describes the task:
### Input:
Returns stops that are accessible without transfer from the stops that are within a specific walking distance
:param stop: int
:param distance: int
:return:
### Response:
def get_directly_accessible_stops_within_distance(self, stop, distance):
"""
Returns stops that are accessible without transfer from the stops that are within a specific walking distance
:param stop: int
:param distance: int
:return:
"""
query = """SELECT stop.* FROM
(SELECT st2.* FROM
(SELECT * FROM stop_distances
WHERE from_stop_I = %s) sd,
(SELECT * FROM stop_times) st1,
(SELECT * FROM stop_times) st2
WHERE sd.d < %s AND sd.to_stop_I = st1.stop_I AND st1.trip_I = st2.trip_I
GROUP BY st2.stop_I) sq,
(SELECT * FROM stops) stop
WHERE sq.stop_I = stop.stop_I""" % (stop, distance)
return pd.read_sql_query(query, self.conn) |
def get_rec_dtype(self, **keys):
"""
Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns
"""
colnums = keys.get('colnums', None)
vstorage = keys.get('vstorage', self._vstorage)
if colnums is None:
colnums = self._extract_colnums()
descr = []
isvararray = numpy.zeros(len(colnums), dtype=numpy.bool)
for i, colnum in enumerate(colnums):
dt, isvar = self.get_rec_column_descr(colnum, vstorage)
descr.append(dt)
isvararray[i] = isvar
dtype = numpy.dtype(descr)
offsets = numpy.zeros(len(colnums), dtype='i8')
for i, n in enumerate(dtype.names):
offsets[i] = dtype.fields[n][1]
return dtype, offsets, isvararray | Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns | Below is the instruction that describes the task:
### Input:
Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns
### Response:
def get_rec_dtype(self, **keys):
"""
Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns
"""
colnums = keys.get('colnums', None)
vstorage = keys.get('vstorage', self._vstorage)
if colnums is None:
colnums = self._extract_colnums()
descr = []
isvararray = numpy.zeros(len(colnums), dtype=numpy.bool)
for i, colnum in enumerate(colnums):
dt, isvar = self.get_rec_column_descr(colnum, vstorage)
descr.append(dt)
isvararray[i] = isvar
dtype = numpy.dtype(descr)
offsets = numpy.zeros(len(colnums), dtype='i8')
for i, n in enumerate(dtype.names):
offsets[i] = dtype.fields[n][1]
return dtype, offsets, isvararray |
def do_GET_body(self):
"""Create body of GET."""
iiif = self.iiif
if (len(self.path) > 1024):
raise IIIFError(code=414,
text="URI Too Long: Max 1024 chars, got %d\n" % len(self.path))
try:
# self.path has leading / then identifier/params...
self.path = self.path.lstrip('/')
sys.stderr.write("path = %s" % (self.path))
iiif.parse_url(self.path)
except Exception as e:
# Something completely unexpected => 500
raise IIIFError(code=500,
text="Internal Server Error: unexpected exception parsing request (" + str(e) + ")")
# Now we have a full iiif request
if (re.match('[\w\.\-]+$', iiif.identifier)):
file = os.path.join(TESTIMAGE_DIR, iiif.identifier)
if (not os.path.isfile(file)):
images_available = ""
for image_file in os.listdir(TESTIMAGE_DIR):
if (os.path.isfile(os.path.join(TESTIMAGE_DIR, image_file))):
images_available += " " + image_file + "\n"
raise IIIFError(code=404, parameter="identifier",
text="Image resource '" + iiif.identifier + "' not found. Local image files available:\n" + images_available)
else:
raise IIIFError(code=404, parameter="identifier",
text="Image resource '" + iiif.identifier + "' not found. Only local test images and http: URIs for images are supported.\n")
# Now know image is OK
manipulator = IIIFRequestHandler.manipulator_class()
# Stash manipulator object so we can cleanup after reading file
self.manipulator = manipulator
self.compliance_uri = manipulator.compliance_uri
if (iiif.info):
# get size
manipulator.srcfile = file
manipulator.do_first()
# most of info.json comes from config, a few things
# specific to image
i = IIIFInfo()
i.identifier = self.iiif.identifier
i.width = manipulator.width
i.height = manipulator.height
import io
return(io.StringIO(i.as_json()), "application/json")
else:
(outfile, mime_type) = manipulator.derive(file, iiif)
return(open(outfile, 'r'), mime_type) | Create body of GET. | Below is the instruction that describes the task:
### Input:
Create body of GET.
### Response:
def do_GET_body(self):
"""Create body of GET."""
iiif = self.iiif
if (len(self.path) > 1024):
raise IIIFError(code=414,
text="URI Too Long: Max 1024 chars, got %d\n" % len(self.path))
try:
# self.path has leading / then identifier/params...
self.path = self.path.lstrip('/')
sys.stderr.write("path = %s" % (self.path))
iiif.parse_url(self.path)
except Exception as e:
# Something completely unexpected => 500
raise IIIFError(code=500,
text="Internal Server Error: unexpected exception parsing request (" + str(e) + ")")
# Now we have a full iiif request
if (re.match('[\w\.\-]+$', iiif.identifier)):
file = os.path.join(TESTIMAGE_DIR, iiif.identifier)
if (not os.path.isfile(file)):
images_available = ""
for image_file in os.listdir(TESTIMAGE_DIR):
if (os.path.isfile(os.path.join(TESTIMAGE_DIR, image_file))):
images_available += " " + image_file + "\n"
raise IIIFError(code=404, parameter="identifier",
text="Image resource '" + iiif.identifier + "' not found. Local image files available:\n" + images_available)
else:
raise IIIFError(code=404, parameter="identifier",
text="Image resource '" + iiif.identifier + "' not found. Only local test images and http: URIs for images are supported.\n")
# Now know image is OK
manipulator = IIIFRequestHandler.manipulator_class()
# Stash manipulator object so we can cleanup after reading file
self.manipulator = manipulator
self.compliance_uri = manipulator.compliance_uri
if (iiif.info):
# get size
manipulator.srcfile = file
manipulator.do_first()
# most of info.json comes from config, a few things
# specific to image
i = IIIFInfo()
i.identifier = self.iiif.identifier
i.width = manipulator.width
i.height = manipulator.height
import io
return(io.StringIO(i.as_json()), "application/json")
else:
(outfile, mime_type) = manipulator.derive(file, iiif)
return(open(outfile, 'r'), mime_type) |
def translate_char(source_char, carrier, reverse=False, encoding=False):
u"""translate unicode emoji character to unicode carrier emoji character (or reverse)
Attributes:
source_char - emoji character. it must be unicode instance or have to set `encoding` attribute to decode
carrier - the target carrier
reverse - if you want to translate CARRIER => UNICODE, turn it True
encoding - encoding name for decode (Default is None)
"""
if not isinstance(source_char, unicode) and encoding:
source_char = source_char.decode(encoding, 'replace')
elif not isinstance(source_char, unicode):
raise AttributeError(u"`source_char` must be decoded to `unicode` or set `encoding` attribute to decode `source_char`")
if len(source_char) > 1:
raise AttributeError(u"`source_char` must be a letter. use `translate` method insted.")
translate_dictionary = _loader.translate_dictionaries[carrier]
if not reverse:
translate_dictionary = translate_dictionary[0]
else:
translate_dictionary = translate_dictionary[1]
if not translate_dictionary:
return source_char
return translate_dictionary.get(source_char, source_char) | u"""translate unicode emoji character to unicode carrier emoji character (or reverse)
Attributes:
source_char - emoji character. it must be unicode instance or have to set `encoding` attribute to decode
carrier - the target carrier
reverse - if you want to translate CARRIER => UNICODE, turn it True
encoding - encoding name for decode (Default is None) | Below is the instruction that describes the task:
### Input:
u"""translate unicode emoji character to unicode carrier emoji character (or reverse)
Attributes:
source_char - emoji character. it must be unicode instance or have to set `encoding` attribute to decode
carrier - the target carrier
reverse - if you want to translate CARRIER => UNICODE, turn it True
encoding - encoding name for decode (Default is None)
### Response:
def translate_char(source_char, carrier, reverse=False, encoding=False):
u"""translate unicode emoji character to unicode carrier emoji character (or reverse)
Attributes:
source_char - emoji character. it must be unicode instance or have to set `encoding` attribute to decode
carrier - the target carrier
reverse - if you want to translate CARRIER => UNICODE, turn it True
encoding - encoding name for decode (Default is None)
"""
if not isinstance(source_char, unicode) and encoding:
source_char = source_char.decode(encoding, 'replace')
elif not isinstance(source_char, unicode):
raise AttributeError(u"`source_char` must be decoded to `unicode` or set `encoding` attribute to decode `source_char`")
if len(source_char) > 1:
raise AttributeError(u"`source_char` must be a letter. use `translate` method insted.")
translate_dictionary = _loader.translate_dictionaries[carrier]
if not reverse:
translate_dictionary = translate_dictionary[0]
else:
translate_dictionary = translate_dictionary[1]
if not translate_dictionary:
return source_char
return translate_dictionary.get(source_char, source_char) |
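A usage sketch; the carrier name "docomo" and the characters are illustrative, and the actual return values depend on the loaded translate dictionaries.
>>> sun = u"\u2600"                                              # BLACK SUN WITH RAYS
>>> to_carrier = translate_char(sun, "docomo")                   # unicode emoji -> carrier emoji
>>> to_unicode = translate_char(u"\ue63e", "docomo", reverse=True)   # carrier emoji -> unicode
>>> decoded = translate_char(b"\xe2\x98\x80", "docomo", encoding="utf-8")  # bytes are decoded first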
def snow(im, voxel_size=1,
boundary_faces=['top', 'bottom', 'left', 'right', 'front', 'back'],
marching_cubes_area=False):
r"""
Analyzes an image that has been partitioned into void and solid regions
and extracts the void and solid phase geometry as well as network
connectivity.
Parameters
----------
im : ND-array
Binary image in the Boolean form with True’s as void phase and False’s
as solid phase.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
image since the scale of the image is always 1 unit length per voxel.
boundary_faces : list of strings
Boundary faces labels are provided to assign hypothetical boundary
nodes having zero resistance to transport process. For cubical
geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
marching_cubes_area : bool
If ``True`` then the surface area and interfacial area between regions
will be using the marching cube algorithm. This is a more accurate
representation of area in extracted network, but is quite slow, so
it is ``False`` by default. The default method simply counts voxels
so does not correctly account for the voxelated nature of the images.
Returns
-------
A dictionary containing the void phase size data, as well as the network
topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command.
* ``net``: A dictionary containing all the void and solid phase size data,
as well as the network topological information. The dictionary names
use the OpenPNM convention (i.e. 'pore.coords', 'throat.conns') so it
may be converted directly to an OpenPNM network object using the
``update`` command.
* ``im``: The binary image of the void space
* ``dt``: The combined distance transform of the image
* ``regions``: The void and solid space partitioned into pores and solids
phases using a marker based watershed with the peaks found by the
SNOW Algorithm.
"""
# -------------------------------------------------------------------------
# SNOW void phase
regions = snow_partitioning(im=im, return_all=True)
im = regions.im
dt = regions.dt
regions = regions.regions
b_num = sp.amax(regions)
# -------------------------------------------------------------------------
# Boundary Conditions
regions = add_boundary_regions(regions=regions, faces=boundary_faces)
# -------------------------------------------------------------------------
# Padding distance transform and image to extract geometrical properties
dt = pad_faces(im=dt, faces=boundary_faces)
im = pad_faces(im=im, faces=boundary_faces)
regions = regions*im
regions = make_contiguous(regions)
# -------------------------------------------------------------------------
# Extract void and throat information from image
net = regions_to_network(im=regions, dt=dt, voxel_size=voxel_size)
# -------------------------------------------------------------------------
# Extract marching cube surface area and interfacial area of regions
if marching_cubes_area:
areas = region_surface_areas(regions=regions)
interface_area = region_interface_areas(regions=regions, areas=areas,
voxel_size=voxel_size)
net['pore.surface_area'] = areas * voxel_size**2
net['throat.area'] = interface_area.area
# -------------------------------------------------------------------------
# Find void to void connections of boundary and internal voids
boundary_labels = net['pore.label'] > b_num
loc1 = net['throat.conns'][:, 0] < b_num
loc2 = net['throat.conns'][:, 1] >= b_num
pore_labels = net['pore.label'] <= b_num
loc3 = net['throat.conns'][:, 0] < b_num
loc4 = net['throat.conns'][:, 1] < b_num
net['pore.boundary'] = boundary_labels
net['throat.boundary'] = loc1 * loc2
net['pore.internal'] = pore_labels
net['throat.internal'] = loc3 * loc4
# -------------------------------------------------------------------------
# label boundary cells
net = label_boundary_cells(network=net, boundary_faces=boundary_faces)
# -------------------------------------------------------------------------
# assign out values to dummy dict
temp = _net_dict(net)
temp.im = im.copy()
temp.dt = dt
temp.regions = regions
return temp | r"""
Analyzes an image that has been partitioned into void and solid regions
and extracts the void and solid phase geometry as well as network
connectivity.
Parameters
----------
im : ND-array
Binary image in the Boolean form with True’s as void phase and False’s
as solid phase.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
image since the scale of the image is always 1 unit length per voxel.
boundary_faces : list of strings
Boundary faces labels are provided to assign hypothetical boundary
nodes having zero resistance to transport process. For cubical
geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
marching_cubes_area : bool
If ``True`` then the surface area and interfacial area between regions
will be using the marching cube algorithm. This is a more accurate
representation of area in extracted network, but is quite slow, so
it is ``False`` by default. The default method simply counts voxels
so does not correctly account for the voxelated nature of the images.
Returns
-------
A dictionary containing the void phase size data, as well as the network
topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command.
* ``net``: A dictionary containing all the void and solid phase size data,
as well as the network topological information. The dictionary names
use the OpenPNM convention (i.e. 'pore.coords', 'throat.conns') so it
may be converted directly to an OpenPNM network object using the
``update`` command.
* ``im``: The binary image of the void space
* ``dt``: The combined distance transform of the image
* ``regions``: The void and solid space partitioned into pores and solids
phases using a marker based watershed with the peaks found by the
SNOW Algorithm. | Below is the instruction that describes the task:
### Input:
r"""
Analyzes an image that has been partitioned into void and solid regions
and extracts the void and solid phase geometry as well as network
connectivity.
Parameters
----------
im : ND-array
Binary image in the Boolean form with True’s as void phase and False’s
as solid phase.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
image since the scale of the image is always 1 unit length per voxel.
boundary_faces : list of strings
Boundary faces labels are provided to assign hypothetical boundary
nodes having zero resistance to transport process. For cubical
geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
marching_cubes_area : bool
If ``True`` then the surface area and interfacial area between regions
will be using the marching cube algorithm. This is a more accurate
representation of area in extracted network, but is quite slow, so
it is ``False`` by default. The default method simply counts voxels
so does not correctly account for the voxelated nature of the images.
Returns
-------
A dictionary containing the void phase size data, as well as the network
topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command.
* ``net``: A dictionary containing all the void and solid phase size data,
as well as the network topological information. The dictionary names
use the OpenPNM convention (i.e. 'pore.coords', 'throat.conns') so it
may be converted directly to an OpenPNM network object using the
``update`` command.
* ``im``: The binary image of the void space
* ``dt``: The combined distance transform of the image
* ``regions``: The void and solid space partitioned into pores and solids
phases using a marker based watershed with the peaks found by the
SNOW Algorithm.
### Response:
def snow(im, voxel_size=1,
boundary_faces=['top', 'bottom', 'left', 'right', 'front', 'back'],
marching_cubes_area=False):
r"""
Analyzes an image that has been partitioned into void and solid regions
and extracts the void and solid phase geometry as well as network
connectivity.
Parameters
----------
im : ND-array
Binary image in the Boolean form with True’s as void phase and False’s
as solid phase.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
image since the scale of the image is always 1 unit length per voxel.
boundary_faces : list of strings
Boundary faces labels are provided to assign hypothetical boundary
nodes having zero resistance to transport process. For cubical
geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
marching_cubes_area : bool
If ``True`` then the surface area and interfacial area between regions
will be using the marching cube algorithm. This is a more accurate
representation of area in extracted network, but is quite slow, so
it is ``False`` by default. The default method simply counts voxels
so does not correctly account for the voxelated nature of the images.
Returns
-------
A dictionary containing the void phase size data, as well as the network
topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command.
* ``net``: A dictionary containing all the void and solid phase size data,
as well as the network topological information. The dictionary names
use the OpenPNM convention (i.e. 'pore.coords', 'throat.conns') so it
may be converted directly to an OpenPNM network object using the
``update`` command.
* ``im``: The binary image of the void space
* ``dt``: The combined distance transform of the image
* ``regions``: The void and solid space partitioned into pores and solids
phases using a marker based watershed with the peaks found by the
SNOW Algorithm.
"""
# -------------------------------------------------------------------------
# SNOW void phase
regions = snow_partitioning(im=im, return_all=True)
im = regions.im
dt = regions.dt
regions = regions.regions
b_num = sp.amax(regions)
# -------------------------------------------------------------------------
# Boundary Conditions
regions = add_boundary_regions(regions=regions, faces=boundary_faces)
# -------------------------------------------------------------------------
# Padding distance transform and image to extract geometrical properties
dt = pad_faces(im=dt, faces=boundary_faces)
im = pad_faces(im=im, faces=boundary_faces)
regions = regions*im
regions = make_contiguous(regions)
# -------------------------------------------------------------------------
# Extract void and throat information from image
net = regions_to_network(im=regions, dt=dt, voxel_size=voxel_size)
# -------------------------------------------------------------------------
# Extract marching cube surface area and interfacial area of regions
if marching_cubes_area:
areas = region_surface_areas(regions=regions)
interface_area = region_interface_areas(regions=regions, areas=areas,
voxel_size=voxel_size)
net['pore.surface_area'] = areas * voxel_size**2
net['throat.area'] = interface_area.area
# -------------------------------------------------------------------------
# Find void to void connections of boundary and internal voids
boundary_labels = net['pore.label'] > b_num
loc1 = net['throat.conns'][:, 0] < b_num
loc2 = net['throat.conns'][:, 1] >= b_num
pore_labels = net['pore.label'] <= b_num
loc3 = net['throat.conns'][:, 0] < b_num
loc4 = net['throat.conns'][:, 1] < b_num
net['pore.boundary'] = boundary_labels
net['throat.boundary'] = loc1 * loc2
net['pore.internal'] = pore_labels
net['throat.internal'] = loc3 * loc4
# -------------------------------------------------------------------------
# label boundary cells
net = label_boundary_cells(network=net, boundary_faces=boundary_faces)
# -------------------------------------------------------------------------
# assign out values to dummy dict
temp = _net_dict(net)
temp.im = im.copy()
temp.dt = dt
temp.regions = regions
return temp |
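A hedged end-to-end sketch; `porespy.generators.blobs` is assumed to be available as a convenient source of a boolean test image, and every number below is arbitrary.
>>> import porespy as ps
>>> im = ps.generators.blobs(shape=[100, 100, 100], porosity=0.6)   # True == void phase
>>> out = snow(im, voxel_size=1e-6, boundary_faces=["left", "right"])
>>> regions = out.regions   # watershed labels on the boundary-padded image
>>> dt = out.dt             # matching distance transform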
def add_policy(self, name, policy_type, cooldown, change=None,
is_percent=False, desired_capacity=None, args=None):
"""
Adds a policy with the given values to this scaling group. The
'change' parameter is treated as an absolute amount, unless
'is_percent' is True, in which case it is treated as a percentage.
"""
return self.manager.add_policy(self, name, policy_type, cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args) | Adds a policy with the given values to this scaling group. The
'change' parameter is treated as an absolute amount, unless
'is_percent' is True, in which case it is treated as a percentage. | Below is the instruction that describes the task:
### Input:
Adds a policy with the given values to this scaling group. The
'change' parameter is treated as an absolute amount, unless
'is_percent' is True, in which case it is treated as a percentage.
### Response:
def add_policy(self, name, policy_type, cooldown, change=None,
is_percent=False, desired_capacity=None, args=None):
"""
Adds a policy with the given values to this scaling group. The
'change' parameter is treated as an absolute amount, unless
'is_percent' is True, in which case it is treated as a percentage.
"""
return self.manager.add_policy(self, name, policy_type, cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args) |
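A sketch of the calling convention; `group` stands for an existing scaling-group object of this class, the "webhook" policy type is only an example, and all numbers are invented.
>>> # Add two servers whenever the policy fires, with a five-minute cooldown.
>>> up = group.add_policy("scale up", policy_type="webhook", cooldown=300, change=2)
>>> # Scale down by 10 percent instead of an absolute amount.
>>> down = group.add_policy("scale down", "webhook", 300, change=-10, is_percent=True)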
def i2osp(x, x_len):
'''Converts the integer x to its big-endian representation of length
x_len.
'''
if x > 256**x_len:
raise exceptions.IntegerTooLarge
h = hex(x)[2:]
if h[-1] == 'L':
h = h[:-1]
if len(h) & 1 == 1:
h = '0%s' % h
x = binascii.unhexlify(h)
return b'\x00' * int(x_len-len(x)) + x | Converts the integer x to its big-endian representation of length
x_len. | Below is the instruction that describes the task:
### Input:
Converts the integer x to its big-endian representation of length
x_len.
### Response:
def i2osp(x, x_len):
'''Converts the integer x to its big-endian representation of length
x_len.
'''
if x > 256**x_len:
raise exceptions.IntegerTooLarge
h = hex(x)[2:]
if h[-1] == 'L':
h = h[:-1]
if len(h) & 1 == 1:
h = '0%s' % h
x = binascii.unhexlify(h)
return b'\x00' * int(x_len-len(x)) + x |
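A quick self-contained check (the values are arbitrary): 65537 is 0x010001, so padding it to four octets adds one leading zero byte.
>>> i2osp(65537, 4)
b'\x00\x01\x00\x01'
>>> len(i2osp(1, 8))   # small integers are left-padded with zero bytes
8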