code (string, 75–104k chars) | docstring (string, 1–46.9k chars) | text (string, 164–112k chars)
---|---|---|
def system_generate_batch_inputs(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /system/generateBatchInputs API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/generateBatchInputs
"""
return DXHTTPRequest('/system/generateBatchInputs', input_params, always_retry=always_retry, **kwargs) | Invokes the /system/generateBatchInputs API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/generateBatchInputs | Below is the instruction that describes the task:
### Input:
Invokes the /system/generateBatchInputs API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/generateBatchInputs
### Response:
def system_generate_batch_inputs(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /system/generateBatchInputs API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/generateBatchInputs
"""
return DXHTTPRequest('/system/generateBatchInputs', input_params, always_retry=always_retry, **kwargs) |
def create_dagrun(self,
run_id,
state,
execution_date,
start_date=None,
external_trigger=False,
conf=None,
session=None):
"""
Creates a dag run from this dag including the tasks associated with this dag.
Returns the dag run.
:param run_id: defines the run id for this dag run
:type run_id: str
:param execution_date: the execution date of this dag run
:type execution_date: datetime.datetime
:param state: the state of the dag run
:type state: airflow.utils.state.State
:param start_date: the date this dag run should be evaluated
:type start_date: datetime.datetime
:param external_trigger: whether this dag run is externally triggered
:type external_trigger: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session
"""
return self.get_dag().create_dagrun(run_id=run_id,
state=state,
execution_date=execution_date,
start_date=start_date,
external_trigger=external_trigger,
conf=conf,
session=session) | Creates a dag run from this dag including the tasks associated with this dag.
Returns the dag run.
:param run_id: defines the run id for this dag run
:type run_id: str
:param execution_date: the execution date of this dag run
:type execution_date: datetime.datetime
:param state: the state of the dag run
:type state: airflow.utils.state.State
:param start_date: the date this dag run should be evaluated
:type start_date: datetime.datetime
:param external_trigger: whether this dag run is externally triggered
:type external_trigger: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session | Below is the instruction that describes the task:
### Input:
Creates a dag run from this dag including the tasks associated with this dag.
Returns the dag run.
:param run_id: defines the run id for this dag run
:type run_id: str
:param execution_date: the execution date of this dag run
:type execution_date: datetime.datetime
:param state: the state of the dag run
:type state: airflow.utils.state.State
:param start_date: the date this dag run should be evaluated
:type start_date: datetime.datetime
:param external_trigger: whether this dag run is externally triggered
:type external_trigger: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session
### Response:
def create_dagrun(self,
run_id,
state,
execution_date,
start_date=None,
external_trigger=False,
conf=None,
session=None):
"""
Creates a dag run from this dag including the tasks associated with this dag.
Returns the dag run.
:param run_id: defines the run id for this dag run
:type run_id: str
:param execution_date: the execution date of this dag run
:type execution_date: datetime.datetime
:param state: the state of the dag run
:type state: airflow.utils.state.State
:param start_date: the date this dag run should be evaluated
:type start_date: datetime.datetime
:param external_trigger: whether this dag run is externally triggered
:type external_trigger: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session
"""
return self.get_dag().create_dagrun(run_id=run_id,
state=state,
execution_date=execution_date,
start_date=start_date,
external_trigger=external_trigger,
conf=conf,
session=session) |
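As an illustrative aside, here is a hedged sketch of how a method like this might be called; `dag_model` is a hypothetical DagModel instance (not defined in the entry above), and the run id, state, and date values are made up.

```python
from datetime import datetime

from airflow.utils.state import State

# Hypothetical usage sketch: dag_model is assumed to be an existing DagModel
# row whose DAG file is parseable, so get_dag() can resolve the real DAG.
dagrun = dag_model.create_dagrun(
    run_id="manual__2019-01-01T00:00:00",
    state=State.RUNNING,
    execution_date=datetime(2019, 1, 1),
    external_trigger=True,
)
print(dagrun.run_id)
```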
def resolved_packages(self):
"""Return a list of PackageVariant objects, or None if the resolve did
not complete or was unsuccessful.
"""
if (self.status != SolverStatus.solved):
return None
final_phase = self.phase_stack[-1]
return final_phase._get_solved_variants() | Return a list of PackageVariant objects, or None if the resolve did
not complete or was unsuccessful. | Below is the instruction that describes the task:
### Input:
Return a list of PackageVariant objects, or None if the resolve did
not complete or was unsuccessful.
### Response:
def resolved_packages(self):
"""Return a list of PackageVariant objects, or None if the resolve did
not complete or was unsuccessful.
"""
if (self.status != SolverStatus.solved):
return None
final_phase = self.phase_stack[-1]
return final_phase._get_solved_variants() |
def _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):
"""
Encrypts a value using an RSA public key via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to encrypt with
:param data:
A byte string of the data to encrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
flags = 0
if rsa_oaep_padding:
flags = Advapi32Const.CRYPT_OAEP
out_len = new(advapi32, 'DWORD *', len(data))
res = advapi32.CryptEncrypt(
certificate_or_public_key.ex_key_handle,
null(),
True,
flags,
null(),
out_len,
0
)
handle_error(res)
buffer_len = deref(out_len)
buffer = buffer_from_bytes(buffer_len)
write_to_buffer(buffer, data)
pointer_set(out_len, len(data))
res = advapi32.CryptEncrypt(
certificate_or_public_key.ex_key_handle,
null(),
True,
flags,
buffer,
out_len,
buffer_len
)
handle_error(res)
return bytes_from_buffer(buffer, deref(out_len))[::-1] | Encrypts a value using an RSA public key via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to encrypt with
:param data:
A byte string of the data to encrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext | Below is the instruction that describes the task:
### Input:
Encrypts a value using an RSA public key via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to encrypt with
:param data:
A byte string of the data to encrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
### Response:
def _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):
"""
Encrypts a value using an RSA public key via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to encrypt with
:param data:
A byte string of the data to encrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
flags = 0
if rsa_oaep_padding:
flags = Advapi32Const.CRYPT_OAEP
out_len = new(advapi32, 'DWORD *', len(data))
res = advapi32.CryptEncrypt(
certificate_or_public_key.ex_key_handle,
null(),
True,
flags,
null(),
out_len,
0
)
handle_error(res)
buffer_len = deref(out_len)
buffer = buffer_from_bytes(buffer_len)
write_to_buffer(buffer, data)
pointer_set(out_len, len(data))
res = advapi32.CryptEncrypt(
certificate_or_public_key.ex_key_handle,
null(),
True,
flags,
buffer,
out_len,
buffer_len
)
handle_error(res)
return bytes_from_buffer(buffer, deref(out_len))[::-1] |
def add(request, kind, method, *args):
"""
add(request, "mixpanel", "track", "purchase", {order: "1234", amount: "100"})
add(request, "google", "push", ["_addTrans", "1234", "Gondor", "100"])
"""
request.session.setdefault(_key_name(kind), []).append({
"method": method,
"args": args
}) | add(request, "mixpanel", "track", "purchase", {order: "1234", amount: "100"})
add(request, "google", "push", ["_addTrans", "1234", "Gondor", "100"]) | Below is the the instruction that describes the task:
### Input:
add(request, "mixpanel", "track", "purchase", {order: "1234", amount: "100"})
add(request, "google", "push", ["_addTrans", "1234", "Gondor", "100"])
### Response:
def add(request, kind, method, *args):
"""
add(request, "mixpanel", "track", "purchase", {order: "1234", amount: "100"})
add(request, "google", "push", ["_addTrans", "1234", "Gondor", "100"])
"""
request.session.setdefault(_key_name(kind), []).append({
"method": method,
"args": args
}) |
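A minimal, runnable sketch of what the helper does to the session, using a stand-in request object and a stand-in `_key_name`; the real key format is defined elsewhere in the library, and `add` is assumed to be the function from the entry above.

```python
class FakeRequest:
    """Stand-in for a Django request exposing a dict-like session."""
    def __init__(self):
        self.session = {}

def _key_name(kind):
    # stand-in only; the library's actual key format may differ
    return "analytics:%s" % kind

req = FakeRequest()
add(req, "mixpanel", "track", "purchase", {"order": "1234", "amount": "100"})
print(req.session)
# {'analytics:mixpanel': [{'method': 'track',
#                          'args': ('purchase', {'order': '1234', 'amount': '100'})}]}
```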
def main(bot):
"""
Entry point for the command line launcher.
:param bot: the IRC bot to run
:type bot: :class:`fatbotslim.irc.bot.IRC`
"""
greenlet = spawn(bot.run)
try:
greenlet.join()
except KeyboardInterrupt:
print '' # cosmetics matters
log.info("Killed by user, disconnecting...")
bot.disconnect()
finally:
greenlet.kill() | Entry point for the command line launcher.
:param bot: the IRC bot to run
:type bot: :class:`fatbotslim.irc.bot.IRC` | Below is the instruction that describes the task:
### Input:
Entry point for the command line launcher.
:param bot: the IRC bot to run
:type bot: :class:`fatbotslim.irc.bot.IRC`
### Response:
def main(bot):
"""
Entry point for the command line launcher.
:param bot: the IRC bot to run
:type bot: :class:`fatbotslim.irc.bot.IRC`
"""
greenlet = spawn(bot.run)
try:
greenlet.join()
except KeyboardInterrupt:
print '' # cosmetics matters
log.info("Killed by user, disconnecting...")
bot.disconnect()
finally:
greenlet.kill() |
def text(what="sentence", *args, **kwargs):
"""An aggregator for all above defined public methods."""
if what == "character":
return character(*args, **kwargs)
elif what == "characters":
return characters(*args, **kwargs)
elif what == "word":
return word(*args, **kwargs)
elif what == "words":
return words(*args, **kwargs)
elif what == "sentence":
return sentence(*args, **kwargs)
elif what == "sentences":
return sentences(*args, **kwargs)
elif what == "paragraph":
return paragraph(*args, **kwargs)
elif what == "paragraphs":
return paragraphs(*args, **kwargs)
elif what == "title":
return title(*args, **kwargs)
else:
raise NameError('No such method') | An aggregator for all above defined public methods. | Below is the instruction that describes the task:
### Input:
An aggregator for all above defined public methods.
### Response:
def text(what="sentence", *args, **kwargs):
"""An aggregator for all above defined public methods."""
if what == "character":
return character(*args, **kwargs)
elif what == "characters":
return characters(*args, **kwargs)
elif what == "word":
return word(*args, **kwargs)
elif what == "words":
return words(*args, **kwargs)
elif what == "sentence":
return sentence(*args, **kwargs)
elif what == "sentences":
return sentences(*args, **kwargs)
elif what == "paragraph":
return paragraph(*args, **kwargs)
elif what == "paragraphs":
return paragraphs(*args, **kwargs)
elif what == "title":
return title(*args, **kwargs)
else:
raise NameError('No such method') |
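The if/elif chain above dispatches purely on the `what` string; the same behaviour can be expressed with a dispatch dict. This is a hedged sketch with stand-in generator functions rather than the library's real ones.

```python
def word(count=1):
    return " ".join(["lorem"] * count)

def sentence():
    return "Lorem ipsum dolor sit amet."

# Map each `what` value to its generator; extend with the remaining generators.
_DISPATCH = {"word": word, "sentence": sentence}

def text(what="sentence", *args, **kwargs):
    try:
        return _DISPATCH[what](*args, **kwargs)
    except KeyError:
        raise NameError('No such method')

print(text("word", 3))  # lorem lorem lorem
```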
def repair_broken_bonds(self, slab, bonds):
"""
This method will find undercoordinated atoms due to slab
cleaving specified by the bonds parameter and move them
to the other surface to make sure the bond is kept intact.
In a future release of surface.py, the ghost_sites will be
used to tell us what the repaired bonds should look like.
Args:
slab (structure): A structure object representing a slab.
bonds ({(specie1, specie2): max_bond_dist}): bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
"""
for pair in bonds.keys():
blength = bonds[pair]
# First lets determine which element should be the
# reference (center element) to determine broken bonds.
# e.g. P for a PO4 bond. Find integer coordination
# numbers of the pair of elements wrt to each other
cn_dict = {}
for i, el in enumerate(pair):
cnlist = []
for site in self.oriented_unit_cell:
poly_coord = 0
if site.species_string == el:
for nn in self.oriented_unit_cell.get_neighbors(
site, blength):
if nn[0].species_string == pair[i-1]:
poly_coord += 1
cnlist.append(poly_coord)
cn_dict[el] = cnlist
# We make the element with the higher coordination our reference
if max(cn_dict[pair[0]]) > max(cn_dict[pair[1]]):
element1, element2 = pair
else:
element2, element1 = pair
for i, site in enumerate(slab):
# Determine the coordination of our reference
if site.species_string == element1:
poly_coord = 0
for neighbor in slab.get_neighbors(site, blength):
poly_coord += 1 if neighbor[0].species_string == element2 else 0
# suppose we find an undercoordinated reference atom
if poly_coord not in cn_dict[element1]:
# We get the reference atom of the broken bonds
# (undercoordinated), move it to the other surface
slab = self.move_to_other_side(slab, [i])
# find its NNs with the corresponding
# species it should be coordinated with
neighbors = slab.get_neighbors(slab[i], blength,
include_index=True)
tomove = [nn[2] for nn in neighbors if
nn[0].species_string == element2]
tomove.append(i)
# and then move those NNs along with the central
# atom back to the other side of the slab again
slab = self.move_to_other_side(slab, tomove)
return slab | This method will find undercoordinated atoms due to slab
cleaving specified by the bonds parameter and move them
to the other surface to make sure the bond is kept intact.
In a future release of surface.py, the ghost_sites will be
used to tell us what the repaired bonds should look like.
Args:
slab (structure): A structure object representing a slab.
bonds ({(specie1, specie2): max_bond_dist}): bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell. | Below is the instruction that describes the task:
### Input:
This method will find undercoordinated atoms due to slab
cleaving specified by the bonds parameter and move them
to the other surface to make sure the bond is kept intact.
In a future release of surface.py, the ghost_sites will be
used to tell us what the repaired bonds should look like.
Args:
slab (structure): A structure object representing a slab.
bonds ({(specie1, specie2): max_bond_dist}): bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
### Response:
def repair_broken_bonds(self, slab, bonds):
"""
This method will find undercoordinated atoms due to slab
cleaving specified by the bonds parameter and move them
to the other surface to make sure the bond is kept intact.
In a future release of surface.py, the ghost_sites will be
used to tell us what the repaired bonds should look like.
Args:
slab (structure): A structure object representing a slab.
bonds ({(specie1, specie2): max_bond_dist}): bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
"""
for pair in bonds.keys():
blength = bonds[pair]
# First lets determine which element should be the
# reference (center element) to determine broken bonds.
# e.g. P for a PO4 bond. Find integer coordination
# numbers of the pair of elements wrt to each other
cn_dict = {}
for i, el in enumerate(pair):
cnlist = []
for site in self.oriented_unit_cell:
poly_coord = 0
if site.species_string == el:
for nn in self.oriented_unit_cell.get_neighbors(
site, blength):
if nn[0].species_string == pair[i-1]:
poly_coord += 1
cnlist.append(poly_coord)
cn_dict[el] = cnlist
# We make the element with the higher coordination our reference
if max(cn_dict[pair[0]]) > max(cn_dict[pair[1]]):
element1, element2 = pair
else:
element2, element1 = pair
for i, site in enumerate(slab):
# Determine the coordination of our reference
if site.species_string == element1:
poly_coord = 0
for neighbor in slab.get_neighbors(site, blength):
poly_coord += 1 if neighbor[0].species_string == element2 else 0
# suppose we find an undercoordinated reference atom
if poly_coord not in cn_dict[element1]:
# We get the reference atom of the broken bonds
# (undercoordinated), move it to the other surface
slab = self.move_to_other_side(slab, [i])
# find its NNs with the corresponding
# species it should be coordinated with
neighbors = slab.get_neighbors(slab[i], blength,
include_index=True)
tomove = [nn[2] for nn in neighbors if
nn[0].species_string == element2]
tomove.append(i)
# and then move those NNs along with the central
# atom back to the other side of the slab again
slab = self.move_to_other_side(slab, tomove)
return slab |
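An illustrative call only; `slabgen` (a pymatgen SlabGenerator) and `slab` are assumed to exist already, and the bonds dict follows the format described in the docstring: a (specie, specie) tuple mapped to a maximum bond length in angstroms.

```python
# Hypothetical usage sketch; slabgen and slab are not constructed here.
bonds = {("P", "O"): 3}  # treat P-O pairs within 3 angstroms as bonded (PO4 groups)
repaired_slab = slabgen.repair_broken_bonds(slab, bonds)
```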
def get_account(self, account):
'''
check whether the account is in the portfolio dict or not
:param account: QA_Account
:return: QA_Account if in dict
None if not in dict
'''
try:
return self.get_account_by_cookie(account.account_cookie)
except:
QA_util_log_info(
'Can not find this account with cookies %s' %
account.account_cookie
)
return None | check whether the account is in the portfolio dict or not
:param account: QA_Account
:return: QA_Account if in dict
None if not in dict | Below is the instruction that describes the task:
### Input:
check whether the account is in the portfolio dict or not
:param account: QA_Account
:return: QA_Account if in dict
None if not in dict
### Response:
def get_account(self, account):
'''
check whether the account is in the portfolio dict or not
:param account: QA_Account
:return: QA_Account if in dict
None if not in dict
'''
try:
return self.get_account_by_cookie(account.account_cookie)
except:
QA_util_log_info(
'Can not find this account with cookies %s' %
account.account_cookie
)
return None |
def _await_descriptor_upload(tor_protocol, onion, progress, await_all_uploads):
"""
Internal helper.
:param tor_protocol: ITorControlProtocol instance
:param onion: IOnionService instance
:param progress: a progress callback, or None
:returns: a Deferred that fires once we've detected at least one
descriptor upload for the service (as detected by listening for
HS_DESC events)
"""
# For v3 services, Tor attempts to upload to 16 services; we'll
# assume that for now but also cap it (we want to show some
# progress for "attempting uploads" but we need to decide how
# much) .. so we leave 50% of the "progress" for attempts, and the
# other 50% for "are we done" (which is either "one thing
# uploaded" or "all the things uploaded")
attempted_uploads = set()
confirmed_uploads = set()
failed_uploads = set()
uploaded = defer.Deferred()
await_all = False if await_all_uploads is None else await_all_uploads
def translate_progress(tag, description):
if progress:
done = len(confirmed_uploads) + len(failed_uploads)
done_endpoint = float(len(attempted_uploads)) if await_all else 1.0
done_pct = 0 if not attempted_uploads else float(done) / done_endpoint
started_pct = float(min(16, len(attempted_uploads))) / 16.0
try:
progress(
(done_pct * 50.0) + (started_pct * 50.0),
tag,
description,
)
except Exception:
log.err()
def hostname_matches(hostname):
if IAuthenticatedOnionClients.providedBy(onion):
return hostname[:-6] == onion.get_permanent_id()
else:
# provides IOnionService
return onion.hostname == hostname
def hs_desc(evt):
"""
From control-spec:
"650" SP "HS_DESC" SP Action SP HSAddress SP AuthType SP HsDir
[SP DescriptorID] [SP "REASON=" Reason] [SP "REPLICA=" Replica]
"""
args = evt.split()
subtype = args[0]
if subtype == 'UPLOAD':
if hostname_matches('{}.onion'.format(args[1])):
attempted_uploads.add(args[3])
translate_progress(
"wait_descriptor",
"Upload to {} started".format(args[3])
)
elif subtype == 'UPLOADED':
# we only need ONE successful upload to happen for the
# HS to be reachable.
# unused? addr = args[1]
# XXX FIXME I think tor is sending the onion-address
# properly with these now, so we can use those
# (i.e. instead of matching to "attempted_uploads")
if args[3] in attempted_uploads:
confirmed_uploads.add(args[3])
log.msg("Uploaded '{}' to '{}'".format(args[1], args[3]))
translate_progress(
"wait_descriptor",
"Successful upload to {}".format(args[3])
)
if not uploaded.called:
if await_all:
if (len(failed_uploads) + len(confirmed_uploads)) == len(attempted_uploads):
uploaded.callback(onion)
else:
uploaded.callback(onion)
elif subtype == 'FAILED':
if hostname_matches('{}.onion'.format(args[1])):
failed_uploads.add(args[3])
translate_progress(
"wait_descriptor",
"Failed upload to {}".format(args[3])
)
if failed_uploads == attempted_uploads:
msg = "Failed to upload '{}' to: {}".format(
args[1],
', '.join(failed_uploads),
)
uploaded.errback(RuntimeError(msg))
# the first 'yield' should be the add_event_listener so that a
# caller can do "d = _await_descriptor_upload()", then add the
# service.
yield tor_protocol.add_event_listener('HS_DESC', hs_desc)
yield uploaded
yield tor_protocol.remove_event_listener('HS_DESC', hs_desc)
# ensure we show "100%" at the end
if progress:
if await_all_uploads:
msg = "Completed descriptor uploads"
else:
msg = "At least one descriptor uploaded"
try:
progress(100.0, "wait_descriptor", msg)
except Exception:
log.err() | Internal helper.
:param tor_protocol: ITorControlProtocol instance
:param onion: IOnionService instance
:param progress: a progress callback, or None
:returns: a Deferred that fires once we've detected at least one
descriptor upload for the service (as detected by listening for
HS_DESC events) | Below is the instruction that describes the task:
### Input:
Internal helper.
:param tor_protocol: ITorControlProtocol instance
:param onion: IOnionService instance
:param progress: a progress callback, or None
:returns: a Deferred that fires once we've detected at least one
descriptor upload for the service (as detected by listening for
HS_DESC events)
### Response:
def _await_descriptor_upload(tor_protocol, onion, progress, await_all_uploads):
"""
Internal helper.
:param tor_protocol: ITorControlProtocol instance
:param onion: IOnionService instance
:param progress: a progress callback, or None
:returns: a Deferred that fires once we've detected at least one
descriptor upload for the service (as detected by listening for
HS_DESC events)
"""
# For v3 services, Tor attempts to upload to 16 services; we'll
# assume that for now but also cap it (we want to show some
# progress for "attempting uploads" but we need to decide how
# much) .. so we leave 50% of the "progress" for attempts, and the
# other 50% for "are we done" (which is either "one thing
# uploaded" or "all the things uploaded")
attempted_uploads = set()
confirmed_uploads = set()
failed_uploads = set()
uploaded = defer.Deferred()
await_all = False if await_all_uploads is None else await_all_uploads
def translate_progress(tag, description):
if progress:
done = len(confirmed_uploads) + len(failed_uploads)
done_endpoint = float(len(attempted_uploads)) if await_all else 1.0
done_pct = 0 if not attempted_uploads else float(done) / done_endpoint
started_pct = float(min(16, len(attempted_uploads))) / 16.0
try:
progress(
(done_pct * 50.0) + (started_pct * 50.0),
tag,
description,
)
except Exception:
log.err()
def hostname_matches(hostname):
if IAuthenticatedOnionClients.providedBy(onion):
return hostname[:-6] == onion.get_permanent_id()
else:
# provides IOnionService
return onion.hostname == hostname
def hs_desc(evt):
"""
From control-spec:
"650" SP "HS_DESC" SP Action SP HSAddress SP AuthType SP HsDir
[SP DescriptorID] [SP "REASON=" Reason] [SP "REPLICA=" Replica]
"""
args = evt.split()
subtype = args[0]
if subtype == 'UPLOAD':
if hostname_matches('{}.onion'.format(args[1])):
attempted_uploads.add(args[3])
translate_progress(
"wait_descriptor",
"Upload to {} started".format(args[3])
)
elif subtype == 'UPLOADED':
# we only need ONE successful upload to happen for the
# HS to be reachable.
# unused? addr = args[1]
# XXX FIXME I think tor is sending the onion-address
# properly with these now, so we can use those
# (i.e. instead of matching to "attempted_uploads")
if args[3] in attempted_uploads:
confirmed_uploads.add(args[3])
log.msg("Uploaded '{}' to '{}'".format(args[1], args[3]))
translate_progress(
"wait_descriptor",
"Successful upload to {}".format(args[3])
)
if not uploaded.called:
if await_all:
if (len(failed_uploads) + len(confirmed_uploads)) == len(attempted_uploads):
uploaded.callback(onion)
else:
uploaded.callback(onion)
elif subtype == 'FAILED':
if hostname_matches('{}.onion'.format(args[1])):
failed_uploads.add(args[3])
translate_progress(
"wait_descriptor",
"Failed upload to {}".format(args[3])
)
if failed_uploads == attempted_uploads:
msg = "Failed to upload '{}' to: {}".format(
args[1],
', '.join(failed_uploads),
)
uploaded.errback(RuntimeError(msg))
# the first 'yield' should be the add_event_listener so that a
# caller can do "d = _await_descriptor_upload()", then add the
# service.
yield tor_protocol.add_event_listener('HS_DESC', hs_desc)
yield uploaded
yield tor_protocol.remove_event_listener('HS_DESC', hs_desc)
# ensure we show "100%" at the end
if progress:
if await_all_uploads:
msg = "Completed descriptor uploads"
else:
msg = "At least one descriptor uploaded"
try:
progress(100.0, "wait_descriptor", msg)
except Exception:
log.err() |
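A standalone sketch of how the nested `hs_desc()` callback slices a control-port event into fields; the payload below is made up but follows the control-spec layout quoted in the code (Action, HSAddress, AuthType, HsDir, ...).

```python
# Fabricated example event body (the "650 HS_DESC " prefix has already been stripped).
evt = "UPLOADED vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd UNKNOWN $9695DFC35FFEB861329B9F1AB04C46397020CE31"
args = evt.split()
print(args[0])  # 'UPLOADED' -> the Action, i.e. the subtype branched on above
print(args[1])  # the onion address, without the '.onion' suffix
print(args[3])  # the HsDir the descriptor was sent to
```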
def eval_nonagg_call(self, exp):
"helper for eval_callx; evaluator for CallX that consume a single value"
# todo: get more concrete about argument counts
args=self.eval(exp.args)
if exp.f=='coalesce':
a,b=args # todo: does coalesce take more than 2 args?
return b if a is None else a
elif exp.f=='unnest': return self.eval(exp.args)[0] # note: run_select does some work in this case too
elif exp.f in ('to_tsquery','to_tsvector'): return set(self.eval(exp.args.children[0]).split())
else: raise NotImplementedError('unk_function',exp.f) | helper for eval_callx; evaluator for CallX that consumes a single value | Below is the instruction that describes the task:
### Input:
helper for eval_callx; evaluator for CallX that consumes a single value
### Response:
def eval_nonagg_call(self, exp):
"helper for eval_callx; evaluator for CallX that consume a single value"
# todo: get more concrete about argument counts
args=self.eval(exp.args)
if exp.f=='coalesce':
a,b=args # todo: does coalesce take more than 2 args?
return b if a is None else a
elif exp.f=='unnest': return self.eval(exp.args)[0] # note: run_select does some work in this case too
elif exp.f in ('to_tsquery','to_tsvector'): return set(self.eval(exp.args.children[0]).split())
else: raise NotImplementedError('unk_function',exp.f) |
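Outside the expression-tree machinery, the SQL semantics being emulated reduce to a couple of small functions; this is a hedged, standalone restatement, not the evaluator's actual API.

```python
def coalesce(a, b):
    # two-argument COALESCE: first non-NULL argument
    return b if a is None else a

def to_tsvector(text):
    # crude stand-in: a set of whitespace-separated tokens
    return set(text.split())

print(coalesce(None, "fallback"))          # fallback
print(to_tsvector("the quick brown fox"))  # {'quick', 'brown', 'fox', 'the'} (order varies)
```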
def _add_spin_magnitudes(self, structure):
"""
Replaces Spin.up/Spin.down with spin magnitudes specified
by mag_species_spin.
:param structure:
:return:
"""
for idx, site in enumerate(structure):
if getattr(site.specie, '_properties', None):
spin = site.specie._properties.get('spin', None)
sign = int(spin) if spin else 0
if spin:
new_properties = site.specie._properties.copy()
# this very hacky bit of code only works because we know
# that on disordered sites in this class, all species are the same
# but have different spins, and this is comma-delimited
sp = str(site.specie).split(",")[0]
new_properties.update({
'spin': sign * self.mag_species_spin.get(sp, 0)
})
new_specie = Specie(site.specie.symbol,
getattr(site.specie, 'oxi_state', None),
new_properties)
structure.replace(idx, new_specie,
properties=site.properties)
logger.debug('Structure with spin magnitudes:\n{}'.format(str(structure)))
return structure | Replaces Spin.up/Spin.down with spin magnitudes specified
by mag_species_spin.
:param structure:
:return: | Below is the instruction that describes the task:
### Input:
Replaces Spin.up/Spin.down with spin magnitudes specified
by mag_species_spin.
:param structure:
:return:
### Response:
def _add_spin_magnitudes(self, structure):
"""
Replaces Spin.up/Spin.down with spin magnitudes specified
by mag_species_spin.
:param structure:
:return:
"""
for idx, site in enumerate(structure):
if getattr(site.specie, '_properties', None):
spin = site.specie._properties.get('spin', None)
sign = int(spin) if spin else 0
if spin:
new_properties = site.specie._properties.copy()
# this very hacky bit of code only works because we know
# that on disordered sites in this class, all species are the same
# but have different spins, and this is comma-delimited
sp = str(site.specie).split(",")[0]
new_properties.update({
'spin': sign * self.mag_species_spin.get(sp, 0)
})
new_specie = Specie(site.specie.symbol,
getattr(site.specie, 'oxi_state', None),
new_properties)
structure.replace(idx, new_specie,
properties=site.properties)
logger.debug('Structure with spin magnitudes:\n{}'.format(str(structure)))
return structure |
def _migrate_subresource(subresource, parent, migrations):
"""
Migrate a resource's subresource
:param subresource: the perch.SubResource instance
:param parent: the parent perch.Document instance
:param migrations: the migrations for a resource
"""
for key, doc in getattr(parent, subresource.parent_key, {}).items():
for migration in migrations['migrations']:
instance = migration(subresource(id=key, **doc))
parent._resource['doc_version'] = unicode(migration.version)
instance = _migrate_subresources(
instance,
migrations['subresources']
)
doc = instance._resource
doc.pop('id', None)
doc.pop(instance.resource_type + '_id', None)
getattr(parent, subresource.parent_key)[key] = doc
return parent | Migrate a resource's subresource
:param subresource: the perch.SubResource instance
:param parent: the parent perch.Document instance
:param migrations: the migrations for a resource | Below is the instruction that describes the task:
### Input:
Migrate a resource's subresource
:param subresource: the perch.SubResource instance
:param parent: the parent perch.Document instance
:param migrations: the migrations for a resource
### Response:
def _migrate_subresource(subresource, parent, migrations):
"""
Migrate a resource's subresource
:param subresource: the perch.SubResource instance
:param parent: the parent perch.Document instance
:param migrations: the migrations for a resource
"""
for key, doc in getattr(parent, subresource.parent_key, {}).items():
for migration in migrations['migrations']:
instance = migration(subresource(id=key, **doc))
parent._resource['doc_version'] = unicode(migration.version)
instance = _migrate_subresources(
instance,
migrations['subresources']
)
doc = instance._resource
doc.pop('id', None)
doc.pop(instance.resource_type + '_id', None)
getattr(parent, subresource.parent_key)[key] = doc
return parent |
def gen_age(output, ascii_props=False, append=False, prefix=""):
"""Generate `age` property."""
obj = {}
all_chars = ALL_ASCII if ascii_props else ALL_CHARS
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedAge.txt'), 'r', 'utf-8') as uf:
for line in uf:
if not line.startswith('#'):
data = line.split('#')[0].split(';')
if len(data) < 2:
continue
span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
name = format_name(data[1])
if name not in obj:
obj[name] = []
if span is None:
continue
obj[name].extend(span)
unassigned = set()
for x in obj.values():
unassigned |= set(x)
obj['na'] = list(all_chars - unassigned)
for name in list(obj.keys()):
s = set(obj[name])
obj[name] = sorted(s)
# Convert characters values to ranges
char2range(obj, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
# Write out the Unicode properties
f.write('%s_age = {\n' % prefix)
count = len(obj) - 1
i = 0
for k1, v1 in sorted(obj.items()):
f.write(' "%s": "%s"' % (k1, v1))
if i == count:
f.write('\n}\n')
else:
f.write(',\n')
i += 1 | Generate `age` property. | Below is the instruction that describes the task:
### Input:
Generate `age` property.
### Response:
def gen_age(output, ascii_props=False, append=False, prefix=""):
"""Generate `age` property."""
obj = {}
all_chars = ALL_ASCII if ascii_props else ALL_CHARS
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedAge.txt'), 'r', 'utf-8') as uf:
for line in uf:
if not line.startswith('#'):
data = line.split('#')[0].split(';')
if len(data) < 2:
continue
span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
name = format_name(data[1])
if name not in obj:
obj[name] = []
if span is None:
continue
obj[name].extend(span)
unassigned = set()
for x in obj.values():
unassigned |= set(x)
obj['na'] = list(all_chars - unassigned)
for name in list(obj.keys()):
s = set(obj[name])
obj[name] = sorted(s)
# Convert characters values to ranges
char2range(obj, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
# Write out the Unicode properties
f.write('%s_age = {\n' % prefix)
count = len(obj) - 1
i = 0
for k1, v1 in sorted(obj.items()):
f.write(' "%s": "%s"' % (k1, v1))
if i == count:
f.write('\n}\n')
else:
f.write(',\n')
i += 1 |
def make_success_redirect(self):
""" Return a Django ``HttpResponseRedirect`` describing the request success.
The custom authorization endpoint should return the result of this method
when the user grants the Client's authorization request. The request is
assumed to have successfully been vetted by the :py:meth:`validate` method.
"""
new_authorization_code = AuthorizationCode.objects.create(
user=self.user,
client=self.client,
redirect_uri=(self.redirect_uri if self.request_redirect_uri else None)
)
new_authorization_code.scopes = self.valid_scope_objects
new_authorization_code.save()
response_params = {'code': new_authorization_code.value}
# From http://tools.ietf.org/html/rfc6749#section-4.1.2 :
#
# REQUIRED if the "state" parameter was present in the client
# authorization request. The exact value received from the
# client.
#
if self.state is not None:
response_params['state'] = self.state
return HttpResponseRedirect(
update_parameters(self.redirect_uri, response_params)) | Return a Django ``HttpResponseRedirect`` describing the request success.
The custom authorization endpoint should return the result of this method
when the user grants the Client's authorization request. The request is
assumed to have successfully been vetted by the :py:meth:`validate` method. | Below is the instruction that describes the task:
### Input:
Return a Django ``HttpResponseRedirect`` describing the request success.
The custom authorization endpoint should return the result of this method
when the user grants the Client's authorization request. The request is
assumed to have successfully been vetted by the :py:meth:`validate` method.
### Response:
def make_success_redirect(self):
""" Return a Django ``HttpResponseRedirect`` describing the request success.
The custom authorization endpoint should return the result of this method
when the user grants the Client's authorization request. The request is
assumed to have successfully been vetted by the :py:meth:`validate` method.
"""
new_authorization_code = AuthorizationCode.objects.create(
user=self.user,
client=self.client,
redirect_uri=(self.redirect_uri if self.request_redirect_uri else None)
)
new_authorization_code.scopes = self.valid_scope_objects
new_authorization_code.save()
response_params = {'code': new_authorization_code.value}
# From http://tools.ietf.org/html/rfc6749#section-4.1.2 :
#
# REQUIRED if the "state" parameter was present in the client
# authorization request. The exact value received from the
# client.
#
if self.state is not None:
response_params['state'] = self.state
return HttpResponseRedirect(
update_parameters(self.redirect_uri, response_params)) |
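The redirect built above is just the client's redirect_uri with `code` (and optionally `state`) appended as query parameters, per RFC 6749 section 4.1.2. A minimal sketch of that URL construction with made-up values; the library's `update_parameters` presumably also merges with any existing query string.

```python
from urllib.parse import urlencode

redirect_uri = "https://client.example.com/callback"
response_params = {"code": "SplxlOBeZQQYbYS6WxSbIA", "state": "xyz"}
print(redirect_uri + "?" + urlencode(response_params))
# https://client.example.com/callback?code=SplxlOBeZQQYbYS6WxSbIA&state=xyz
```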
def short_dask_repr(array, show_dtype=True):
"""Similar to dask.array.DataArray.__repr__, but without
redundant information that's already printed by the repr
function of the xarray wrapper.
"""
chunksize = tuple(c[0] for c in array.chunks)
if show_dtype:
return 'dask.array<shape={}, dtype={}, chunksize={}>'.format(
array.shape, array.dtype, chunksize)
else:
return 'dask.array<shape={}, chunksize={}>'.format(
array.shape, chunksize) | Similar to dask.array.DataArray.__repr__, but without
redundant information that's already printed by the repr
function of the xarray wrapper. | Below is the instruction that describes the task:
### Input:
Similar to dask.array.DataArray.__repr__, but without
redundant information that's already printed by the repr
function of the xarray wrapper.
### Response:
def short_dask_repr(array, show_dtype=True):
"""Similar to dask.array.DataArray.__repr__, but without
redundant information that's already printed by the repr
function of the xarray wrapper.
"""
chunksize = tuple(c[0] for c in array.chunks)
if show_dtype:
return 'dask.array<shape={}, dtype={}, chunksize={}>'.format(
array.shape, array.dtype, chunksize)
else:
return 'dask.array<shape={}, chunksize={}>'.format(
array.shape, chunksize) |
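A quick usage sketch, assuming dask is installed and the function above is in scope; the dtype shown depends on platform defaults.

```python
import dask.array as da

x = da.ones((1000, 1000), chunks=(250, 500))
print(short_dask_repr(x))
# dask.array<shape=(1000, 1000), dtype=float64, chunksize=(250, 500)>
print(short_dask_repr(x, show_dtype=False))
# dask.array<shape=(1000, 1000), chunksize=(250, 500)>
```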
def add(repo, args, targetdir,
execute=False, generator=False,
includes=[], script=False,
source=None):
"""
Add files to the repository by explicitly specifying them or by
specifying a pattern over files accessed during execution of an
executable.
Parameters
----------
repo: Repository
args: files or command line
(a) If simply adding files, then the list of files that must
be added (including any additional arguments to be passed to
git)
(b) If files to be added are an output of a command line, then
args is the command line
targetdir: Target directory to store the files
execute: Args are not files to be added but scripts that must be run.
includes: patterns used to select the files to include
script: Is this a script?
generator: Is this a generator
source: Link to the original source of the data
"""
# Gather the files...
if not execute:
files = add_files(args=args,
targetdir=targetdir,
source=source,
script=script,
generator=generator)
else:
files = run_executable(repo, args, includes)
if files is None or len(files) == 0:
return repo
# Update the repo package but with only those that have changed.
filtered_files = []
package = repo.package
for h in files:
found = False
for i, r in enumerate(package['resources']):
if h['relativepath'] == r['relativepath']:
found = True
if h['sha256'] == r['sha256']:
change = False
for attr in ['source']:
if h[attr] != r[attr]:
r[attr] = h[attr]
change = True
if change:
filtered_files.append(h)
continue
else:
filtered_files.append(h)
package['resources'][i] = h
break
if not found:
filtered_files.append(h)
package['resources'].append(h)
if len(filtered_files) == 0:
return 0
# Copy the files
repo.manager.add_files(repo, filtered_files)
# Write to disk...
rootdir = repo.rootdir
with cd(rootdir):
datapath = "datapackage.json"
with open(datapath, 'w') as fd:
fd.write(json.dumps(package, indent=4))
return len(filtered_files) | Add files to the repository by explicitly specifying them or by
specifying a pattern over files accessed during execution of an
executable.
Parameters
----------
repo: Repository
args: files or command line
(a) If simply adding files, then the list of files that must
be added (including any additional arguments to be passed to
git)
(b) If files to be added are an output of a command line, then
args is the command line
targetdir: Target directory to store the files
execute: Args are not files to be added but scripts that must be run.
includes: patterns used to select the files to include
script: Is this a script?
generator: Is this a generator
source: Link to the original source of the data | Below is the instruction that describes the task:
### Input:
Add files to the repository by explicitly specifying them or by
specifying a pattern over files accessed during execution of an
executable.
Parameters
----------
repo: Repository
args: files or command line
(a) If simply adding files, then the list of files that must
be added (including any additional arguments to be passed to
git)
(b) If files to be added are an output of a command line, then
args is the command line
targetdir: Target directory to store the files
execute: Args are not files to be added but scripts that must be run.
includes: patterns used to select the files to include
script: Is this a script?
generator: Is this a generator
source: Link to the original source of the data
### Response:
def add(repo, args, targetdir,
execute=False, generator=False,
includes=[], script=False,
source=None):
"""
Add files to the repository by explicitly specifying them or by
specifying a pattern over files accessed during execution of an
executable.
Parameters
----------
repo: Repository
args: files or command line
(a) If simply adding files, then the list of files that must
be added (including any additional arguments to be passed to
git)
(b) If files to be added are an output of a command line, then
args is the command line
targetdir: Target directory to store the files
execute: Args are not files to be added but scripts that must be run.
includes: patterns used to select the files to include
script: Is this a script?
generator: Is this a generator
source: Link to the original source of the data
"""
# Gather the files...
if not execute:
files = add_files(args=args,
targetdir=targetdir,
source=source,
script=script,
generator=generator)
else:
files = run_executable(repo, args, includes)
if files is None or len(files) == 0:
return repo
# Update the repo package but with only those that have changed.
filtered_files = []
package = repo.package
for h in files:
found = False
for i, r in enumerate(package['resources']):
if h['relativepath'] == r['relativepath']:
found = True
if h['sha256'] == r['sha256']:
change = False
for attr in ['source']:
if h[attr] != r[attr]:
r[attr] = h[attr]
change = True
if change:
filtered_files.append(h)
continue
else:
filtered_files.append(h)
package['resources'][i] = h
break
if not found:
filtered_files.append(h)
package['resources'].append(h)
if len(filtered_files) == 0:
return 0
# Copy the files
repo.manager.add_files(repo, filtered_files)
# Write to disk...
rootdir = repo.rootdir
with cd(rootdir):
datapath = "datapackage.json"
with open(datapath, 'w') as fd:
fd.write(json.dumps(package, indent=4))
return len(filtered_files) |
def build_ast_schema(
document_ast: DocumentNode,
assume_valid: bool = False,
assume_valid_sdl: bool = False,
) -> GraphQLSchema:
"""Build a GraphQL Schema from a given AST.
This takes the ast of a schema document produced by the parse function in
src/language/parser.py.
If no schema definition is provided, then it will look for types named Query
and Mutation.
Given that AST it constructs a GraphQLSchema. The resulting schema has no
resolve methods, so execution will use default resolvers.
When building a schema from a GraphQL service's introspection result, it might
be safe to assume the schema is valid. Set `assume_valid` to True to assume the
produced schema is valid. Set `assume_valid_sdl` to True to assume it is already
a valid SDL document.
"""
if not isinstance(document_ast, DocumentNode):
raise TypeError("Must provide a Document AST.")
if not (assume_valid or assume_valid_sdl):
from ..validation.validate import assert_valid_sdl
assert_valid_sdl(document_ast)
schema_def: Optional[SchemaDefinitionNode] = None
type_defs: List[TypeDefinitionNode] = []
directive_defs: List[DirectiveDefinitionNode] = []
append_directive_def = directive_defs.append
for def_ in document_ast.definitions:
if isinstance(def_, SchemaDefinitionNode):
schema_def = def_
elif isinstance(def_, TypeDefinitionNode):
def_ = cast(TypeDefinitionNode, def_)
type_defs.append(def_)
elif isinstance(def_, DirectiveDefinitionNode):
append_directive_def(def_)
def resolve_type(type_name: str) -> GraphQLNamedType:
type_ = type_map.get(type_name)
if not type_:
raise TypeError(f"Type '{type_name}' not found in document.")
return type_
ast_builder = ASTDefinitionBuilder(
assume_valid=assume_valid, resolve_type=resolve_type
)
type_map = {node.name.value: ast_builder.build_type(node) for node in type_defs}
if schema_def:
operation_types = get_operation_types(schema_def)
else:
operation_types = {
OperationType.QUERY: "Query",
OperationType.MUTATION: "Mutation",
OperationType.SUBSCRIPTION: "Subscription",
}
directives = [
ast_builder.build_directive(directive_def) for directive_def in directive_defs
]
# If specified directives were not explicitly declared, add them.
if not any(directive.name == "skip" for directive in directives):
directives.append(GraphQLSkipDirective)
if not any(directive.name == "include" for directive in directives):
directives.append(GraphQLIncludeDirective)
if not any(directive.name == "deprecated" for directive in directives):
directives.append(GraphQLDeprecatedDirective)
query_type = operation_types.get(OperationType.QUERY)
mutation_type = operation_types.get(OperationType.MUTATION)
subscription_type = operation_types.get(OperationType.SUBSCRIPTION)
return GraphQLSchema(
# Note: While this could make early assertions to get the correctly
# typed values below, that would throw immediately while type system
# validation with `validate_schema()` will produce more actionable results.
query=cast(GraphQLObjectType, type_map.get(query_type)) if query_type else None,
mutation=cast(GraphQLObjectType, type_map.get(mutation_type))
if mutation_type
else None,
subscription=cast(GraphQLObjectType, type_map.get(subscription_type))
if subscription_type
else None,
types=list(type_map.values()),
directives=directives,
ast_node=schema_def,
assume_valid=assume_valid,
) | Build a GraphQL Schema from a given AST.
This takes the ast of a schema document produced by the parse function in
src/language/parser.py.
If no schema definition is provided, then it will look for types named Query
and Mutation.
Given that AST it constructs a GraphQLSchema. The resulting schema has no
resolve methods, so execution will use default resolvers.
When building a schema from a GraphQL service's introspection result, it might
be safe to assume the schema is valid. Set `assume_valid` to True to assume the
produced schema is valid. Set `assume_valid_sdl` to True to assume it is already
a valid SDL document. | Below is the instruction that describes the task:
### Input:
Build a GraphQL Schema from a given AST.
This takes the ast of a schema document produced by the parse function in
src/language/parser.py.
If no schema definition is provided, then it will look for types named Query
and Mutation.
Given that AST it constructs a GraphQLSchema. The resulting schema has no
resolve methods, so execution will use default resolvers.
When building a schema from a GraphQL service's introspection result, it might
be safe to assume the schema is valid. Set `assume_valid` to True to assume the
produced schema is valid. Set `assume_valid_sdl` to True to assume it is already
a valid SDL document.
### Response:
def build_ast_schema(
document_ast: DocumentNode,
assume_valid: bool = False,
assume_valid_sdl: bool = False,
) -> GraphQLSchema:
"""Build a GraphQL Schema from a given AST.
This takes the ast of a schema document produced by the parse function in
src/language/parser.py.
If no schema definition is provided, then it will look for types named Query
and Mutation.
Given that AST it constructs a GraphQLSchema. The resulting schema has no
resolve methods, so execution will use default resolvers.
When building a schema from a GraphQL service's introspection result, it might
be safe to assume the schema is valid. Set `assume_valid` to True to assume the
produced schema is valid. Set `assume_valid_sdl` to True to assume it is already
a valid SDL document.
"""
if not isinstance(document_ast, DocumentNode):
raise TypeError("Must provide a Document AST.")
if not (assume_valid or assume_valid_sdl):
from ..validation.validate import assert_valid_sdl
assert_valid_sdl(document_ast)
schema_def: Optional[SchemaDefinitionNode] = None
type_defs: List[TypeDefinitionNode] = []
directive_defs: List[DirectiveDefinitionNode] = []
append_directive_def = directive_defs.append
for def_ in document_ast.definitions:
if isinstance(def_, SchemaDefinitionNode):
schema_def = def_
elif isinstance(def_, TypeDefinitionNode):
def_ = cast(TypeDefinitionNode, def_)
type_defs.append(def_)
elif isinstance(def_, DirectiveDefinitionNode):
append_directive_def(def_)
def resolve_type(type_name: str) -> GraphQLNamedType:
type_ = type_map.get(type_name)
if not type_:
raise TypeError(f"Type '{type_name}' not found in document.")
return type_
ast_builder = ASTDefinitionBuilder(
assume_valid=assume_valid, resolve_type=resolve_type
)
type_map = {node.name.value: ast_builder.build_type(node) for node in type_defs}
if schema_def:
operation_types = get_operation_types(schema_def)
else:
operation_types = {
OperationType.QUERY: "Query",
OperationType.MUTATION: "Mutation",
OperationType.SUBSCRIPTION: "Subscription",
}
directives = [
ast_builder.build_directive(directive_def) for directive_def in directive_defs
]
# If specified directives were not explicitly declared, add them.
if not any(directive.name == "skip" for directive in directives):
directives.append(GraphQLSkipDirective)
if not any(directive.name == "include" for directive in directives):
directives.append(GraphQLIncludeDirective)
if not any(directive.name == "deprecated" for directive in directives):
directives.append(GraphQLDeprecatedDirective)
query_type = operation_types.get(OperationType.QUERY)
mutation_type = operation_types.get(OperationType.MUTATION)
subscription_type = operation_types.get(OperationType.SUBSCRIPTION)
return GraphQLSchema(
# Note: While this could make early assertions to get the correctly
# typed values below, that would throw immediately while type system
# validation with `validate_schema()` will produce more actionable results.
query=cast(GraphQLObjectType, type_map.get(query_type)) if query_type else None,
mutation=cast(GraphQLObjectType, type_map.get(mutation_type))
if mutation_type
else None,
subscription=cast(GraphQLObjectType, type_map.get(subscription_type))
if subscription_type
else None,
types=list(type_map.values()),
directives=directives,
ast_node=schema_def,
assume_valid=assume_valid,
) |
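A short usage sketch against graphql-core 3, assuming `parse` and `build_ast_schema` are importable from the installed package; the SDL snippet is made up.

```python
from graphql import parse
from graphql.utilities import build_ast_schema

sdl = """
type Query {
  hello: String
}
"""
schema = build_ast_schema(parse(sdl))
print(schema.query_type.fields["hello"].type)  # String
```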
def variable_declaration(self):
"""
variable_declaration: 'let' assignment ';'
"""
self._process(Nature.LET)
node = VariableDeclaration(assignment=self.assignment())
self._process(Nature.SEMI)
return node | variable_declaration: 'let' assignment ';' | Below is the instruction that describes the task:
### Input:
variable_declaration: 'let' assignment ';'
### Response:
def variable_declaration(self):
"""
variable_declaration: 'let' assignment ';'
"""
self._process(Nature.LET)
node = VariableDeclaration(assignment=self.assignment())
self._process(Nature.SEMI)
return node |
def DEBUG(msg, *args, **kwargs):
"""temporary logger during development that is always on"""
logger = getLogger("DEBUG")
if len(logger.handlers) == 0:
logger.addHandler(StreamHandler())
logger.propagate = False
logger.setLevel(logging.DEBUG)
logger.DEV(msg, *args, **kwargs) | temporary logger during development that is always on | Below is the instruction that describes the task:
### Input:
temporary logger during development that is always on
### Response:
def DEBUG(msg, *args, **kwargs):
"""temporary logger during development that is always on"""
logger = getLogger("DEBUG")
if len(logger.handlers) == 0:
logger.addHandler(StreamHandler())
logger.propagate = False
logger.setLevel(logging.DEBUG)
logger.DEV(msg, *args, **kwargs) |
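The helper above relies on a custom `DEV` level registered elsewhere in the project; here is a self-contained sketch of the same lazy-setup pattern using the stock `debug()` call instead.

```python
import logging
from logging import StreamHandler, getLogger

def debug_log(msg, *args, **kwargs):
    """Same pattern as DEBUG() above, but with the standard DEBUG level."""
    logger = getLogger("DEBUG")
    if len(logger.handlers) == 0:
        logger.addHandler(StreamHandler())
        logger.propagate = False
        logger.setLevel(logging.DEBUG)
    logger.debug(msg, *args, **kwargs)

debug_log("value of x: %s", 42)
```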
def AddProperty(self, interface, name, value):
'''Add property to this object
interface: D-Bus interface to add this to. For convenience you can
specify '' here to add the property to the object's main
interface (as specified on construction).
name: Property name.
value: Property value.
'''
if not interface:
interface = self.interface
try:
self.props[interface][name]
raise dbus.exceptions.DBusException(
'property %s already exists' % name,
name=self.interface + '.PropertyExists')
except KeyError:
# this is what we expect
pass
# copy.copy removes one level of variant-ness, which means that the
# types get exported in introspection data correctly, but we can't do
# this for container types.
if not (isinstance(value, dbus.Dictionary) or isinstance(value, dbus.Array)):
value = copy.copy(value)
self.props.setdefault(interface, {})[name] = value | Add property to this object
interface: D-Bus interface to add this to. For convenience you can
specify '' here to add the property to the object's main
interface (as specified on construction).
name: Property name.
value: Property value. | Below is the instruction that describes the task:
### Input:
Add property to this object
interface: D-Bus interface to add this to. For convenience you can
specify '' here to add the property to the object's main
interface (as specified on construction).
name: Property name.
value: Property value.
### Response:
def AddProperty(self, interface, name, value):
'''Add property to this object
interface: D-Bus interface to add this to. For convenience you can
specify '' here to add the property to the object's main
interface (as specified on construction).
name: Property name.
value: Property value.
'''
if not interface:
interface = self.interface
try:
self.props[interface][name]
raise dbus.exceptions.DBusException(
'property %s already exists' % name,
name=self.interface + '.PropertyExists')
except KeyError:
# this is what we expect
pass
# copy.copy removes one level of variant-ness, which means that the
# types get exported in introspection data correctly, but we can't do
# this for container types.
if not (isinstance(value, dbus.Dictionary) or isinstance(value, dbus.Array)):
value = copy.copy(value)
self.props.setdefault(interface, {})[name] = value |
def ensure_table_strings(table):
"""
Force each cell in the table to be a string
Parameters
----------
table : list of lists
Returns
-------
table : list of lists of str
"""
for row in range(len(table)):
for column in range(len(table[row])):
table[row][column] = str(table[row][column])
return table | Force each cell in the table to be a string
Parameters
----------
table : list of lists
Returns
-------
table : list of lists of str | Below is the instruction that describes the task:
### Input:
Force each cell in the table to be a string
Parameters
----------
table : list of lists
Returns
-------
table : list of lists of str
### Response:
def ensure_table_strings(table):
"""
Force each cell in the table to be a string
Parameters
----------
table : list of lists
Returns
-------
table : list of lists of str
"""
for row in range(len(table)):
for column in range(len(table[row])):
table[row][column] = str(table[row][column])
return table |
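A usage sketch, assuming the function above is in scope; note that it mutates the table in place and also returns it.

```python
table = [[1, 2.5], [None, True]]
print(ensure_table_strings(table))
# [['1', '2.5'], ['None', 'True']]
```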
def verbose(self):
"""
Make it the verbose log.
A verbose log can only be shown when the user wants to see more logs.
It works as::
log.verbose.warn('this is a verbose warn')
log.verbose.info('this is a verbose info')
"""
log = copy.copy(self)
log._is_verbose = True
return log | Make it the verbose log.
A verbose log can only be shown when the user wants to see more logs.
It works as::
log.verbose.warn('this is a verbose warn')
log.verbose.info('this is a verbose info') | Below is the instruction that describes the task:
### Input:
Make it the verbose log.
A verbose log can only be shown when the user wants to see more logs.
It works as::
log.verbose.warn('this is a verbose warn')
log.verbose.info('this is a verbose info')
### Response:
def verbose(self):
"""
Make it the verbose log.
A verbose log can only be shown when the user wants to see more logs.
It works as::
log.verbose.warn('this is a verbose warn')
log.verbose.info('this is a verbose info')
"""
log = copy.copy(self)
log._is_verbose = True
return log |
def GET_AUTH(self, courseid, taskid): # pylint: disable=arguments-differ
""" Edit a task """
if not id_checker(taskid):
raise Exception("Invalid task id")
self.get_course_and_check_rights(courseid, allow_all_staff=False)
request = web.input()
if request.get("action") == "download" and request.get('path') is not None:
return self.action_download(courseid, taskid, request.get('path'))
elif request.get("action") == "delete" and request.get('path') is not None:
return self.action_delete(courseid, taskid, request.get('path'))
elif request.get("action") == "rename" and request.get('path') is not None and request.get('new_path') is not None:
return self.action_rename(courseid, taskid, request.get('path'), request.get('new_path'))
elif request.get("action") == "create" and request.get('path') is not None:
return self.action_create(courseid, taskid, request.get('path'))
elif request.get("action") == "edit" and request.get('path') is not None:
return self.action_edit(courseid, taskid, request.get('path'))
else:
return self.show_tab_file(courseid, taskid) | Edit a task | Below is the the instruction that describes the task:
### Input:
Edit a task
### Response:
def GET_AUTH(self, courseid, taskid): # pylint: disable=arguments-differ
""" Edit a task """
if not id_checker(taskid):
raise Exception("Invalid task id")
self.get_course_and_check_rights(courseid, allow_all_staff=False)
request = web.input()
if request.get("action") == "download" and request.get('path') is not None:
return self.action_download(courseid, taskid, request.get('path'))
elif request.get("action") == "delete" and request.get('path') is not None:
return self.action_delete(courseid, taskid, request.get('path'))
elif request.get("action") == "rename" and request.get('path') is not None and request.get('new_path') is not None:
return self.action_rename(courseid, taskid, request.get('path'), request.get('new_path'))
elif request.get("action") == "create" and request.get('path') is not None:
return self.action_create(courseid, taskid, request.get('path'))
elif request.get("action") == "edit" and request.get('path') is not None:
return self.action_edit(courseid, taskid, request.get('path'))
else:
return self.show_tab_file(courseid, taskid) |
def latitude(self, dms: bool = False) -> Union[str, float]:
"""Generate a random value of latitude.
:param dms: DMS format.
    :return: Value of latitude.
"""
return self._get_fs('lt', dms) | Generate a random value of latitude.
:param dms: DMS format.
:return: Value of latitude. | Below is the the instruction that describes the task:
### Input:
Generate a random value of latitude.
:param dms: DMS format.
:return: Value of latitude.
### Response:
def latitude(self, dms: bool = False) -> Union[str, float]:
"""Generate a random value of latitude.
:param dms: DMS format.
    :return: Value of latitude.
"""
return self._get_fs('lt', dms) |
def parse_package_string(path):
"""
Parse the effect package string.
Can contain the package python path or path to effect class in an effect package.
Examples::
    # Path to effect package
examples.cubes
# Path to effect class
examples.cubes.Cubes
Args:
path: python path to effect package. May also include effect class name.
Returns:
tuple: (package_path, effect_class)
"""
parts = path.split('.')
# Is the last entry in the path capitalized?
if parts[-1][0].isupper():
return ".".join(parts[:-1]), parts[-1]
return path, "" | Parse the effect package string.
Can contain the package python path or path to effect class in an effect package.
Examples::
    # Path to effect package
examples.cubes
# Path to effect class
examples.cubes.Cubes
Args:
path: python path to effect package. May also include effect class name.
Returns:
tuple: (package_path, effect_class) | Below is the the instruction that describes the task:
### Input:
Parse the effect package string.
Can contain the package python path or path to effect class in an effect package.
Examples::
    # Path to effect package
examples.cubes
# Path to effect class
examples.cubes.Cubes
Args:
path: python path to effect package. May also include effect class name.
Returns:
tuple: (package_path, effect_class)
### Response:
def parse_package_string(path):
"""
Parse the effect package string.
Can contain the package python path or path to effect class in an effect package.
Examples::
    # Path to effect package
examples.cubes
# Path to effect class
examples.cubes.Cubes
Args:
path: python path to effect package. May also include effect class name.
Returns:
tuple: (package_path, effect_class)
"""
parts = path.split('.')
# Is the last entry in the path capitalized?
if parts[-1][0].isupper():
return ".".join(parts[:-1]), parts[-1]
return path, "" |
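A short usage sketch of the splitting rule above, with hypothetical package paths; the result depends only on whether the last dotted component starts with an upper-case letter.

print(parse_package_string("examples.cubes"))        # ('examples.cubes', '')
print(parse_package_string("examples.cubes.Cubes"))  # ('examples.cubes', 'Cubes')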
def visit_pass(self, node, parent):
"""visit a Pass node by returning a fresh instance of it"""
return nodes.Pass(node.lineno, node.col_offset, parent) | visit a Pass node by returning a fresh instance of it | Below is the the instruction that describes the task:
### Input:
visit a Pass node by returning a fresh instance of it
### Response:
def visit_pass(self, node, parent):
"""visit a Pass node by returning a fresh instance of it"""
return nodes.Pass(node.lineno, node.col_offset, parent) |
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
"""Remove overlaps in UFOs' glyphs' contours."""
from booleanOperations import union, BooleanOperationsError
for ufo in ufos:
font_name = self._font_name(ufo)
logger.info("Removing overlaps for " + font_name)
for glyph in ufo:
if not glyph_filter(glyph):
continue
contours = list(glyph)
glyph.clearContours()
try:
union(contours, glyph.getPointPen())
except BooleanOperationsError:
logger.error(
"Failed to remove overlaps for %s: %r", font_name, glyph.name
)
raise | Remove overlaps in UFOs' glyphs' contours. | Below is the the instruction that describes the task:
### Input:
Remove overlaps in UFOs' glyphs' contours.
### Response:
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
"""Remove overlaps in UFOs' glyphs' contours."""
from booleanOperations import union, BooleanOperationsError
for ufo in ufos:
font_name = self._font_name(ufo)
logger.info("Removing overlaps for " + font_name)
for glyph in ufo:
if not glyph_filter(glyph):
continue
contours = list(glyph)
glyph.clearContours()
try:
union(contours, glyph.getPointPen())
except BooleanOperationsError:
logger.error(
"Failed to remove overlaps for %s: %r", font_name, glyph.name
)
raise |
def run(arguments: List[str], execution_directory: str=None, execution_environment: Dict=None) -> str:
"""
Runs the given arguments from the given directory (if given, else resorts to the (undefined) current directory).
:param arguments: the CLI arguments to run
:param execution_directory: the directory to execute the arguments in
:param execution_environment: the environment to execute in
:return: what is written to stdout following execution
    :exception RunException: raised if the execution has a non-zero return code
"""
process = subprocess.Popen(
arguments, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, cwd=execution_directory,
env=execution_environment)
out, error = process.communicate()
stdout = out.decode(_DATA_ENCODING).rstrip()
if process.returncode == _SUCCESS_RETURN_CODE:
return stdout
else:
raise RunException(stdout, error.decode(_DATA_ENCODING).rstrip(), arguments, execution_directory) | Runs the given arguments from the given directory (if given, else resorts to the (undefined) current directory).
:param arguments: the CLI arguments to run
:param execution_directory: the directory to execute the arguments in
:param execution_environment: the environment to execute in
:return: what is written to stdout following execution
:exception RunException: raised if the execution has a non-zero return code
### Input:
Runs the given arguments from the given directory (if given, else resorts to the (undefined) current directory).
:param arguments: the CLI arguments to run
:param execution_directory: the directory to execute the arguments in
:param execution_environment: the environment to execute in
:return: what is written to stdout following execution
:exception RunException: raised if the execution has a non-zero return code
### Response:
def run(arguments: List[str], execution_directory: str=None, execution_environment: Dict=None) -> str:
"""
Runs the given arguments from the given directory (if given, else resorts to the (undefined) current directory).
:param arguments: the CLI arguments to run
:param execution_directory: the directory to execute the arguments in
:param execution_environment: the environment to execute in
:return: what is written to stdout following execution
    :exception RunException: raised if the execution has a non-zero return code
"""
process = subprocess.Popen(
arguments, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, cwd=execution_directory,
env=execution_environment)
out, error = process.communicate()
stdout = out.decode(_DATA_ENCODING).rstrip()
if process.returncode == _SUCCESS_RETURN_CODE:
return stdout
else:
raise RunException(stdout, error.decode(_DATA_ENCODING).rstrip(), arguments, execution_directory) |
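A minimal usage sketch, assuming run and RunException are importable from the module above and a POSIX-like system where echo and ls are on PATH.

print(run(["echo", "hello"]))               # -> hello
try:
    run(["ls", "/definitely/not/there"])    # non-zero exit code
except RunException:
    print("RunException raised for failed command")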
def get_pmap_from_nrml(oqparam, fname):
"""
:param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:param fname:
an XML file containing hazard curves
:returns:
site mesh, curve array
"""
hcurves_by_imt = {}
oqparam.hazard_imtls = imtls = {}
for hcurves in nrml.read(fname):
imt = hcurves['IMT']
oqparam.investigation_time = hcurves['investigationTime']
if imt == 'SA':
imt += '(%s)' % hcurves['saPeriod']
imtls[imt] = ~hcurves.IMLs
data = sorted((~node.Point.pos, ~node.poEs) for node in hcurves[1:])
hcurves_by_imt[imt] = numpy.array([d[1] for d in data])
lons, lats = [], []
for xy, poes in data:
lons.append(xy[0])
lats.append(xy[1])
mesh = geo.Mesh(numpy.array(lons), numpy.array(lats))
num_levels = sum(len(v) for v in imtls.values())
array = numpy.zeros((len(mesh), num_levels))
imtls = DictArray(imtls)
for imt_ in hcurves_by_imt:
array[:, imtls(imt_)] = hcurves_by_imt[imt_]
return mesh, ProbabilityMap.from_array(array, range(len(mesh))) | :param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:param fname:
an XML file containing hazard curves
:returns:
site mesh, curve array | Below is the the instruction that describes the task:
### Input:
:param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:param fname:
an XML file containing hazard curves
:returns:
site mesh, curve array
### Response:
def get_pmap_from_nrml(oqparam, fname):
"""
:param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:param fname:
an XML file containing hazard curves
:returns:
site mesh, curve array
"""
hcurves_by_imt = {}
oqparam.hazard_imtls = imtls = {}
for hcurves in nrml.read(fname):
imt = hcurves['IMT']
oqparam.investigation_time = hcurves['investigationTime']
if imt == 'SA':
imt += '(%s)' % hcurves['saPeriod']
imtls[imt] = ~hcurves.IMLs
data = sorted((~node.Point.pos, ~node.poEs) for node in hcurves[1:])
hcurves_by_imt[imt] = numpy.array([d[1] for d in data])
lons, lats = [], []
for xy, poes in data:
lons.append(xy[0])
lats.append(xy[1])
mesh = geo.Mesh(numpy.array(lons), numpy.array(lats))
num_levels = sum(len(v) for v in imtls.values())
array = numpy.zeros((len(mesh), num_levels))
imtls = DictArray(imtls)
for imt_ in hcurves_by_imt:
array[:, imtls(imt_)] = hcurves_by_imt[imt_]
return mesh, ProbabilityMap.from_array(array, range(len(mesh))) |
def tell(self):
"""Returns the current position of read head.
"""
pos = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos)))
return pos.value | Returns the current position of read head. | Below is the the instruction that describes the task:
### Input:
Returns the current position of read head.
### Response:
def tell(self):
"""Returns the current position of read head.
"""
pos = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos)))
return pos.value |
def _add_post_data(self, request: Request):
'''Add data to the payload.'''
if self._item_session.url_record.post_data:
data = wpull.string.to_bytes(self._item_session.url_record.post_data)
else:
data = wpull.string.to_bytes(
self._processor.fetch_params.post_data
)
request.method = 'POST'
request.fields['Content-Type'] = 'application/x-www-form-urlencoded'
request.fields['Content-Length'] = str(len(data))
_logger.debug('Posting with data {0}.', data)
if not request.body:
request.body = Body(io.BytesIO())
with wpull.util.reset_file_offset(request.body):
request.body.write(data) | Add data to the payload. | Below is the the instruction that describes the task:
### Input:
Add data to the payload.
### Response:
def _add_post_data(self, request: Request):
'''Add data to the payload.'''
if self._item_session.url_record.post_data:
data = wpull.string.to_bytes(self._item_session.url_record.post_data)
else:
data = wpull.string.to_bytes(
self._processor.fetch_params.post_data
)
request.method = 'POST'
request.fields['Content-Type'] = 'application/x-www-form-urlencoded'
request.fields['Content-Length'] = str(len(data))
_logger.debug('Posting with data {0}.', data)
if not request.body:
request.body = Body(io.BytesIO())
with wpull.util.reset_file_offset(request.body):
request.body.write(data) |
def scan_processes_fast(self):
"""
Populates the snapshot with running processes.
Only the PID is retrieved for each process.
Dead processes are removed.
Threads and modules of living processes are ignored.
        Typically you don't need to call this method directly; if unsure, use
L{scan} instead.
@note: This method uses the PSAPI. It may be faster for scanning,
but some information may be missing, outdated or slower to obtain.
This could be a good tradeoff under some circumstances.
"""
# Get the new and old list of pids
new_pids = set( win32.EnumProcesses() )
old_pids = set( compat.iterkeys(self.__processDict) )
# Ignore our own pid
our_pid = win32.GetCurrentProcessId()
if our_pid in new_pids:
new_pids.remove(our_pid)
if our_pid in old_pids:
old_pids.remove(our_pid)
# Add newly found pids
for pid in new_pids.difference(old_pids):
self._add_process( Process(pid) )
# Remove missing pids
for pid in old_pids.difference(new_pids):
self._del_process(pid) | Populates the snapshot with running processes.
Only the PID is retrieved for each process.
Dead processes are removed.
Threads and modules of living processes are ignored.
Typically you don't need to call this method directly; if unsure, use
L{scan} instead.
@note: This method uses the PSAPI. It may be faster for scanning,
but some information may be missing, outdated or slower to obtain.
This could be a good tradeoff under some circumstances. | Below is the the instruction that describes the task:
### Input:
Populates the snapshot with running processes.
Only the PID is retrieved for each process.
Dead processes are removed.
Threads and modules of living processes are ignored.
Typically you don't need to call this method directly; if unsure, use
L{scan} instead.
@note: This method uses the PSAPI. It may be faster for scanning,
but some information may be missing, outdated or slower to obtain.
This could be a good tradeoff under some circumstances.
### Response:
def scan_processes_fast(self):
"""
Populates the snapshot with running processes.
Only the PID is retrieved for each process.
Dead processes are removed.
Threads and modules of living processes are ignored.
        Typically you don't need to call this method directly; if unsure, use
L{scan} instead.
@note: This method uses the PSAPI. It may be faster for scanning,
but some information may be missing, outdated or slower to obtain.
This could be a good tradeoff under some circumstances.
"""
# Get the new and old list of pids
new_pids = set( win32.EnumProcesses() )
old_pids = set( compat.iterkeys(self.__processDict) )
# Ignore our own pid
our_pid = win32.GetCurrentProcessId()
if our_pid in new_pids:
new_pids.remove(our_pid)
if our_pid in old_pids:
old_pids.remove(our_pid)
# Add newly found pids
for pid in new_pids.difference(old_pids):
self._add_process( Process(pid) )
# Remove missing pids
for pid in old_pids.difference(new_pids):
self._del_process(pid) |
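The bookkeeping above boils down to two set differences; a self-contained sketch of the same idea with plain integers standing in for PIDs.

old_pids = {100, 200, 300}
new_pids = {200, 300, 400}
print(new_pids - old_pids)   # {400}  -> would be passed to _add_process
print(old_pids - new_pids)   # {100}  -> would be passed to _del_process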
def get_uvec(vec):
""" Gets a unit vector parallel to input vector"""
l = np.linalg.norm(vec)
if l < 1e-8:
return vec
return vec / l | Gets a unit vector parallel to input vector | Below is the the instruction that describes the task:
### Input:
Gets a unit vector parallel to input vector
### Response:
def get_uvec(vec):
""" Gets a unit vector parallel to input vector"""
l = np.linalg.norm(vec)
if l < 1e-8:
return vec
return vec / l |
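A small numpy check of the helper above (assuming it is importable and that np is numpy, as used in its body); the near-zero guard returns the input unchanged.

import numpy as np

v = np.array([3.0, 4.0])
u = get_uvec(v)
print(u, np.linalg.norm(u))     # [0.6 0.8] 1.0
print(get_uvec(np.zeros(2)))    # [0. 0.] -- returned as-is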
def add_ne(self, ne):
"""
Parameters
----------
ne : etree.Element
etree representation of a <ne> element
(marks a text span -- (one or more <node> or <word> elements) as a named entity)
Example
-------
<ne xml:id="ne_23" type="PER">
<word xml:id="s3_2" form="Ute" pos="NE" morph="nsf" lemma="Ute" func="-" parent="s3_501" dephead="s3_1" deprel="APP"/>
<word xml:id="s3_3" form="Wedemeier" pos="NE" morph="nsf" lemma="Wedemeier" func="-" parent="s3_501" dephead="s3_2" deprel="APP"/>
</ne>
"""
ne_id = self.get_element_id(ne)
ne_label = 'ne:'+ne.attrib['type']
self.add_node(ne_id, layers={self.ns, self.ns+':ne'},
attr_dict=self.element_attribs_to_dict(ne),
label=ne_label)
# possible children: [('word', 78703), ('node', 11152), ('ne', 49)]
for child in ne.iterchildren():
child_id = self.get_element_id(child)
self.add_edge(ne_id, child_id, layers={self.ns, self.ns+':ne'},
edge_type=dg.EdgeTypes.spanning_relation,
label=ne_label) | Parameters
----------
ne : etree.Element
etree representation of a <ne> element
(marks a text span -- (one or more <node> or <word> elements) as a named entity)
Example
-------
<ne xml:id="ne_23" type="PER">
<word xml:id="s3_2" form="Ute" pos="NE" morph="nsf" lemma="Ute" func="-" parent="s3_501" dephead="s3_1" deprel="APP"/>
<word xml:id="s3_3" form="Wedemeier" pos="NE" morph="nsf" lemma="Wedemeier" func="-" parent="s3_501" dephead="s3_2" deprel="APP"/>
</ne> | Below is the the instruction that describes the task:
### Input:
Parameters
----------
ne : etree.Element
etree representation of a <ne> element
(marks a text span -- (one or more <node> or <word> elements) as a named entity)
Example
-------
<ne xml:id="ne_23" type="PER">
<word xml:id="s3_2" form="Ute" pos="NE" morph="nsf" lemma="Ute" func="-" parent="s3_501" dephead="s3_1" deprel="APP"/>
<word xml:id="s3_3" form="Wedemeier" pos="NE" morph="nsf" lemma="Wedemeier" func="-" parent="s3_501" dephead="s3_2" deprel="APP"/>
</ne>
### Response:
def add_ne(self, ne):
"""
Parameters
----------
ne : etree.Element
etree representation of a <ne> element
(marks a text span -- (one or more <node> or <word> elements) as a named entity)
Example
-------
<ne xml:id="ne_23" type="PER">
<word xml:id="s3_2" form="Ute" pos="NE" morph="nsf" lemma="Ute" func="-" parent="s3_501" dephead="s3_1" deprel="APP"/>
<word xml:id="s3_3" form="Wedemeier" pos="NE" morph="nsf" lemma="Wedemeier" func="-" parent="s3_501" dephead="s3_2" deprel="APP"/>
</ne>
"""
ne_id = self.get_element_id(ne)
ne_label = 'ne:'+ne.attrib['type']
self.add_node(ne_id, layers={self.ns, self.ns+':ne'},
attr_dict=self.element_attribs_to_dict(ne),
label=ne_label)
# possible children: [('word', 78703), ('node', 11152), ('ne', 49)]
for child in ne.iterchildren():
child_id = self.get_element_id(child)
self.add_edge(ne_id, child_id, layers={self.ns, self.ns+':ne'},
edge_type=dg.EdgeTypes.spanning_relation,
label=ne_label) |
def delete(self, id, **kwargs):
"""
Deletes an existing License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_with_http_info(id, **kwargs)
else:
(data) = self.delete_with_http_info(id, **kwargs)
return data | Deletes an existing License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Deletes an existing License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def delete(self, id, **kwargs):
"""
Deletes an existing License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_with_http_info(id, **kwargs)
else:
(data) = self.delete_with_http_info(id, **kwargs)
return data |
def _in_version(self, *versions):
"Returns true if this frame is in any of the specified versions of ID3."
for version in versions:
if (self._version == version
or (isinstance(self._version, collections.Container)
and version in self._version)):
return True
return False | Returns true if this frame is in any of the specified versions of ID3. | Below is the the instruction that describes the task:
### Input:
Returns true if this frame is in any of the specified versions of ID3.
### Response:
def _in_version(self, *versions):
"Returns true if this frame is in any of the specified versions of ID3."
for version in versions:
if (self._version == version
or (isinstance(self._version, collections.Container)
and version in self._version)):
return True
return False |
def generous_parse_uri(uri):
"""Return a urlparse.ParseResult object with the results of parsing the
given URI. This has the same properties as the result of parse_uri.
When passed a relative path, it determines the absolute path, sets the
scheme to file, the netloc to localhost and returns a parse of the result.
"""
parse_result = urlparse(uri)
if parse_result.scheme == '':
abspath = os.path.abspath(parse_result.path)
if IS_WINDOWS:
abspath = windows_to_unix_path(abspath)
fixed_uri = "file://{}".format(abspath)
parse_result = urlparse(fixed_uri)
return parse_result | Return a urlparse.ParseResult object with the results of parsing the
given URI. This has the same properties as the result of parse_uri.
When passed a relative path, it determines the absolute path, sets the
scheme to file, the netloc to localhost and returns a parse of the result. | Below is the the instruction that describes the task:
### Input:
Return a urlparse.ParseResult object with the results of parsing the
given URI. This has the same properties as the result of parse_uri.
When passed a relative path, it determines the absolute path, sets the
scheme to file, the netloc to localhost and returns a parse of the result.
### Response:
def generous_parse_uri(uri):
"""Return a urlparse.ParseResult object with the results of parsing the
given URI. This has the same properties as the result of parse_uri.
When passed a relative path, it determines the absolute path, sets the
scheme to file, the netloc to localhost and returns a parse of the result.
"""
parse_result = urlparse(uri)
if parse_result.scheme == '':
abspath = os.path.abspath(parse_result.path)
if IS_WINDOWS:
abspath = windows_to_unix_path(abspath)
fixed_uri = "file://{}".format(abspath)
parse_result = urlparse(fixed_uri)
return parse_result |
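A usage sketch, assuming the function above is importable and a POSIX filesystem; URIs with a scheme pass through, while relative paths are promoted to file:// URIs of their absolute path.

print(generous_parse_uri("s3://bucket/key").scheme)    # 's3'
result = generous_parse_uri("data/output")             # hypothetical relative path
print(result.scheme)                                   # 'file'
print(result.path.endswith("/data/output"))            # True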
def reconstruct_interval(experiment_id):
"""
Reverse the construct_experiment_id operation
:param experiment_id: The experiment id
:return: time interval
"""
start, end = map(lambda x: udatetime.utcfromtimestamp(x / 1000.0), map(float, experiment_id.split("-")))
from ..time_interval import TimeInterval
return TimeInterval(start, end) | Reverse the construct_experiment_id operation
:param experiment_id: The experiment id
:return: time interval | Below is the the instruction that describes the task:
### Input:
Reverse the construct_experiment_id operation
:param experiment_id: The experiment id
:return: time interval
### Response:
def reconstruct_interval(experiment_id):
"""
Reverse the construct_experiment_id operation
:param experiment_id: The experiment id
:return: time interval
"""
start, end = map(lambda x: udatetime.utcfromtimestamp(x / 1000.0), map(float, experiment_id.split("-")))
from ..time_interval import TimeInterval
return TimeInterval(start, end) |
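A self-contained sketch of the same arithmetic using only the standard library; the real function wraps the result in a TimeInterval and parses with udatetime. The experiment id below is a hypothetical example of the "startms-endms" format.

from datetime import datetime, timezone

experiment_id = "1609459200000-1609462800000"
start_ms, end_ms = map(float, experiment_id.split("-"))
start = datetime.fromtimestamp(start_ms / 1000.0, tz=timezone.utc)
end = datetime.fromtimestamp(end_ms / 1000.0, tz=timezone.utc)
print(start, end)   # 2021-01-01 00:00:00+00:00  2021-01-01 01:00:00+00:00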
def html(theme_name='readthedocs'):
# disable Flask RSTPAGES due to sphinx incompatibility
os.environ['RSTPAGES'] = 'FALSE'
theme(theme_name)
api()
man()
"""build the doc locally and view"""
clean()
local("cd docs; make html")
local("fab security.check")
local("touch docs/build/html/.nojekyll") | build the doc locally and view | Below is the the instruction that describes the task:
### Input:
build the doc locally and view
### Response:
def html(theme_name='readthedocs'):
# disable Flask RSTPAGES due to sphinx incompatibility
os.environ['RSTPAGES'] = 'FALSE'
theme(theme_name)
api()
man()
"""build the doc locally and view"""
clean()
local("cd docs; make html")
local("fab security.check")
local("touch docs/build/html/.nojekyll") |
def bulk_call(self, call_params):
"""REST BulkCalls Helper
"""
path = '/' + self.api_version + '/BulkCall/'
method = 'POST'
return self.request(path, method, call_params) | REST BulkCalls Helper | Below is the the instruction that describes the task:
### Input:
REST BulkCalls Helper
### Response:
def bulk_call(self, call_params):
"""REST BulkCalls Helper
"""
path = '/' + self.api_version + '/BulkCall/'
method = 'POST'
return self.request(path, method, call_params) |
def predict(self, X):
"""Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
"""
val = numpy.dot(X, self.coef_)
if hasattr(self, "intercept_"):
val += self.intercept_
# Order by increasing survival time if objective is pure ranking
if self.rank_ratio == 1:
val *= -1
else:
# model was fitted on log(time), transform to original scale
val = numpy.exp(val)
return val | Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks. | Below is the the instruction that describes the task:
### Input:
Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
### Response:
def predict(self, X):
"""Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
"""
val = numpy.dot(X, self.coef_)
if hasattr(self, "intercept_"):
val += self.intercept_
# Order by increasing survival time if objective is pure ranking
if self.rank_ratio == 1:
val *= -1
else:
# model was fitted on log(time), transform to original scale
val = numpy.exp(val)
return val |
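A self-contained numpy sketch of the scoring rule above, with made-up coefficients: a pure-ranking model (rank_ratio == 1) negates the linear predictor, while otherwise the log(time)-scale prediction is mapped back with exp().

import numpy as np

X = np.array([[1.0, 0.0], [0.0, 2.0]])
coef = np.array([0.5, -0.25])
val = X.dot(coef)        # linear predictor: [ 0.5 -0.5]
print(-val)              # ranking output:   [-0.5  0.5]
print(np.exp(val))       # time-scale output: [1.648... 0.606...]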
def items(self):
"""Get the items fetched by the jobs."""
# Get and remove queued items in an atomic transaction
pipe = self.conn.pipeline()
pipe.lrange(Q_STORAGE_ITEMS, 0, -1)
pipe.ltrim(Q_STORAGE_ITEMS, 1, 0)
items = pipe.execute()[0]
for item in items:
item = pickle.loads(item)
yield item | Get the items fetched by the jobs. | Below is the the instruction that describes the task:
### Input:
Get the items fetched by the jobs.
### Response:
def items(self):
"""Get the items fetched by the jobs."""
# Get and remove queued items in an atomic transaction
pipe = self.conn.pipeline()
pipe.lrange(Q_STORAGE_ITEMS, 0, -1)
pipe.ltrim(Q_STORAGE_ITEMS, 1, 0)
items = pipe.execute()[0]
for item in items:
item = pickle.loads(item)
yield item |
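An illustrative sketch of the same atomic drain pattern with redis-py, assuming a Redis server on localhost; the key name and payload are hypothetical stand-ins for Q_STORAGE_ITEMS and the pickled job items.

import pickle
import redis

conn = redis.Redis()
conn.rpush("queue:items", pickle.dumps({"id": 1}))
pipe = conn.pipeline()
pipe.lrange("queue:items", 0, -1)    # read everything...
pipe.ltrim("queue:items", 1, 0)      # ...and empty the list in the same transaction
raw_items, _ = pipe.execute()
print([pickle.loads(raw) for raw in raw_items])   # [{'id': 1}]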
async def dump_blob(elem, elem_type=None):
"""
Dumps blob message.
Supports both blob and raw value.
:param writer:
:param elem:
:param elem_type:
:param params:
:return:
"""
elem_is_blob = isinstance(elem, x.BlobType)
data = getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem
if data is None or len(data) == 0:
return b''
if isinstance(data, (bytes, bytearray, list)):
return base64.b16encode(bytes(data))
else:
raise ValueError('Unknown blob type') | Dumps blob message.
Supports both blob and raw value.
:param writer:
:param elem:
:param elem_type:
:param params:
:return: | Below is the the instruction that describes the task:
### Input:
Dumps blob message.
Supports both blob and raw value.
:param writer:
:param elem:
:param elem_type:
:param params:
:return:
### Response:
async def dump_blob(elem, elem_type=None):
"""
Dumps blob message.
Supports both blob and raw value.
:param writer:
:param elem:
:param elem_type:
:param params:
:return:
"""
elem_is_blob = isinstance(elem, x.BlobType)
data = getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem
if data is None or len(data) == 0:
return b''
if isinstance(data, (bytes, bytearray, list)):
return base64.b16encode(bytes(data))
else:
raise ValueError('Unknown blob type') |
def from_shapefile(cls, shapefile, *args, **kwargs):
"""
Loads a shapefile from disk and optionally merges
it with a dataset. See ``from_records`` for full
signature.
Parameters
----------
records: list of cartopy.io.shapereader.Record
Iterator containing Records.
dataset: holoviews.Dataset
Any HoloViews Dataset type.
on: str or list or dict
A mapping between the attribute names in the records and the
dimensions in the dataset.
value: str
The value dimension in the dataset the values will be drawn
from.
index: str or list
One or more dimensions in the dataset the Shapes will be
indexed by.
drop_missing: boolean
            Whether to drop shapes which are missing from the provided
dataset.
Returns
-------
shapes: Polygons or Path object
A Polygons or Path object containing the geometries
"""
reader = Reader(shapefile)
return cls.from_records(reader.records(), *args, **kwargs) | Loads a shapefile from disk and optionally merges
it with a dataset. See ``from_records`` for full
signature.
Parameters
----------
records: list of cartopy.io.shapereader.Record
Iterator containing Records.
dataset: holoviews.Dataset
Any HoloViews Dataset type.
on: str or list or dict
A mapping between the attribute names in the records and the
dimensions in the dataset.
value: str
The value dimension in the dataset the values will be drawn
from.
index: str or list
One or more dimensions in the dataset the Shapes will be
indexed by.
drop_missing: boolean
    Whether to drop shapes which are missing from the provided
dataset.
Returns
-------
shapes: Polygons or Path object
A Polygons or Path object containing the geometries | Below is the the instruction that describes the task:
### Input:
Loads a shapefile from disk and optionally merges
it with a dataset. See ``from_records`` for full
signature.
Parameters
----------
records: list of cartopy.io.shapereader.Record
Iterator containing Records.
dataset: holoviews.Dataset
Any HoloViews Dataset type.
on: str or list or dict
A mapping between the attribute names in the records and the
dimensions in the dataset.
value: str
The value dimension in the dataset the values will be drawn
from.
index: str or list
One or more dimensions in the dataset the Shapes will be
indexed by.
drop_missing: boolean
    Whether to drop shapes which are missing from the provided
dataset.
Returns
-------
shapes: Polygons or Path object
A Polygons or Path object containing the geometries
### Response:
def from_shapefile(cls, shapefile, *args, **kwargs):
"""
Loads a shapefile from disk and optionally merges
it with a dataset. See ``from_records`` for full
signature.
Parameters
----------
records: list of cartopy.io.shapereader.Record
Iterator containing Records.
dataset: holoviews.Dataset
Any HoloViews Dataset type.
on: str or list or dict
A mapping between the attribute names in the records and the
dimensions in the dataset.
value: str
The value dimension in the dataset the values will be drawn
from.
index: str or list
One or more dimensions in the dataset the Shapes will be
indexed by.
drop_missing: boolean
            Whether to drop shapes which are missing from the provided
dataset.
Returns
-------
shapes: Polygons or Path object
A Polygons or Path object containing the geometries
"""
reader = Reader(shapefile)
return cls.from_records(reader.records(), *args, **kwargs) |
def similar(self, **kwargs):
"""
Get the similar TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any TV method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('similar')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Get the similar TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any TV method.
Returns:
    A dict representation of the JSON returned from the API. | Below is the the instruction that describes the task:
### Input:
Get the similar TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any TV method.
Returns:
    A dict representation of the JSON returned from the API.
### Response:
def similar(self, **kwargs):
"""
Get the similar TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any TV method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('similar')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response |
def _apply_dvs_capability(capability_spec, capability_dict):
'''
Applies the values of the capability_dict dictionary to a DVS capability
object (vim.vim.DVSCapability)
'''
if 'operation_supported' in capability_dict:
capability_spec.dvsOperationSupported = \
capability_dict['operation_supported']
if 'port_operation_supported' in capability_dict:
capability_spec.dvPortOperationSupported = \
capability_dict['port_operation_supported']
if 'portgroup_operation_supported' in capability_dict:
capability_spec.dvPortGroupOperationSupported = \
capability_dict['portgroup_operation_supported'] | Applies the values of the capability_dict dictionary to a DVS capability
object (vim.vim.DVSCapability) | Below is the the instruction that describes the task:
### Input:
Applies the values of the capability_dict dictionary to a DVS capability
object (vim.vim.DVSCapability)
### Response:
def _apply_dvs_capability(capability_spec, capability_dict):
'''
Applies the values of the capability_dict dictionary to a DVS capability
object (vim.vim.DVSCapability)
'''
if 'operation_supported' in capability_dict:
capability_spec.dvsOperationSupported = \
capability_dict['operation_supported']
if 'port_operation_supported' in capability_dict:
capability_spec.dvPortOperationSupported = \
capability_dict['port_operation_supported']
if 'portgroup_operation_supported' in capability_dict:
capability_spec.dvPortGroupOperationSupported = \
capability_dict['portgroup_operation_supported'] |
def parse_manifest(path_to_manifest):
"""
Parses manifest file for Toil Germline Pipeline
:param str path_to_manifest: Path to sample manifest file
:return: List of GermlineSample namedtuples
:rtype: list[GermlineSample]
"""
bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)"
fq_re = r"^(?P<uuid>\S+)\s(?P<url>\S+)\s(?P<paired_url>\S+)?\s?(?P<rg_line>@RG\S+)"
samples = []
with open(path_to_manifest, 'r') as f:
for line in f.readlines():
line = line.strip()
if line.startswith('#'):
continue
bam_match = re.match(bam_re, line)
fastq_match = re.match(fq_re, line)
if bam_match:
uuid = bam_match.group('uuid')
url = bam_match.group('url')
paired_url = None
rg_line = None
require('.bam' in url.lower(),
'Expected .bam extension:\n{}:\t{}'.format(uuid, url))
elif fastq_match:
uuid = fastq_match.group('uuid')
url = fastq_match.group('url')
paired_url = fastq_match.group('paired_url')
rg_line = fastq_match.group('rg_line')
require('.fq' in url.lower() or '.fastq' in url.lower(),
'Expected .fq extension:\n{}:\t{}'.format(uuid, url))
else:
raise ValueError('Could not parse entry in manifest: %s\n%s' % (f.name, line))
# Checks that URL has a scheme
require(urlparse(url).scheme, 'Invalid URL passed for {}'.format(url))
samples.append(GermlineSample(uuid, url, paired_url, rg_line))
return samples | Parses manifest file for Toil Germline Pipeline
:param str path_to_manifest: Path to sample manifest file
:return: List of GermlineSample namedtuples
:rtype: list[GermlineSample] | Below is the the instruction that describes the task:
### Input:
Parses manifest file for Toil Germline Pipeline
:param str path_to_manifest: Path to sample manifest file
:return: List of GermlineSample namedtuples
:rtype: list[GermlineSample]
### Response:
def parse_manifest(path_to_manifest):
"""
Parses manifest file for Toil Germline Pipeline
:param str path_to_manifest: Path to sample manifest file
:return: List of GermlineSample namedtuples
:rtype: list[GermlineSample]
"""
bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)"
fq_re = r"^(?P<uuid>\S+)\s(?P<url>\S+)\s(?P<paired_url>\S+)?\s?(?P<rg_line>@RG\S+)"
samples = []
with open(path_to_manifest, 'r') as f:
for line in f.readlines():
line = line.strip()
if line.startswith('#'):
continue
bam_match = re.match(bam_re, line)
fastq_match = re.match(fq_re, line)
if bam_match:
uuid = bam_match.group('uuid')
url = bam_match.group('url')
paired_url = None
rg_line = None
require('.bam' in url.lower(),
'Expected .bam extension:\n{}:\t{}'.format(uuid, url))
elif fastq_match:
uuid = fastq_match.group('uuid')
url = fastq_match.group('url')
paired_url = fastq_match.group('paired_url')
rg_line = fastq_match.group('rg_line')
require('.fq' in url.lower() or '.fastq' in url.lower(),
'Expected .fq extension:\n{}:\t{}'.format(uuid, url))
else:
raise ValueError('Could not parse entry in manifest: %s\n%s' % (f.name, line))
# Checks that URL has a scheme
require(urlparse(url).scheme, 'Invalid URL passed for {}'.format(url))
samples.append(GermlineSample(uuid, url, paired_url, rg_line))
return samples |
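A self-contained check of the two manifest patterns above against hypothetical entries (one BAM sample, one paired FASTQ sample whose @RG field is written with literal backslash-t separators).

import re

bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)"
fq_re = r"^(?P<uuid>\S+)\s(?P<url>\S+)\s(?P<paired_url>\S+)?\s?(?P<rg_line>@RG\S+)"

bam_line = "sample1 s3://bucket/sample1.bam"
fq_line = "sample2 s3://bucket/r1.fq.gz s3://bucket/r2.fq.gz @RG\\tID:s2\\tSM:s2"

print(re.match(bam_re, bam_line).groupdict())
print(re.match(fq_re, fq_line).groupdict())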
def rnd_date(start=date(1970, 1, 1), end=None, **kwargs):
"""
    Generate a random date between ``start`` and ``end``.
:param start: Left bound
:type start: string or datetime.date, (default date(1970, 1, 1))
:param end: Right bound
:type end: string or datetime.date, (default date.today())
:return: a datetime.date object
    **Chinese documentation**
    Randomly generates a date between ``start`` and ``end``.
"""
if end is None:
end = date.today()
start = parser.parse_date(start)
end = parser.parse_date(end)
_assert_correct_start_end(start, end)
    return _rnd_date(start, end) | Generate a random date between ``start`` and ``end``.
:param start: Left bound
:type start: string or datetime.date, (default date(1970, 1, 1))
:param end: Right bound
:type end: string or datetime.date, (default date.today())
:return: a datetime.date object
**Chinese documentation**
Randomly generates a date between ``start`` and ``end``. | Below is the the instruction that describes the task:
### Input:
Generate a random date between ``start`` and ``end``.
:param start: Left bound
:type start: string or datetime.date, (default date(1970, 1, 1))
:param end: Right bound
:type end: string or datetime.date, (default date.today())
:return: a datetime.date object
**Chinese documentation**
Randomly generates a date between ``start`` and ``end``.
### Response:
def rnd_date(start=date(1970, 1, 1), end=None, **kwargs):
"""
    Generate a random date between ``start`` and ``end``.
:param start: Left bound
:type start: string or datetime.date, (default date(1970, 1, 1))
:param end: Right bound
:type end: string or datetime.date, (default date.today())
:return: a datetime.date object
    **Chinese documentation**
    Randomly generates a date between ``start`` and ``end``.
"""
if end is None:
end = date.today()
start = parser.parse_date(start)
end = parser.parse_date(end)
_assert_correct_start_end(start, end)
return _rnd_date(start, end) |
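A self-contained sketch of the same idea using only the standard library; the real helper delegates string parsing to its own parser module and the sampling to _rnd_date.

import random
from datetime import date, timedelta

def rnd_date_sketch(start=date(1970, 1, 1), end=None):
    end = end or date.today()
    span = (end - start).days
    return start + timedelta(days=random.randint(0, span))

print(rnd_date_sketch(date(2020, 1, 1), date(2020, 12, 31)))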
def close(self):
"""
Close service client and its plugins.
"""
self._execute_plugin_hooks_sync(hook='close')
if not self.session.closed:
ensure_future(self.session.close(), loop=self.loop) | Close service client and its plugins. | Below is the the instruction that describes the task:
### Input:
Close service client and its plugins.
### Response:
def close(self):
"""
Close service client and its plugins.
"""
self._execute_plugin_hooks_sync(hook='close')
if not self.session.closed:
ensure_future(self.session.close(), loop=self.loop) |
def xcom_pull(
self,
task_ids=None,
dag_id=None,
key=XCOM_RETURN_KEY,
include_prior_dates=False):
"""
Pull XComs that optionally meet certain criteria.
The default value for `key` limits the search to XComs
that were returned by other tasks (as opposed to those that were pushed
manually). To remove this filter, pass key=None (or any desired value).
If a single task_id string is provided, the result is the value of the
most recent matching XCom from that task_id. If multiple task_ids are
provided, a tuple of matching values is returned. None is returned
whenever no matches are found.
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is 'return_value', also
available as a constant XCOM_RETURN_KEY. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass key=None.
:type key: str
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:type task_ids: str or iterable of strings (representing task_ids)
:param dag_id: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_id: str
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
:type include_prior_dates: bool
"""
if dag_id is None:
dag_id = self.dag_id
pull_fn = functools.partial(
XCom.get_one,
execution_date=self.execution_date,
key=key,
dag_id=dag_id,
include_prior_dates=include_prior_dates)
if is_container(task_ids):
return tuple(pull_fn(task_id=t) for t in task_ids)
else:
return pull_fn(task_id=task_ids) | Pull XComs that optionally meet certain criteria.
The default value for `key` limits the search to XComs
that were returned by other tasks (as opposed to those that were pushed
manually). To remove this filter, pass key=None (or any desired value).
If a single task_id string is provided, the result is the value of the
most recent matching XCom from that task_id. If multiple task_ids are
provided, a tuple of matching values is returned. None is returned
whenever no matches are found.
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is 'return_value', also
available as a constant XCOM_RETURN_KEY. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass key=None.
:type key: str
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:type task_ids: str or iterable of strings (representing task_ids)
:param dag_id: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_id: str
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
:type include_prior_dates: bool | Below is the the instruction that describes the task:
### Input:
Pull XComs that optionally meet certain criteria.
The default value for `key` limits the search to XComs
that were returned by other tasks (as opposed to those that were pushed
manually). To remove this filter, pass key=None (or any desired value).
If a single task_id string is provided, the result is the value of the
most recent matching XCom from that task_id. If multiple task_ids are
provided, a tuple of matching values is returned. None is returned
whenever no matches are found.
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is 'return_value', also
available as a constant XCOM_RETURN_KEY. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass key=None.
:type key: str
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:type task_ids: str or iterable of strings (representing task_ids)
:param dag_id: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_id: str
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
:type include_prior_dates: bool
### Response:
def xcom_pull(
self,
task_ids=None,
dag_id=None,
key=XCOM_RETURN_KEY,
include_prior_dates=False):
"""
Pull XComs that optionally meet certain criteria.
The default value for `key` limits the search to XComs
that were returned by other tasks (as opposed to those that were pushed
manually). To remove this filter, pass key=None (or any desired value).
If a single task_id string is provided, the result is the value of the
most recent matching XCom from that task_id. If multiple task_ids are
provided, a tuple of matching values is returned. None is returned
whenever no matches are found.
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is 'return_value', also
available as a constant XCOM_RETURN_KEY. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass key=None.
:type key: str
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:type task_ids: str or iterable of strings (representing task_ids)
:param dag_id: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_id: str
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
:type include_prior_dates: bool
"""
if dag_id is None:
dag_id = self.dag_id
pull_fn = functools.partial(
XCom.get_one,
execution_date=self.execution_date,
key=key,
dag_id=dag_id,
include_prior_dates=include_prior_dates)
if is_container(task_ids):
return tuple(pull_fn(task_id=t) for t in task_ids)
else:
return pull_fn(task_id=task_ids) |
def getAnalystName(self):
""" Returns the name of the currently assigned analyst
"""
mtool = getToolByName(self, 'portal_membership')
analyst = self.getAnalyst().strip()
analyst_member = mtool.getMemberById(analyst)
if analyst_member is not None:
return analyst_member.getProperty('fullname')
return analyst | Returns the name of the currently assigned analyst | Below is the the instruction that describes the task:
### Input:
Returns the name of the currently assigned analyst
### Response:
def getAnalystName(self):
""" Returns the name of the currently assigned analyst
"""
mtool = getToolByName(self, 'portal_membership')
analyst = self.getAnalyst().strip()
analyst_member = mtool.getMemberById(analyst)
if analyst_member is not None:
return analyst_member.getProperty('fullname')
return analyst |
def seek(self, pos):
""" Move to new input file position. If position is negative or out of file, raise Exception. """
if (pos > self.file_size) or (pos < 0):
raise Exception("Unable to seek - position out of file!")
self.file.seek(pos) | Move to new input file position. If position is negative or out of file, raise Exception. | Below is the the instruction that describes the task:
### Input:
Move to new input file position. If position is negative or out of file, raise Exception.
### Response:
def seek(self, pos):
""" Move to new input file position. If position is negative or out of file, raise Exception. """
if (pos > self.file_size) or (pos < 0):
raise Exception("Unable to seek - position out of file!")
self.file.seek(pos) |
def to_list(self):
"""Converts the GeneSet object to a flat list of strings.
Note: see also :meth:`from_list`.
Parameters
----------
Returns
-------
list of str
The data from the GeneSet object as a flat list.
"""
src = self._source or ''
coll = self._collection or ''
desc = self._description or ''
l = [self._id, src, coll, self._name,
','.join(sorted(self._genes)), desc]
return l | Converts the GeneSet object to a flat list of strings.
Note: see also :meth:`from_list`.
Parameters
----------
Returns
-------
list of str
The data from the GeneSet object as a flat list. | Below is the the instruction that describes the task:
### Input:
Converts the GeneSet object to a flat list of strings.
Note: see also :meth:`from_list`.
Parameters
----------
Returns
-------
list of str
The data from the GeneSet object as a flat list.
### Response:
def to_list(self):
"""Converts the GeneSet object to a flat list of strings.
Note: see also :meth:`from_list`.
Parameters
----------
Returns
-------
list of str
The data from the GeneSet object as a flat list.
"""
src = self._source or ''
coll = self._collection or ''
desc = self._description or ''
l = [self._id, src, coll, self._name,
','.join(sorted(self._genes)), desc]
return l |
def spectral_registration(data, target, initial_guess=(0.0, 0.0), frequency_range=None):
"""
Performs the spectral registration method to calculate the frequency and
phase shifts between the input data and the reference spectrum target. The
frequency range over which the two spectra are compared can be specified to
exclude regions where the spectra differ.
:param data:
:param target:
:param initial_guess:
:param frequency_range:
:return:
"""
# make sure that there are no extra dimensions in the data
data = data.squeeze()
target = target.squeeze()
# the supplied frequency range can be none, in which case we use the whole
# spectrum, or it can be a tuple defining two frequencies in Hz, in which
# case we use the spectral points between those two frequencies, or it can
# be a numpy.array of the same size as the data in which case we simply use
# that array as the weightings for the comparison
if type(frequency_range) is tuple:
        spectral_weights = (frequency_range[0] < data.frequency_axis()) & (data.frequency_axis() < frequency_range[1])
else:
spectral_weights = frequency_range
# define a residual function for the optimizer to use
def residual(input_vector):
transformed_data = transform_fid(data, input_vector[0], input_vector[1])
residual_data = transformed_data - target
if frequency_range is not None:
spectrum = residual_data.spectrum()
            weighted_spectrum = spectrum * spectral_weights
# remove zero-elements
weighted_spectrum = weighted_spectrum[weighted_spectrum != 0]
residual_data = numpy.fft.ifft(numpy.fft.ifftshift(weighted_spectrum))
return_vector = numpy.zeros(len(residual_data) * 2)
return_vector[:len(residual_data)] = residual_data.real
return_vector[len(residual_data):] = residual_data.imag
return return_vector
out = scipy.optimize.leastsq(residual, initial_guess)
return -out[0][0], -out[0][1] | Performs the spectral registration method to calculate the frequency and
phase shifts between the input data and the reference spectrum target. The
frequency range over which the two spectra are compared can be specified to
exclude regions where the spectra differ.
:param data:
:param target:
:param initial_guess:
:param frequency_range:
:return: | Below is the the instruction that describes the task:
### Input:
Performs the spectral registration method to calculate the frequency and
phase shifts between the input data and the reference spectrum target. The
frequency range over which the two spectra are compared can be specified to
exclude regions where the spectra differ.
:param data:
:param target:
:param initial_guess:
:param frequency_range:
:return:
### Response:
def spectral_registration(data, target, initial_guess=(0.0, 0.0), frequency_range=None):
"""
Performs the spectral registration method to calculate the frequency and
phase shifts between the input data and the reference spectrum target. The
frequency range over which the two spectra are compared can be specified to
exclude regions where the spectra differ.
:param data:
:param target:
:param initial_guess:
:param frequency_range:
:return:
"""
# make sure that there are no extra dimensions in the data
data = data.squeeze()
target = target.squeeze()
# the supplied frequency range can be none, in which case we use the whole
# spectrum, or it can be a tuple defining two frequencies in Hz, in which
# case we use the spectral points between those two frequencies, or it can
# be a numpy.array of the same size as the data in which case we simply use
# that array as the weightings for the comparison
if type(frequency_range) is tuple:
        spectral_weights = (frequency_range[0] < data.frequency_axis()) & (data.frequency_axis() < frequency_range[1])
else:
spectral_weights = frequency_range
# define a residual function for the optimizer to use
def residual(input_vector):
transformed_data = transform_fid(data, input_vector[0], input_vector[1])
residual_data = transformed_data - target
if frequency_range is not None:
spectrum = residual_data.spectrum()
            weighted_spectrum = spectrum * spectral_weights
# remove zero-elements
weighted_spectrum = weighted_spectrum[weighted_spectrum != 0]
residual_data = numpy.fft.ifft(numpy.fft.ifftshift(weighted_spectrum))
return_vector = numpy.zeros(len(residual_data) * 2)
return_vector[:len(residual_data)] = residual_data.real
return_vector[len(residual_data):] = residual_data.imag
return return_vector
out = scipy.optimize.leastsq(residual, initial_guess)
return -out[0][0], -out[0][1] |
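A small numpy sketch of the frequency-range mask built above: the two comparisons must be parenthesised before combining with &, because & binds tighter than < on ndarrays.

import numpy as np

frequency_axis = np.linspace(-500.0, 500.0, 11)
frequency_range = (-100.0, 100.0)
mask = (frequency_range[0] < frequency_axis) & (frequency_axis < frequency_range[1])
print(frequency_axis[mask])   # only the points strictly inside the range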
def callback(self, request, **kwargs):
"""
        Called from the Service when the user agrees to activate it
:param request: request object
:return: callback url
:rtype: string , path to the template
"""
access_token = request.session['oauth_token'] + "#TH#"
access_token += str(request.session['oauth_id'])
kwargs = {'access_token': access_token}
        return super(ServiceGithub, self).callback(request, **kwargs) | Called from the Service when the user agrees to activate it
:param request: request object
:return: callback url
:rtype: string , path to the template | Below is the the instruction that describes the task:
### Input:
Called from the Service when the user agrees to activate it
:param request: request object
:return: callback url
:rtype: string , path to the template
### Response:
def callback(self, request, **kwargs):
"""
        Called from the Service when the user agrees to activate it
:param request: request object
:return: callback url
:rtype: string , path to the template
"""
access_token = request.session['oauth_token'] + "#TH#"
access_token += str(request.session['oauth_id'])
kwargs = {'access_token': access_token}
return super(ServiceGithub, self).callback(request, **kwargs) |
def custom_object_prefix_lax(instance):
"""Ensure custom objects follow lenient naming style conventions
for forward-compatibility.
"""
if (instance['type'] not in enums.TYPES and
instance['type'] not in enums.RESERVED_OBJECTS and
not CUSTOM_TYPE_LAX_PREFIX_RE.match(instance['type'])):
yield JSONError("Custom object type '%s' should start with 'x-' in "
"order to be compatible with future versions of the "
"STIX 2 specification." % instance['type'],
instance['id'], 'custom-prefix-lax') | Ensure custom objects follow lenient naming style conventions
for forward-compatibility. | Below is the the instruction that describes the task:
### Input:
Ensure custom objects follow lenient naming style conventions
for forward-compatibility.
### Response:
def custom_object_prefix_lax(instance):
"""Ensure custom objects follow lenient naming style conventions
for forward-compatibility.
"""
if (instance['type'] not in enums.TYPES and
instance['type'] not in enums.RESERVED_OBJECTS and
not CUSTOM_TYPE_LAX_PREFIX_RE.match(instance['type'])):
yield JSONError("Custom object type '%s' should start with 'x-' in "
"order to be compatible with future versions of the "
"STIX 2 specification." % instance['type'],
instance['id'], 'custom-prefix-lax') |
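A small usage sketch (hypothetical instance dict; assumes the module-level enums and prefix regex are in scope). The function is a generator, so warnings are collected by iterating it:
instance = {
    'type': 'my-custom-object',
    'id': 'my-custom-object--6ba7b810-9dad-11d1-80b4-00c04fd430c8',
}
for error in custom_object_prefix_lax(instance):
    print(error)  # warns because the type does not start with 'x-'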
def enable_key(self):
"""Enable an existing API Key."""
print("This command will enable a disabled key.")
apiKeyID = input("API Key ID: ")
try:
key = self._curl_bitmex("/apiKey/enable",
postdict={"apiKeyID": apiKeyID})
print("Key with ID %s enabled." % key["id"])
except:
print("Unable to enable key, please try again.")
self.enable_key() | Enable an existing API Key. | Below is the the instruction that describes the task:
### Input:
Enable an existing API Key.
### Response:
def enable_key(self):
"""Enable an existing API Key."""
print("This command will enable a disabled key.")
apiKeyID = input("API Key ID: ")
try:
key = self._curl_bitmex("/apiKey/enable",
postdict={"apiKeyID": apiKeyID})
print("Key with ID %s enabled." % key["id"])
except:
print("Unable to enable key, please try again.")
self.enable_key() |
def split_path(path) :
"convenience routine for splitting a path into a list of components."
if isinstance(path, (tuple, list)) :
result = path # assume already split
elif path == "/" :
result = []
else :
if not path.startswith("/") or path.endswith("/") :
raise DBusError(DBUS.ERROR_INVALID_ARGS, "invalid path %s" % repr(path))
#end if
result = path.split("/")[1:]
#end if
return \
result | convenience routine for splitting a path into a list of components. | Below is the the instruction that describes the task:
### Input:
convenience routine for splitting a path into a list of components.
### Response:
def split_path(path) :
"convenience routine for splitting a path into a list of components."
if isinstance(path, (tuple, list)) :
result = path # assume already split
elif path == "/" :
result = []
else :
if not path.startswith("/") or path.endswith("/") :
raise DBusError(DBUS.ERROR_INVALID_ARGS, "invalid path %s" % repr(path))
#end if
result = path.split("/")[1:]
#end if
return \
result |
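For example, following the three branches above:
assert split_path("/org/freedesktop/DBus") == ["org", "freedesktop", "DBus"]
assert split_path("/") == []
assert split_path(["already", "split"]) == ["already", "split"]  # passed through unchanged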
def replyToComment(self, repo_user, repo_name, pull_number,
body, in_reply_to):
"""
POST /repos/:owner/:repo/pulls/:number/comments
Like create, but reply to an existing comment.
:param body: The text of the comment.
:param in_reply_to: The comment ID to reply to.
"""
return self.api.makeRequest(
["repos", repo_user, repo_name,
"pulls", str(pull_number), "comments"],
method="POST",
data=dict(body=body,
in_reply_to=in_reply_to)) | POST /repos/:owner/:repo/pulls/:number/comments
Like create, but reply to an existing comment.
:param body: The text of the comment.
:param in_reply_to: The comment ID to reply to. | Below is the the instruction that describes the task:
### Input:
POST /repos/:owner/:repo/pulls/:number/comments
Like create, but reply to an existing comment.
:param body: The text of the comment.
:param in_reply_to: The comment ID to reply to.
### Response:
def replyToComment(self, repo_user, repo_name, pull_number,
body, in_reply_to):
"""
POST /repos/:owner/:repo/pulls/:number/comments
Like create, but reply to an existing comment.
:param body: The text of the comment.
:param in_reply_to: The comment ID to reply to.
"""
return self.api.makeRequest(
["repos", repo_user, repo_name,
"pulls", str(pull_number), "comments"],
method="POST",
data=dict(body=body,
in_reply_to=in_reply_to)) |
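A usage sketch (hypothetical owner, repo and IDs; pulls stands for whatever API object exposes this method):
# Reply to review comment 42 on pull request #7 of octocat/example-repo.
d = pulls.replyToComment('octocat', 'example-repo', 7,
                         body='Agreed, will fix.', in_reply_to=42)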
def _produce_return(self, cursor):
""" Calls callback once with generator.
:rtype: None
"""
self.callback(self._row_generator(cursor), *self.cb_args)
return None | Calls callback once with generator.
:rtype: None | Below is the the instruction that describes the task:
### Input:
Calls callback once with generator.
:rtype: None
### Response:
def _produce_return(self, cursor):
""" Calls callback once with generator.
:rtype: None
"""
self.callback(self._row_generator(cursor), *self.cb_args)
return None |
def init(banner, hidden, backup):
"""Initialize a manage shell in current directory
$ manage init --banner="My awesome app shell"
initializing manage...
creating manage.yml
"""
manage_file = HIDDEN_MANAGE_FILE if hidden else MANAGE_FILE
if os.path.exists(manage_file):
if not click.confirm('Rewrite {0}?'.format(manage_file)):
return
if backup:
bck = '.bck_{0}'.format(manage_file)
with open(manage_file, 'r') as source, open(bck, 'w') as bck_file:
bck_file.write(source.read())
with open(manage_file, 'w') as output:
data = default_manage_dict
if banner:
data['shell']['banner']['message'] = banner
output.write(yaml.dump(data, default_flow_style=False)) | Initialize a manage shell in current directory
$ manage init --banner="My awesome app shell"
initializing manage...
creating manage.yml | Below is the the instruction that describes the task:
### Input:
Initialize a manage shell in current directory
$ manage init --banner="My awesome app shell"
initializing manage...
creating manage.yml
### Response:
def init(banner, hidden, backup):
"""Initialize a manage shell in current directory
$ manage init --banner="My awesome app shell"
initializing manage...
creating manage.yml
"""
manage_file = HIDDEN_MANAGE_FILE if hidden else MANAGE_FILE
if os.path.exists(manage_file):
if not click.confirm('Rewrite {0}?'.format(manage_file)):
return
if backup:
bck = '.bck_{0}'.format(manage_file)
with open(manage_file, 'r') as source, open(bck, 'w') as bck_file:
bck_file.write(source.read())
with open(manage_file, 'w') as output:
data = default_manage_dict
if banner:
data['shell']['banner']['message'] = banner
output.write(yaml.dump(data, default_flow_style=False)) |
def rc_params(usetex=None):
"""Returns a new `matplotlib.RcParams` with updated GWpy parameters
The updated parameters are globally stored as
`gwpy.plot.rc.GWPY_RCPARAMS`, with the updated TeX parameters as
`gwpy.plot.rc.GWPY_TEX_RCPARAMS`.
.. note::
This function doesn't apply the new `RcParams` in any way, just
creates something that can be used to set `matplotlib.rcParams`.
Parameters
----------
usetex : `bool`, `None`
value to set for `text.usetex`; if `None` determine automatically
using the ``GWPY_USETEX`` environment variable, and whether `tex`
is available on the system. If `True` is given (or determined)
a number of other parameters are updated to improve TeX formatting.
Examples
--------
>>> import matplotlib
    >>> from gwpy.plot.rc import rc_params as gwpy_rc_params
>>> matplotlib.rcParams.update(gwpy_rc_params(usetex=False))
"""
# if user didn't specify to use tex or not, guess based on
# the `GWPY_USETEX` environment variable, or whether tex is
# installed at all.
if usetex is None:
usetex = bool_env(
'GWPY_USETEX',
default=rcParams['text.usetex'] or tex.has_tex())
# build RcParams from matplotlib.rcParams with GWpy extras
rcp = GWPY_RCPARAMS.copy()
if usetex:
rcp.update(GWPY_TEX_RCPARAMS)
return rcp | Returns a new `matplotlib.RcParams` with updated GWpy parameters
The updated parameters are globally stored as
`gwpy.plot.rc.GWPY_RCPARAMS`, with the updated TeX parameters as
`gwpy.plot.rc.GWPY_TEX_RCPARAMS`.
.. note::
This function doesn't apply the new `RcParams` in any way, just
creates something that can be used to set `matplotlib.rcParams`.
Parameters
----------
usetex : `bool`, `None`
value to set for `text.usetex`; if `None` determine automatically
using the ``GWPY_USETEX`` environment variable, and whether `tex`
is available on the system. If `True` is given (or determined)
a number of other parameters are updated to improve TeX formatting.
Examples
--------
>>> import matplotlib
>>> from gwpy.plot.rc import rc_params as gwpy_rc_params
>>> matplotlib.rcParams.update(gwpy_rc_params(usetex=False)) | Below is the the instruction that describes the task:
### Input:
Returns a new `matplotlib.RcParams` with updated GWpy parameters
The updated parameters are globally stored as
`gwpy.plot.rc.GWPY_RCPARAMS`, with the updated TeX parameters as
`gwpy.plot.rc.GWPY_TEX_RCPARAMS`.
.. note::
This function doesn't apply the new `RcParams` in any way, just
creates something that can be used to set `matplotlib.rcParams`.
Parameters
----------
usetex : `bool`, `None`
value to set for `text.usetex`; if `None` determine automatically
using the ``GWPY_USETEX`` environment variable, and whether `tex`
is available on the system. If `True` is given (or determined)
a number of other parameters are updated to improve TeX formatting.
Examples
--------
>>> import matplotlib
>>> from gwpy.plot.rc import rc_params as gwpy_rc_params
>>> matplotlib.rcParams.update(gwpy_rc_params(usetex=False))
### Response:
def rc_params(usetex=None):
"""Returns a new `matplotlib.RcParams` with updated GWpy parameters
The updated parameters are globally stored as
`gwpy.plot.rc.GWPY_RCPARAMS`, with the updated TeX parameters as
`gwpy.plot.rc.GWPY_TEX_RCPARAMS`.
.. note::
This function doesn't apply the new `RcParams` in any way, just
creates something that can be used to set `matplotlib.rcParams`.
Parameters
----------
usetex : `bool`, `None`
value to set for `text.usetex`; if `None` determine automatically
using the ``GWPY_USETEX`` environment variable, and whether `tex`
is available on the system. If `True` is given (or determined)
a number of other parameters are updated to improve TeX formatting.
Examples
--------
>>> import matplotlib
    >>> from gwpy.plot.rc import rc_params as gwpy_rc_params
>>> matplotlib.rcParams.update(gwpy_rc_params(usetex=False))
"""
# if user didn't specify to use tex or not, guess based on
# the `GWPY_USETEX` environment variable, or whether tex is
# installed at all.
if usetex is None:
usetex = bool_env(
'GWPY_USETEX',
default=rcParams['text.usetex'] or tex.has_tex())
# build RcParams from matplotlib.rcParams with GWpy extras
rcp = GWPY_RCPARAMS.copy()
if usetex:
rcp.update(GWPY_TEX_RCPARAMS)
return rcp |
def calc_allowedremoterelieve_v1(self):
"""Get the allowed remote relieve of the last simulation step.
Required log sequence:
|LoggedAllowedRemoteRelieve|
Calculated flux sequence:
|AllowedRemoteRelieve|
Basic equation:
:math:`AllowedRemoteRelieve = LoggedAllowedRemoteRelieve`
Example:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> logs.loggedallowedremoterelieve = 2.0
>>> model.calc_allowedremoterelieve_v1()
>>> fluxes.allowedremoterelieve
allowedremoterelieve(2.0)
"""
flu = self.sequences.fluxes.fastaccess
log = self.sequences.logs.fastaccess
flu.allowedremoterelieve = log.loggedallowedremoterelieve[0] | Get the allowed remote relieve of the last simulation step.
Required log sequence:
|LoggedAllowedRemoteRelieve|
Calculated flux sequence:
|AllowedRemoteRelieve|
Basic equation:
:math:`AllowedRemoteRelieve = LoggedAllowedRemoteRelieve`
Example:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> logs.loggedallowedremoterelieve = 2.0
>>> model.calc_allowedremoterelieve_v1()
>>> fluxes.allowedremoterelieve
allowedremoterelieve(2.0) | Below is the the instruction that describes the task:
### Input:
Get the allowed remote relieve of the last simulation step.
Required log sequence:
|LoggedAllowedRemoteRelieve|
Calculated flux sequence:
|AllowedRemoteRelieve|
Basic equation:
:math:`AllowedRemoteRelieve = LoggedAllowedRemoteRelieve`
Example:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> logs.loggedallowedremoterelieve = 2.0
>>> model.calc_allowedremoterelieve_v1()
>>> fluxes.allowedremoterelieve
allowedremoterelieve(2.0)
### Response:
def calc_allowedremoterelieve_v1(self):
"""Get the allowed remote relieve of the last simulation step.
Required log sequence:
|LoggedAllowedRemoteRelieve|
Calculated flux sequence:
|AllowedRemoteRelieve|
Basic equation:
:math:`AllowedRemoteRelieve = LoggedAllowedRemoteRelieve`
Example:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> logs.loggedallowedremoterelieve = 2.0
>>> model.calc_allowedremoterelieve_v1()
>>> fluxes.allowedremoterelieve
allowedremoterelieve(2.0)
"""
flu = self.sequences.fluxes.fastaccess
log = self.sequences.logs.fastaccess
flu.allowedremoterelieve = log.loggedallowedremoterelieve[0] |
def validate(self, generator, axesToMove=None, **kwargs):
# type: (AGenerator, AAxesToMove, **Any) -> AConfigureParams
"""Validate configuration parameters and return validated parameters.
Doesn't take device state into account so can be run in any state
"""
iterations = 10
# We will return this, so make sure we fill in defaults
for k, default in self._block.configure.defaults.items():
if k not in kwargs:
kwargs[k] = default
# The validated parameters we will eventually return
params = ConfigureParams(generator, axesToMove, **kwargs)
# Make some tasks just for validate
part_contexts = self.create_part_contexts()
# Get any status from all parts
status_part_info = self.run_hooks(
ReportStatusHook(p, c) for p, c in part_contexts.items())
while iterations > 0:
# Try up to 10 times to get a valid set of parameters
iterations -= 1
# Validate the params with all the parts
validate_part_info = self.run_hooks(
ValidateHook(p, c, status_part_info, **kwargs)
for p, c, kwargs in self._part_params(part_contexts, params))
tweaks = ParameterTweakInfo.filter_values(validate_part_info)
if tweaks:
for tweak in tweaks:
deserialized = self._block.configure.takes.elements[
tweak.parameter].validate(tweak.value)
setattr(params, tweak.parameter, deserialized)
self.log.debug(
"Tweaking %s to %s", tweak.parameter, deserialized)
else:
# Consistent set, just return the params
return params
raise ValueError("Could not get a consistent set of parameters") | Validate configuration parameters and return validated parameters.
Doesn't take device state into account so can be run in any state | Below is the the instruction that describes the task:
### Input:
Validate configuration parameters and return validated parameters.
Doesn't take device state into account so can be run in any state
### Response:
def validate(self, generator, axesToMove=None, **kwargs):
# type: (AGenerator, AAxesToMove, **Any) -> AConfigureParams
"""Validate configuration parameters and return validated parameters.
Doesn't take device state into account so can be run in any state
"""
iterations = 10
# We will return this, so make sure we fill in defaults
for k, default in self._block.configure.defaults.items():
if k not in kwargs:
kwargs[k] = default
# The validated parameters we will eventually return
params = ConfigureParams(generator, axesToMove, **kwargs)
# Make some tasks just for validate
part_contexts = self.create_part_contexts()
# Get any status from all parts
status_part_info = self.run_hooks(
ReportStatusHook(p, c) for p, c in part_contexts.items())
while iterations > 0:
# Try up to 10 times to get a valid set of parameters
iterations -= 1
# Validate the params with all the parts
validate_part_info = self.run_hooks(
ValidateHook(p, c, status_part_info, **kwargs)
for p, c, kwargs in self._part_params(part_contexts, params))
tweaks = ParameterTweakInfo.filter_values(validate_part_info)
if tweaks:
for tweak in tweaks:
deserialized = self._block.configure.takes.elements[
tweak.parameter].validate(tweak.value)
setattr(params, tweak.parameter, deserialized)
self.log.debug(
"Tweaking %s to %s", tweak.parameter, deserialized)
else:
# Consistent set, just return the params
return params
raise ValueError("Could not get a consistent set of parameters") |
def get_current_cmus():
"""
Get the current song from cmus.
"""
result = subprocess.run('cmus-remote -Q'.split(' '), check=True,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
info = {}
for line in result.stdout.decode().split('\n'):
line = line.split(' ')
if line[0] != 'tag':
continue
key = line[1]
if key in ['album', 'title', 'artist', 'albumartist'] and\
key not in info:
info[key] = ' '.join(line[2:])
if 'albumartist' in info:
info['artist'] = info['albumartist']
del info['albumartist']
return Song(**info) | Get the current song from cmus. | Below is the the instruction that describes the task:
### Input:
Get the current song from cmus.
### Response:
def get_current_cmus():
"""
Get the current song from cmus.
"""
result = subprocess.run('cmus-remote -Q'.split(' '), check=True,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
info = {}
for line in result.stdout.decode().split('\n'):
line = line.split(' ')
if line[0] != 'tag':
continue
key = line[1]
if key in ['album', 'title', 'artist', 'albumartist'] and\
key not in info:
info[key] = ' '.join(line[2:])
if 'albumartist' in info:
info['artist'] = info['albumartist']
del info['albumartist']
return Song(**info) |
def validate_unique_items(value, **kwargs):
"""
Validator for ARRAY types to enforce that all array items must be unique.
"""
# we can't just look at the items themselves since 0 and False are treated
# the same as dictionary keys, and objects aren't hashable.
counter = collections.Counter((
json.dumps(v, sort_keys=True) for v in value
))
dupes = [json.loads(v) for v, count in counter.items() if count > 1]
if dupes:
raise ValidationError(
MESSAGES['unique_items']['invalid'].format(
repr(dupes),
),
) | Validator for ARRAY types to enforce that all array items must be unique. | Below is the the instruction that describes the task:
### Input:
Validator for ARRAY types to enforce that all array items must be unique.
### Response:
def validate_unique_items(value, **kwargs):
"""
Validator for ARRAY types to enforce that all array items must be unique.
"""
# we can't just look at the items themselves since 0 and False are treated
# the same as dictionary keys, and objects aren't hashable.
counter = collections.Counter((
json.dumps(v, sort_keys=True) for v in value
))
dupes = [json.loads(v) for v, count in counter.items() if count > 1]
if dupes:
raise ValidationError(
MESSAGES['unique_items']['invalid'].format(
repr(dupes),
),
) |
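For illustration (assumes ValidationError from the surrounding module is importable): duplicates are detected even for unhashable items such as dicts, and 0/False stay distinct because the comparison uses their JSON serialisation:
validate_unique_items([0, False, {'a': 1}])        # passes: all three serialise differently
try:
    validate_unique_items([{'a': 1}, {'a': 1}])    # raises: duplicate objects
except ValidationError as exc:
    print(exc)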
def _extract_inner_match(self, candidate, offset):
"""Attempts to extract a match from candidate if the whole candidate
does not qualify as a match.
Arguments:
candidate -- The candidate text that might contain a phone number
offset -- The current offset of candidate within text
Returns the match found, None if none can be found
"""
for possible_inner_match in _INNER_MATCHES:
group_match = possible_inner_match.search(candidate)
is_first_match = True
while group_match and self._max_tries > 0:
if is_first_match:
# We should handle any group before this one too.
group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,
candidate[:group_match.start()])
match = self._parse_and_verify(group, offset)
if match is not None:
return match
self._max_tries -= 1
is_first_match = False
group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,
group_match.group(1))
match = self._parse_and_verify(group, offset + group_match.start(1))
if match is not None:
return match
self._max_tries -= 1
group_match = possible_inner_match.search(candidate, group_match.start() + 1)
return None | Attempts to extract a match from candidate if the whole candidate
does not qualify as a match.
Arguments:
candidate -- The candidate text that might contain a phone number
offset -- The current offset of candidate within text
Returns the match found, None if none can be found | Below is the the instruction that describes the task:
### Input:
Attempts to extract a match from candidate if the whole candidate
does not qualify as a match.
Arguments:
candidate -- The candidate text that might contain a phone number
offset -- The current offset of candidate within text
Returns the match found, None if none can be found
### Response:
def _extract_inner_match(self, candidate, offset):
"""Attempts to extract a match from candidate if the whole candidate
does not qualify as a match.
Arguments:
candidate -- The candidate text that might contain a phone number
offset -- The current offset of candidate within text
Returns the match found, None if none can be found
"""
for possible_inner_match in _INNER_MATCHES:
group_match = possible_inner_match.search(candidate)
is_first_match = True
while group_match and self._max_tries > 0:
if is_first_match:
# We should handle any group before this one too.
group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,
candidate[:group_match.start()])
match = self._parse_and_verify(group, offset)
if match is not None:
return match
self._max_tries -= 1
is_first_match = False
group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,
group_match.group(1))
match = self._parse_and_verify(group, offset + group_match.start(1))
if match is not None:
return match
self._max_tries -= 1
group_match = possible_inner_match.search(candidate, group_match.start() + 1)
return None |
def _send_rpc_response(self, *packets):
"""Send an RPC response.
It is executed in the baBLE working thread: should not be blocking.
The RPC response is notified in one or two packets depending on whether or not
response data is included. If there is a temporary error sending one of the packets
it is retried automatically. If there is a permanent error, it is logged and the response
is abandoned.
"""
if len(packets) == 0:
return
handle, payload = packets[0]
try:
self._send_notification(handle, payload)
except bable_interface.BaBLEException as err:
if err.packet.status == 'Rejected': # If we are streaming too fast, back off and try again
time.sleep(0.05)
self._defer(self._send_rpc_response, list(packets))
else:
self._audit('ErrorSendingRPCResponse')
self._logger.exception("Error while sending RPC response, handle=%s, payload=%s", handle, payload)
return
if len(packets) > 1:
self._defer(self._send_rpc_response, list(packets[1:])) | Send an RPC response.
It is executed in the baBLE working thread: should not be blocking.
The RPC response is notified in one or two packets depending on whether or not
response data is included. If there is a temporary error sending one of the packets
it is retried automatically. If there is a permanent error, it is logged and the response
is abandoned. | Below is the the instruction that describes the task:
### Input:
Send an RPC response.
It is executed in the baBLE working thread: should not be blocking.
The RPC response is notified in one or two packets depending on whether or not
response data is included. If there is a temporary error sending one of the packets
it is retried automatically. If there is a permanent error, it is logged and the response
is abandoned.
### Response:
def _send_rpc_response(self, *packets):
"""Send an RPC response.
It is executed in the baBLE working thread: should not be blocking.
The RPC response is notified in one or two packets depending on whether or not
response data is included. If there is a temporary error sending one of the packets
it is retried automatically. If there is a permanent error, it is logged and the response
is abandoned.
"""
if len(packets) == 0:
return
handle, payload = packets[0]
try:
self._send_notification(handle, payload)
except bable_interface.BaBLEException as err:
if err.packet.status == 'Rejected': # If we are streaming too fast, back off and try again
time.sleep(0.05)
self._defer(self._send_rpc_response, list(packets))
else:
self._audit('ErrorSendingRPCResponse')
self._logger.exception("Error while sending RPC response, handle=%s, payload=%s", handle, payload)
return
if len(packets) > 1:
self._defer(self._send_rpc_response, list(packets[1:])) |
def stat(path, user=None):
"""
Performs the equivalent of :func:`os.stat` on ``path``, returning a
:class:`StatResult` object.
"""
host, port, path_ = split(path, user)
fs = hdfs_fs.hdfs(host, port, user)
retval = StatResult(fs.get_path_info(path_))
if not host:
_update_stat(retval, path_)
fs.close()
return retval | Performs the equivalent of :func:`os.stat` on ``path``, returning a
:class:`StatResult` object. | Below is the the instruction that describes the task:
### Input:
Performs the equivalent of :func:`os.stat` on ``path``, returning a
:class:`StatResult` object.
### Response:
def stat(path, user=None):
"""
Performs the equivalent of :func:`os.stat` on ``path``, returning a
:class:`StatResult` object.
"""
host, port, path_ = split(path, user)
fs = hdfs_fs.hdfs(host, port, user)
retval = StatResult(fs.get_path_info(path_))
if not host:
_update_stat(retval, path_)
fs.close()
return retval |
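A usage sketch (hypothetical HDFS URI; the attribute names assume StatResult mirrors the os.stat fields):
info = stat('hdfs://namenode:8020/user/someone/data.txt')
print(info.st_size, info.st_mtime)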
def _merge_keys(kwargs):
'''
The log_config is a mixture of the CLI options --log-driver and --log-opt
(which we support in Salt as log_driver and log_opt, respectively), but it
must be submitted to the host config in the format {'Type': log_driver,
'Config': log_opt}. So, we need to construct this argument to be passed to
the API from those two arguments.
'''
log_driver = kwargs.pop('log_driver', helpers.NOTSET)
log_opt = kwargs.pop('log_opt', helpers.NOTSET)
if 'log_config' not in kwargs:
if log_driver is not helpers.NOTSET \
or log_opt is not helpers.NOTSET:
kwargs['log_config'] = {
'Type': log_driver
if log_driver is not helpers.NOTSET
else 'none',
'Config': log_opt
if log_opt is not helpers.NOTSET
else {}
} | The log_config is a mixture of the CLI options --log-driver and --log-opt
(which we support in Salt as log_driver and log_opt, respectively), but it
must be submitted to the host config in the format {'Type': log_driver,
'Config': log_opt}. So, we need to construct this argument to be passed to
the API from those two arguments. | Below is the the instruction that describes the task:
### Input:
The log_config is a mixture of the CLI options --log-driver and --log-opt
(which we support in Salt as log_driver and log_opt, respectively), but it
must be submitted to the host config in the format {'Type': log_driver,
'Config': log_opt}. So, we need to construct this argument to be passed to
the API from those two arguments.
### Response:
def _merge_keys(kwargs):
'''
The log_config is a mixture of the CLI options --log-driver and --log-opt
(which we support in Salt as log_driver and log_opt, respectively), but it
must be submitted to the host config in the format {'Type': log_driver,
'Config': log_opt}. So, we need to construct this argument to be passed to
the API from those two arguments.
'''
log_driver = kwargs.pop('log_driver', helpers.NOTSET)
log_opt = kwargs.pop('log_opt', helpers.NOTSET)
if 'log_config' not in kwargs:
if log_driver is not helpers.NOTSET \
or log_opt is not helpers.NOTSET:
kwargs['log_config'] = {
'Type': log_driver
if log_driver is not helpers.NOTSET
else 'none',
'Config': log_opt
if log_opt is not helpers.NOTSET
else {}
} |
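The transformation can be illustrated with hypothetical values:
kwargs = {'log_driver': 'json-file', 'log_opt': {'max-size': '10m'}}
_merge_keys(kwargs)
# kwargs is now {'log_config': {'Type': 'json-file', 'Config': {'max-size': '10m'}}}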
def _priority(s):
"""Return priority for a given object."""
if type(s) in (list, tuple, set, frozenset):
return ITERABLE
if type(s) is dict:
return DICT
if issubclass(type(s), type):
return TYPE
if hasattr(s, "validate"):
return VALIDATOR
if callable(s):
return CALLABLE
else:
return COMPARABLE | Return priority for a given object. | Below is the the instruction that describes the task:
### Input:
Return priority for a given object.
### Response:
def _priority(s):
"""Return priority for a given object."""
if type(s) in (list, tuple, set, frozenset):
return ITERABLE
if type(s) is dict:
return DICT
if issubclass(type(s), type):
return TYPE
if hasattr(s, "validate"):
return VALIDATOR
if callable(s):
return CALLABLE
else:
return COMPARABLE |
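For example (the returned values are the priority constants defined alongside this function):
_priority([1, 2])       # ITERABLE
_priority({'a': 1})     # DICT
_priority(int)          # TYPE
_priority(len)          # CALLABLE
_priority('literal')    # COMPARABLE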
def permuted_copy(self, partition=None):
""" Return a copy of the collection with all alignment columns permuted
"""
def take(n, iterable):
return [next(iterable) for _ in range(n)]
if partition is None:
partition = Partition([1] * len(self))
index_tuples = partition.get_membership()
alignments = []
for ix in index_tuples:
concat = Concatenation(self, ix)
sites = concat.alignment.get_sites()
random.shuffle(sites)
d = dict(zip(concat.alignment.get_names(), [iter(x) for x in zip(*sites)]))
new_seqs = [[(k, ''.join(take(l, d[k]))) for k in d] for l in concat.lengths]
for seqs, datatype, name in zip(new_seqs, concat.datatypes, concat.names):
alignment = Alignment(seqs, datatype)
alignment.name = name
alignments.append(alignment)
return self.__class__(records=sorted(alignments, key=lambda x: SORT_KEY(x.name))) | Return a copy of the collection with all alignment columns permuted | Below is the the instruction that describes the task:
### Input:
Return a copy of the collection with all alignment columns permuted
### Response:
def permuted_copy(self, partition=None):
""" Return a copy of the collection with all alignment columns permuted
"""
def take(n, iterable):
return [next(iterable) for _ in range(n)]
if partition is None:
partition = Partition([1] * len(self))
index_tuples = partition.get_membership()
alignments = []
for ix in index_tuples:
concat = Concatenation(self, ix)
sites = concat.alignment.get_sites()
random.shuffle(sites)
d = dict(zip(concat.alignment.get_names(), [iter(x) for x in zip(*sites)]))
new_seqs = [[(k, ''.join(take(l, d[k]))) for k in d] for l in concat.lengths]
for seqs, datatype, name in zip(new_seqs, concat.datatypes, concat.names):
alignment = Alignment(seqs, datatype)
alignment.name = name
alignments.append(alignment)
return self.__class__(records=sorted(alignments, key=lambda x: SORT_KEY(x.name))) |
def bounds(self, thr=0):
""" Gets the bounds of this segment
Returns:
(float, float, float, float): Bounds, with min latitude, min longitude,
max latitude and max longitude
"""
min_lat = float("inf")
min_lon = float("inf")
max_lat = -float("inf")
max_lon = -float("inf")
for segment in self.segments:
milat, milon, malat, malon = segment.bounds(thr=thr)
min_lat = min(milat, min_lat)
min_lon = min(milon, min_lon)
max_lat = max(malat, max_lat)
max_lon = max(malon, max_lon)
return min_lat, min_lon, max_lat, max_lon | Gets the bounds of this segment
Returns:
(float, float, float, float): Bounds, with min latitude, min longitude,
max latitude and max longitude | Below is the the instruction that describes the task:
### Input:
Gets the bounds of this segment
Returns:
(float, float, float, float): Bounds, with min latitude, min longitude,
max latitude and max longitude
### Response:
def bounds(self, thr=0):
""" Gets the bounds of this segment
Returns:
(float, float, float, float): Bounds, with min latitude, min longitude,
max latitude and max longitude
"""
min_lat = float("inf")
min_lon = float("inf")
max_lat = -float("inf")
max_lon = -float("inf")
for segment in self.segments:
milat, milon, malat, malon = segment.bounds(thr=thr)
min_lat = min(milat, min_lat)
min_lon = min(milon, min_lon)
max_lat = max(malat, max_lat)
max_lon = max(malon, max_lon)
return min_lat, min_lon, max_lat, max_lon |
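A usage sketch (hypothetical track object with populated segments):
min_lat, min_lon, max_lat, max_lon = track.bounds()
print("bounding box:", (min_lat, min_lon), (max_lat, max_lon))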
def getRoutes(self):
"""Get routing table.
@return: List of routes.
"""
routes = []
try:
out = subprocess.Popen([routeCmd, "-n"],
stdout=subprocess.PIPE).communicate()[0]
except:
        raise Exception('Execution of command %s failed.' % routeCmd)
lines = out.splitlines()
if len(lines) > 1:
headers = [col.lower() for col in lines[1].split()]
for line in lines[2:]:
routes.append(dict(zip(headers, line.split())))
return routes | Get routing table.
@return: List of routes. | Below is the the instruction that describes the task:
### Input:
Get routing table.
@return: List of routes.
### Response:
def getRoutes(self):
"""Get routing table.
@return: List of routes.
"""
routes = []
try:
out = subprocess.Popen([routeCmd, "-n"],
stdout=subprocess.PIPE).communicate()[0]
except:
        raise Exception('Execution of command %s failed.' % routeCmd)
lines = out.splitlines()
if len(lines) > 1:
headers = [col.lower() for col in lines[1].split()]
for line in lines[2:]:
routes.append(dict(zip(headers, line.split())))
return routes |
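A usage sketch (info stands for an instance of the surrounding class; the keys are the lower-cased column headers of route -n, e.g. destination, gateway, iface):
for route in info.getRoutes():
    print(route.get('destination'), route.get('gateway'), route.get('iface'))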
def verify_multi(self, otp_list, max_time_window=DEFAULT_MAX_TIME_WINDOW,
sl=None, timeout=None):
"""
Verify a provided list of OTPs.
:param max_time_window: Maximum number of seconds which can pass
between the first and last OTP generation for
the OTP to still be considered valid.
:type max_time_window: ``int``
"""
# Create the OTP objects
otps = []
for otp in otp_list:
otps.append(OTP(otp, self.translate_otp))
if len(otp_list) < 2:
raise ValueError('otp_list needs to contain at least two OTPs')
device_ids = set()
for otp in otps:
device_ids.add(otp.device_id)
# Check that all the OTPs contain same device id
if len(device_ids) != 1:
raise Exception('OTPs contain different device ids')
# Now we verify the OTPs and save the server response for each OTP.
# We need the server response, to retrieve the timestamp.
# It's possible to retrieve this value locally, without querying the
# server but in this case, user would need to provide his AES key.
for otp in otps:
response = self.verify(otp.otp, True, sl, timeout,
return_response=True)
if not response:
return False
otp.timestamp = int(response['timestamp'])
count = len(otps)
delta = otps[count - 1].timestamp - otps[0].timestamp
# OTPs have an 8Hz timestamp counter so we need to divide it to get
# seconds
delta = delta / 8
if delta < 0:
raise Exception('delta is smaller than zero. First OTP appears to '
'be older than the last one')
if delta > max_time_window:
raise Exception(('More than %s seconds have passed between '
'generating the first and the last OTP.') %
(max_time_window))
return True | Verify a provided list of OTPs.
:param max_time_window: Maximum number of seconds which can pass
between the first and last OTP generation for
the OTP to still be considered valid.
:type max_time_window: ``int`` | Below is the the instruction that describes the task:
### Input:
Verify a provided list of OTPs.
:param max_time_window: Maximum number of seconds which can pass
between the first and last OTP generation for
the OTP to still be considered valid.
:type max_time_window: ``int``
### Response:
def verify_multi(self, otp_list, max_time_window=DEFAULT_MAX_TIME_WINDOW,
sl=None, timeout=None):
"""
Verify a provided list of OTPs.
:param max_time_window: Maximum number of seconds which can pass
between the first and last OTP generation for
the OTP to still be considered valid.
:type max_time_window: ``int``
"""
# Create the OTP objects
otps = []
for otp in otp_list:
otps.append(OTP(otp, self.translate_otp))
if len(otp_list) < 2:
raise ValueError('otp_list needs to contain at least two OTPs')
device_ids = set()
for otp in otps:
device_ids.add(otp.device_id)
# Check that all the OTPs contain same device id
if len(device_ids) != 1:
raise Exception('OTPs contain different device ids')
# Now we verify the OTPs and save the server response for each OTP.
# We need the server response, to retrieve the timestamp.
# It's possible to retrieve this value locally, without querying the
# server but in this case, user would need to provide his AES key.
for otp in otps:
response = self.verify(otp.otp, True, sl, timeout,
return_response=True)
if not response:
return False
otp.timestamp = int(response['timestamp'])
count = len(otps)
delta = otps[count - 1].timestamp - otps[0].timestamp
# OTPs have an 8Hz timestamp counter so we need to divide it to get
# seconds
delta = delta / 8
if delta < 0:
raise Exception('delta is smaller than zero. First OTP appears to '
'be older than the last one')
if delta > max_time_window:
raise Exception(('More than %s seconds have passed between '
'generating the first and the last OTP.') %
(max_time_window))
return True |
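A usage sketch (hypothetical client object and OTP strings; client is assumed to be the verifying client instance this method belongs to):
ok = client.verify_multi([otp_first, otp_second], max_time_window=10)
print("both OTPs valid and generated within 10 s of each other:", ok)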
def mount_share_at_path(share_path, mount_path):
"""Mounts a share at the specified path
Args:
share_path: String URL with all auth info to connect to file share.
mount_path: Path to mount share on.
Returns:
The mount point or raises an error
"""
sh_url = CFURLCreateWithString(None, share_path, None)
mo_url = CFURLCreateWithString(None, mount_path, None)
# Set UI to reduced interaction
open_options = {NetFS.kNAUIOptionKey: NetFS.kNAUIOptionNoUI}
# Allow mounting sub-directories of root shares
# Also specify the share should be mounted directly at (not under)
# mount_path
mount_options = {NetFS.kNetFSAllowSubMountsKey: True,
NetFS.kNetFSMountAtMountDirKey: True}
# Mount!
result, output = NetFS.NetFSMountURLSync(sh_url, mo_url, None, None,
open_options, mount_options, None)
# Check if it worked
if result != 0:
raise Exception('Error mounting url "%s" at path "%s": %s' %
(share_path, mount_path, output))
# Return the mountpath
return str(output[0]) | Mounts a share at the specified path
Args:
share_path: String URL with all auth info to connect to file share.
mount_path: Path to mount share on.
Returns:
The mount point or raises an error | Below is the the instruction that describes the task:
### Input:
Mounts a share at the specified path
Args:
share_path: String URL with all auth info to connect to file share.
mount_path: Path to mount share on.
Returns:
The mount point or raises an error
### Response:
def mount_share_at_path(share_path, mount_path):
"""Mounts a share at the specified path
Args:
share_path: String URL with all auth info to connect to file share.
mount_path: Path to mount share on.
Returns:
The mount point or raises an error
"""
sh_url = CFURLCreateWithString(None, share_path, None)
mo_url = CFURLCreateWithString(None, mount_path, None)
# Set UI to reduced interaction
open_options = {NetFS.kNAUIOptionKey: NetFS.kNAUIOptionNoUI}
# Allow mounting sub-directories of root shares
# Also specify the share should be mounted directly at (not under)
# mount_path
mount_options = {NetFS.kNetFSAllowSubMountsKey: True,
NetFS.kNetFSMountAtMountDirKey: True}
# Mount!
result, output = NetFS.NetFSMountURLSync(sh_url, mo_url, None, None,
open_options, mount_options, None)
# Check if it worked
if result != 0:
raise Exception('Error mounting url "%s" at path "%s": %s' %
(share_path, mount_path, output))
# Return the mountpath
return str(output[0]) |
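A usage sketch (hypothetical share URL and credentials):
mount_point = mount_share_at_path('smb://user:password@fileserver.local/Share',
                                  '/Volumes/Share')
print('mounted at', mount_point)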
def get_syllable_count(self, syllables: List[str]) -> int:
"""
    Counts the number of syllable groups that would occur after elision.
    Often we will want to preserve the position and separation of syllables so that they
can be used to reconstitute a line, and apply stresses to the original word positions.
However, we also want to be able to count the number of syllables accurately.
:param syllables:
:return:
>>> syllabifier = Syllabifier()
>>> print(syllabifier.get_syllable_count([
... 'Jām', 'tūm', 'c', 'au', 'sus', 'es', 'u', 'nus', 'I', 'ta', 'lo', 'rum']))
11
"""
tmp_syllables = copy.deepcopy(syllables)
return len(string_utils.remove_blank_spaces(
string_utils.move_consonant_right(tmp_syllables,
                                          self._find_solo_consonant(tmp_syllables)))) | Counts the number of syllable groups that would occur after elision.
Often we will want to preserve the position and separation of syllables so that they
can be used to reconstitute a line, and apply stresses to the original word positions.
However, we also want to be able to count the number of syllables accurately.
:param syllables:
:return:
>>> syllabifier = Syllabifier()
>>> print(syllabifier.get_syllable_count([
... 'Jām', 'tūm', 'c', 'au', 'sus', 'es', 'u', 'nus', 'I', 'ta', 'lo', 'rum']))
11 | Below is the the instruction that describes the task:
### Input:
Counts the number of syllable groups that would occur after elision.
Often we will want to preserve the position and separation of syllables so that they
can be used to reconstitute a line, and apply stresses to the original word positions.
However, we also want to be able to count the number of syllables accurately.
:param syllables:
:return:
>>> syllabifier = Syllabifier()
>>> print(syllabifier.get_syllable_count([
... 'Jām', 'tūm', 'c', 'au', 'sus', 'es', 'u', 'nus', 'I', 'ta', 'lo', 'rum']))
11
### Response:
def get_syllable_count(self, syllables: List[str]) -> int:
"""
    Counts the number of syllable groups that would occur after elision.
    Often we will want to preserve the position and separation of syllables so that they
can be used to reconstitute a line, and apply stresses to the original word positions.
However, we also want to be able to count the number of syllables accurately.
:param syllables:
:return:
>>> syllabifier = Syllabifier()
>>> print(syllabifier.get_syllable_count([
... 'Jām', 'tūm', 'c', 'au', 'sus', 'es', 'u', 'nus', 'I', 'ta', 'lo', 'rum']))
11
"""
tmp_syllables = copy.deepcopy(syllables)
return len(string_utils.remove_blank_spaces(
string_utils.move_consonant_right(tmp_syllables,
self._find_solo_consonant(tmp_syllables)))) |
def signFix(val, width):
"""
    Reinterpret positive int as signed int of the given bit width (same bits set)
"""
if val > 0:
msb = 1 << (width - 1)
if val & msb:
val -= mask(width) + 1
    return val | Reinterpret positive int as signed int of the given bit width (same bits set) | Below is the the instruction that describes the task:
### Input:
Reinterpret positive int as signed int of the given bit width (same bits set)
### Response:
def signFix(val, width):
"""
    Reinterpret positive int as signed int of the given bit width (same bits set)
"""
if val > 0:
msb = 1 << (width - 1)
if val & msb:
val -= mask(width) + 1
return val |
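For example, assuming mask(width) returns the all-ones mask for that bit width:
signFix(0x7F, 8)   # 127  (sign bit clear, value unchanged)
signFix(0x80, 8)   # -128 (sign bit set, reinterpreted as two's complement)
signFix(0xFF, 8)   # -1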
def update(self, modelID, modelParams, modelParamsHash, metricResult,
completed, completionReason, matured, numRecords):
""" Insert a new entry or update an existing one. If this is an update
of an existing entry, then modelParams will be None
Parameters:
--------------------------------------------------------------------
modelID: globally unique modelID of this model
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for
a description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
metricResult: value on the optimizeMetric for this model.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured
numRecords: Number of records that have been processed so far by this
model.
retval: Canonicalized result on the optimize metric
"""
# The modelParamsHash must always be provided - it can change after a
# model is inserted into the models table if it got detected as an
# orphan
assert (modelParamsHash is not None)
# We consider a model metricResult as "final" if it has completed or
# matured. By default, assume anything that has completed has matured
if completed:
matured = True
# Get the canonicalized optimize metric results. For this metric, lower
# is always better
if metricResult is not None and matured and \
completionReason in [ClientJobsDAO.CMPL_REASON_EOF,
ClientJobsDAO.CMPL_REASON_STOPPED]:
# Canonicalize the error score so that lower is better
if self._hsObj._maximize:
errScore = -1 * metricResult
else:
errScore = metricResult
if errScore < self._bestResult:
self._bestResult = errScore
self._bestModelID = modelID
self._hsObj.logger.info("New best model after %d evaluations: errScore "
"%g on model %s" % (len(self._allResults), self._bestResult,
self._bestModelID))
else:
errScore = numpy.inf
# If this model completed with an unacceptable completion reason, set the
# errScore to infinite and essentially make this model invisible to
# further queries
if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:
errScore = numpy.inf
hidden = True
else:
hidden = False
# Update our set of erred models and completed models. These are used
# to determine if we should abort the search because of too many errors
if completed:
self._completedModels.add(modelID)
self._numCompletedModels = len(self._completedModels)
if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:
self._errModels.add(modelID)
self._numErrModels = len(self._errModels)
# Are we creating a new entry?
wasHidden = False
if modelID not in self._modelIDToIdx:
assert (modelParams is not None)
entry = dict(modelID=modelID, modelParams=modelParams,
modelParamsHash=modelParamsHash,
errScore=errScore, completed=completed,
matured=matured, numRecords=numRecords, hidden=hidden)
self._allResults.append(entry)
entryIdx = len(self._allResults) - 1
self._modelIDToIdx[modelID] = entryIdx
self._paramsHashToIndexes[modelParamsHash] = entryIdx
swarmId = modelParams['particleState']['swarmId']
if not hidden:
# Update the list of particles in each swarm
if swarmId in self._swarmIdToIndexes:
self._swarmIdToIndexes[swarmId].append(entryIdx)
else:
self._swarmIdToIndexes[swarmId] = [entryIdx]
# Update number of particles at each generation in this swarm
genIdx = modelParams['particleState']['genIdx']
numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
while genIdx >= len(numPsEntry):
numPsEntry.append(0)
numPsEntry[genIdx] += 1
self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry
# Replacing an existing one
else:
entryIdx = self._modelIDToIdx.get(modelID, None)
assert (entryIdx is not None)
entry = self._allResults[entryIdx]
wasHidden = entry['hidden']
# If the paramsHash changed, note that. This can happen for orphaned
# models
if entry['modelParamsHash'] != modelParamsHash:
self._paramsHashToIndexes.pop(entry['modelParamsHash'])
self._paramsHashToIndexes[modelParamsHash] = entryIdx
entry['modelParamsHash'] = modelParamsHash
# Get the model params, swarmId, and genIdx
modelParams = entry['modelParams']
swarmId = modelParams['particleState']['swarmId']
genIdx = modelParams['particleState']['genIdx']
# If this particle just became hidden, remove it from our swarm counts
if hidden and not wasHidden:
assert (entryIdx in self._swarmIdToIndexes[swarmId])
self._swarmIdToIndexes[swarmId].remove(entryIdx)
self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1
# Update the entry for the latest info
entry['errScore'] = errScore
entry['completed'] = completed
entry['matured'] = matured
entry['numRecords'] = numRecords
entry['hidden'] = hidden
# Update the particle best errScore
particleId = modelParams['particleState']['id']
genIdx = modelParams['particleState']['genIdx']
if matured and not hidden:
(oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
if errScore < oldResult:
pos = Particle.getPositionFromState(modelParams['particleState'])
self._particleBest[particleId] = (errScore, pos)
# Update the particle latest generation index
prevGenIdx = self._particleLatestGenIdx.get(particleId, -1)
if not hidden and genIdx > prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx
elif hidden and not wasHidden and genIdx == prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx-1
# Update the swarm best score
if not hidden:
swarmId = modelParams['particleState']['swarmId']
if not swarmId in self._swarmBestOverall:
self._swarmBestOverall[swarmId] = []
bestScores = self._swarmBestOverall[swarmId]
while genIdx >= len(bestScores):
bestScores.append((None, numpy.inf))
if errScore < bestScores[genIdx][1]:
bestScores[genIdx] = (modelID, errScore)
# Update the self._modifiedSwarmGens flags to support the
# getMaturedSwarmGenerations() call.
if not hidden:
key = (swarmId, genIdx)
if not key in self._maturedSwarmGens:
self._modifiedSwarmGens.add(key)
return errScore | Insert a new entry or update an existing one. If this is an update
of an existing entry, then modelParams will be None
Parameters:
--------------------------------------------------------------------
modelID: globally unique modelID of this model
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for
a description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
metricResult: value on the optimizeMetric for this model.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured
numRecords: Number of records that have been processed so far by this
model.
retval: Canonicalized result on the optimize metric | Below is the the instruction that describes the task:
### Input:
Insert a new entry or update an existing one. If this is an update
of an existing entry, then modelParams will be None
Parameters:
--------------------------------------------------------------------
modelID: globally unique modelID of this model
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for
a description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
metricResult: value on the optimizeMetric for this model.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured
numRecords: Number of records that have been processed so far by this
model.
retval: Canonicalized result on the optimize metric
### Response:
def update(self, modelID, modelParams, modelParamsHash, metricResult,
completed, completionReason, matured, numRecords):
""" Insert a new entry or update an existing one. If this is an update
of an existing entry, then modelParams will be None
Parameters:
--------------------------------------------------------------------
modelID: globally unique modelID of this model
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for
a description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
metricResult: value on the optimizeMetric for this model.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured
numRecords: Number of records that have been processed so far by this
model.
retval: Canonicalized result on the optimize metric
"""
# The modelParamsHash must always be provided - it can change after a
# model is inserted into the models table if it got detected as an
# orphan
assert (modelParamsHash is not None)
# We consider a model metricResult as "final" if it has completed or
# matured. By default, assume anything that has completed has matured
if completed:
matured = True
# Get the canonicalized optimize metric results. For this metric, lower
# is always better
if metricResult is not None and matured and \
completionReason in [ClientJobsDAO.CMPL_REASON_EOF,
ClientJobsDAO.CMPL_REASON_STOPPED]:
# Canonicalize the error score so that lower is better
if self._hsObj._maximize:
errScore = -1 * metricResult
else:
errScore = metricResult
if errScore < self._bestResult:
self._bestResult = errScore
self._bestModelID = modelID
self._hsObj.logger.info("New best model after %d evaluations: errScore "
"%g on model %s" % (len(self._allResults), self._bestResult,
self._bestModelID))
else:
errScore = numpy.inf
# If this model completed with an unacceptable completion reason, set the
# errScore to infinite and essentially make this model invisible to
# further queries
if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:
errScore = numpy.inf
hidden = True
else:
hidden = False
# Update our set of erred models and completed models. These are used
# to determine if we should abort the search because of too many errors
if completed:
self._completedModels.add(modelID)
self._numCompletedModels = len(self._completedModels)
if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:
self._errModels.add(modelID)
self._numErrModels = len(self._errModels)
# Are we creating a new entry?
wasHidden = False
if modelID not in self._modelIDToIdx:
assert (modelParams is not None)
entry = dict(modelID=modelID, modelParams=modelParams,
modelParamsHash=modelParamsHash,
errScore=errScore, completed=completed,
matured=matured, numRecords=numRecords, hidden=hidden)
self._allResults.append(entry)
entryIdx = len(self._allResults) - 1
self._modelIDToIdx[modelID] = entryIdx
self._paramsHashToIndexes[modelParamsHash] = entryIdx
swarmId = modelParams['particleState']['swarmId']
if not hidden:
# Update the list of particles in each swarm
if swarmId in self._swarmIdToIndexes:
self._swarmIdToIndexes[swarmId].append(entryIdx)
else:
self._swarmIdToIndexes[swarmId] = [entryIdx]
# Update number of particles at each generation in this swarm
genIdx = modelParams['particleState']['genIdx']
numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
while genIdx >= len(numPsEntry):
numPsEntry.append(0)
numPsEntry[genIdx] += 1
self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry
# Replacing an existing one
else:
entryIdx = self._modelIDToIdx.get(modelID, None)
assert (entryIdx is not None)
entry = self._allResults[entryIdx]
wasHidden = entry['hidden']
# If the paramsHash changed, note that. This can happen for orphaned
# models
if entry['modelParamsHash'] != modelParamsHash:
self._paramsHashToIndexes.pop(entry['modelParamsHash'])
self._paramsHashToIndexes[modelParamsHash] = entryIdx
entry['modelParamsHash'] = modelParamsHash
# Get the model params, swarmId, and genIdx
modelParams = entry['modelParams']
swarmId = modelParams['particleState']['swarmId']
genIdx = modelParams['particleState']['genIdx']
# If this particle just became hidden, remove it from our swarm counts
if hidden and not wasHidden:
assert (entryIdx in self._swarmIdToIndexes[swarmId])
self._swarmIdToIndexes[swarmId].remove(entryIdx)
self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1
# Update the entry for the latest info
entry['errScore'] = errScore
entry['completed'] = completed
entry['matured'] = matured
entry['numRecords'] = numRecords
entry['hidden'] = hidden
# Update the particle best errScore
particleId = modelParams['particleState']['id']
genIdx = modelParams['particleState']['genIdx']
if matured and not hidden:
(oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
if errScore < oldResult:
pos = Particle.getPositionFromState(modelParams['particleState'])
self._particleBest[particleId] = (errScore, pos)
# Update the particle latest generation index
prevGenIdx = self._particleLatestGenIdx.get(particleId, -1)
if not hidden and genIdx > prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx
elif hidden and not wasHidden and genIdx == prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx-1
# Update the swarm best score
if not hidden:
swarmId = modelParams['particleState']['swarmId']
if not swarmId in self._swarmBestOverall:
self._swarmBestOverall[swarmId] = []
bestScores = self._swarmBestOverall[swarmId]
while genIdx >= len(bestScores):
bestScores.append((None, numpy.inf))
if errScore < bestScores[genIdx][1]:
bestScores[genIdx] = (modelID, errScore)
# Update the self._modifiedSwarmGens flags to support the
# getMaturedSwarmGenerations() call.
if not hidden:
key = (swarmId, genIdx)
if not key in self._maturedSwarmGens:
self._modifiedSwarmGens.add(key)
return errScore |
def check_email_status(mx_resolver, recipient_address, sender_address, smtp_timeout=10, helo_hostname=None):
"""
Checks if an email might be valid by getting the status from the SMTP server.
:param mx_resolver: MXResolver
:param recipient_address: string
:param sender_address: string
:param smtp_timeout: integer
:param helo_hostname: string
:return: dict
"""
domain = recipient_address[recipient_address.find('@') + 1:]
if helo_hostname is None:
helo_hostname = domain
ret = {'status': 101, 'extended_status': None, 'message': "The server is unable to connect."}
records = []
try:
records = mx_resolver.get_mx_records(helo_hostname)
except socket.gaierror:
ret['status'] = 512
ret['extended_status'] = "5.1.2 Domain name address resolution failed in MX lookup."
smtp = smtplib.SMTP(timeout=smtp_timeout)
for mx in records:
try:
connection_status, connection_message = smtp.connect(mx.exchange)
if connection_status == 220:
smtp.helo(domain)
smtp.mail(sender_address)
status, message = smtp.rcpt(recipient_address)
ret['status'] = status
                pattern = re.compile(r'(\d+\.\d+\.\d+)')
matches = re.match(pattern, message)
if matches:
ret['extended_status'] = matches.group(1)
ret['message'] = message
smtp.quit()
break
except smtplib.SMTPConnectError:
ret['status'] = 111
ret['message'] = "Connection refused or unable to open an SMTP stream."
except smtplib.SMTPServerDisconnected:
ret['status'] = 111
ret['extended_status'] = "SMTP Server disconnected"
except socket.gaierror:
ret['status'] = 512
ret['extended_status'] = "5.1.2 Domain name address resolution failed."
return ret | Checks if an email might be valid by getting the status from the SMTP server.
:param mx_resolver: MXResolver
:param recipient_address: string
:param sender_address: string
:param smtp_timeout: integer
:param helo_hostname: string
:return: dict | Below is the instruction that describes the task:
### Input:
Checks if an email might be valid by getting the status from the SMTP server.
:param mx_resolver: MXResolver
:param recipient_address: string
:param sender_address: string
:param smtp_timeout: integer
:param helo_hostname: string
:return: dict
### Response:
def check_email_status(mx_resolver, recipient_address, sender_address, smtp_timeout=10, helo_hostname=None):
"""
Checks if an email might be valid by getting the status from the SMTP server.
:param mx_resolver: MXResolver
:param recipient_address: string
:param sender_address: string
:param smtp_timeout: integer
:param helo_hostname: string
:return: dict
"""
domain = recipient_address[recipient_address.find('@') + 1:]
if helo_hostname is None:
helo_hostname = domain
ret = {'status': 101, 'extended_status': None, 'message': "The server is unable to connect."}
records = []
try:
records = mx_resolver.get_mx_records(helo_hostname)
except socket.gaierror:
ret['status'] = 512
ret['extended_status'] = "5.1.2 Domain name address resolution failed in MX lookup."
smtp = smtplib.SMTP(timeout=smtp_timeout)
for mx in records:
try:
connection_status, connection_message = smtp.connect(mx.exchange)
if connection_status == 220:
smtp.helo(domain)
smtp.mail(sender_address)
status, message = smtp.rcpt(recipient_address)
ret['status'] = status
pattern = re.compile(r'(\d+\.\d+\.\d+)')
matches = re.match(pattern, message)
if matches:
ret['extended_status'] = matches.group(1)
ret['message'] = message
smtp.quit()
break
except smtplib.SMTPConnectError:
ret['status'] = 111
ret['message'] = "Connection refused or unable to open an SMTP stream."
except smtplib.SMTPServerDisconnected:
ret['status'] = 111
ret['extended_status'] = "SMTP Server disconnected"
except socket.gaierror:
ret['status'] = 512
ret['extended_status'] = "5.1.2 Domain name address resolution failed."
return ret |
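A minimal usage sketch for check_email_status; the StaticMXResolver stand-in, the addresses, and the MX host it fabricates are illustrative assumptions, not part of the row above.
import collections

MXRecord = collections.namedtuple('MXRecord', ['exchange', 'preference'])

class StaticMXResolver(object):
    # Stand-in for whatever MXResolver implementation the caller supplies.
    def get_mx_records(self, hostname):
        return [MXRecord(exchange='mx.' + hostname, preference=10)]

result = check_email_status(StaticMXResolver(),
                            recipient_address='user@example.com',
                            sender_address='verify@example.org',
                            smtp_timeout=5)
print(result['status'], result['extended_status'], result['message'])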
def run_breiman2():
"""Run Breiman's other sample problem."""
x, y = build_sample_ace_problem_breiman2(500)
ace_solver = ace.ACESolver()
ace_solver.specify_data_set(x, y)
ace_solver.solve()
try:
plt = ace.plot_transforms(ace_solver, None)
except ImportError:
return ace_solver  # matplotlib is unavailable; skip the plotting below
plt.subplot(1, 2, 1)
phi = numpy.sin(2.0 * numpy.pi * x[0])
plt.plot(x[0], phi, label='analytic')
plt.legend()
plt.subplot(1, 2, 2)
y = numpy.exp(phi)
plt.plot(y, phi, label='analytic')
plt.legend(loc='lower right')
# plt.show()
plt.savefig('no_noise_linear_x.png')
return ace_solver | Run Breiman's other sample problem. | Below is the instruction that describes the task:
### Input:
Run Breiman's other sample problem.
### Response:
def run_breiman2():
"""Run Breiman's other sample problem."""
x, y = build_sample_ace_problem_breiman2(500)
ace_solver = ace.ACESolver()
ace_solver.specify_data_set(x, y)
ace_solver.solve()
try:
plt = ace.plot_transforms(ace_solver, None)
except ImportError:
return ace_solver  # matplotlib is unavailable; skip the plotting below
plt.subplot(1, 2, 1)
phi = numpy.sin(2.0 * numpy.pi * x[0])
plt.plot(x[0], phi, label='analytic')
plt.legend()
plt.subplot(1, 2, 2)
y = numpy.exp(phi)
plt.plot(y, phi, label='analytic')
plt.legend(loc='lower right')
# plt.show()
plt.savefig('no_noise_linear_x.png')
return ace_solver |
def lstm_seq2seq_internal_attention_bid_encoder(inputs, targets, hparams,
train):
"""LSTM seq2seq model with attention, main step used for training."""
with tf.variable_scope("lstm_seq2seq_attention_bid_encoder"):
inputs_length = common_layers.length_from_embedding(inputs)
# Flatten inputs.
inputs = common_layers.flatten4d3d(inputs)
# LSTM encoder.
encoder_outputs, final_encoder_state = lstm_bid_encoder(
inputs, inputs_length, hparams, train, "encoder")
# LSTM decoder with attention
shifted_targets = common_layers.shift_right(targets)
# Add 1 to account for the padding added to the left from shift_right
targets_length = common_layers.length_from_embedding(shifted_targets) + 1
hparams_decoder = copy.copy(hparams)
hparams_decoder.hidden_size = 2 * hparams.hidden_size
decoder_outputs = lstm_attention_decoder(
common_layers.flatten4d3d(shifted_targets), hparams_decoder, train,
"decoder", final_encoder_state, encoder_outputs,
inputs_length, targets_length)
return tf.expand_dims(decoder_outputs, axis=2) | LSTM seq2seq model with attention, main step used for training. | Below is the instruction that describes the task:
### Input:
LSTM seq2seq model with attention, main step used for training.
### Response:
def lstm_seq2seq_internal_attention_bid_encoder(inputs, targets, hparams,
train):
"""LSTM seq2seq model with attention, main step used for training."""
with tf.variable_scope("lstm_seq2seq_attention_bid_encoder"):
inputs_length = common_layers.length_from_embedding(inputs)
# Flatten inputs.
inputs = common_layers.flatten4d3d(inputs)
# LSTM encoder.
encoder_outputs, final_encoder_state = lstm_bid_encoder(
inputs, inputs_length, hparams, train, "encoder")
# LSTM decoder with attention
shifted_targets = common_layers.shift_right(targets)
# Add 1 to account for the padding added to the left from shift_right
targets_length = common_layers.length_from_embedding(shifted_targets) + 1
hparams_decoder = copy.copy(hparams)
hparams_decoder.hidden_size = 2 * hparams.hidden_size
decoder_outputs = lstm_attention_decoder(
common_layers.flatten4d3d(shifted_targets), hparams_decoder, train,
"decoder", final_encoder_state, encoder_outputs,
inputs_length, targets_length)
return tf.expand_dims(decoder_outputs, axis=2) |
def get_keyboard_mapping_unchecked(conn):
"""
Return an unchecked keyboard mapping cookie that can be used to fetch the
table of keysyms in the current X environment.
:rtype: xcb.xproto.GetKeyboardMappingCookie
"""
mn, mx = get_min_max_keycode()
return conn.core.GetKeyboardMappingUnchecked(mn, mx - mn + 1) | Return an unchecked keyboard mapping cookie that can be used to fetch the
table of keysyms in the current X environment.
:rtype: xcb.xproto.GetKeyboardMappingCookie | Below is the instruction that describes the task:
### Input:
Return an unchecked keyboard mapping cookie that can be used to fetch the
table of keysyms in the current X environment.
:rtype: xcb.xproto.GetKeyboardMappingCookie
### Response:
def get_keyboard_mapping_unchecked(conn):
"""
Return an unchecked keyboard mapping cookie that can be used to fetch the
table of keysyms in the current X environment.
:rtype: xcb.xproto.GetKeyboardMappingCookie
"""
mn, mx = get_min_max_keycode()
return conn.core.GetKeyboardMappingUnchecked(mn, mx - mn + 1) |
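A hedged usage sketch against the xpyb bindings this helper targets; it assumes a running X server and that the surrounding module's keycode setup (get_min_max_keycode) works on that connection.
import xcb

conn = xcb.connect()
cookie = get_keyboard_mapping_unchecked(conn)
reply = cookie.reply()
print(reply.keysyms_per_keycode)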
def tempo_account_get_customers(self, query=None, count_accounts=None):
"""
Gets all or some Attribute whose key or name contain a specific substring.
Attributes can be a Category or Customer.
:param query: OPTIONAL: query for search
:param count_accounts: bool OPTIONAL: provide how many associated Accounts with Customer
:return: list of customers
"""
params = {}
if query is not None:
params['query'] = query
if count_accounts is not None:
params['countAccounts'] = count_accounts
url = 'rest/tempo-accounts/1/customer'
return self.get(url, params=params) | Gets all or some Attribute whose key or name contain a specific substring.
Attributes can be a Category or Customer.
:param query: OPTIONAL: query for search
:param count_accounts: bool OPTIONAL: provide how many associated Accounts with Customer
:return: list of customers | Below is the instruction that describes the task:
### Input:
Gets all or some Attribute whose key or name contain a specific substring.
Attributes can be a Category or Customer.
:param query: OPTIONAL: query for search
:param count_accounts: bool OPTIONAL: provide how many associated Accounts with Customer
:return: list of customers
### Response:
def tempo_account_get_customers(self, query=None, count_accounts=None):
"""
Gets all or some Attribute whose key or name contain a specific substring.
Attributes can be a Category or Customer.
:param query: OPTIONAL: query for search
:param count_accounts: bool OPTIONAL: provide how many associated Accounts with Customer
:return: list of customers
"""
params = {}
if query is not None:
params['query'] = query
if count_accounts is not None:
params['countAccounts'] = count_accounts
url = 'rest/tempo-accounts/1/customer'
return self.get(url, params=params) |
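An illustrative call, assuming this method is exposed on the atlassian-python-api Jira client and that the response is a list of customer dicts; the URL and credentials are placeholders.
from atlassian import Jira

jira = Jira(url='https://jira.example.com', username='bot', password='secret')
customers = jira.tempo_account_get_customers(query='Acme', count_accounts=True)
for customer in customers:
    print(customer.get('name'))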
def line_intersection_2D(abarg, cdarg):
'''
line_intersection((a, b), (c, d)) yields the intersection point between the lines that pass
through the given pairs of points. If any lines are parallel, (numpy.nan, numpy.nan) is
returned; note that a, b, c, and d can all be 2 x n matrices of x and y coordinate row-vectors.
'''
((x1,y1),(x2,y2)) = abarg
((x3,y3),(x4,y4)) = cdarg
dx12 = (x1 - x2)
dx34 = (x3 - x4)
dy12 = (y1 - y2)
dy34 = (y3 - y4)
denom = dx12*dy34 - dy12*dx34
unit = np.isclose(denom, 0)
if unit is True: return (np.nan, np.nan)
denom = unit + denom
q12 = (x1*y2 - y1*x2) / denom
q34 = (x3*y4 - y3*x4) / denom
xi = q12*dx34 - q34*dx12
yi = q12*dy34 - q34*dy12
if unit is False: return (xi, yi)
elif unit is True: return (np.nan, np.nan)
else:
xi = np.asarray(xi)
yi = np.asarray(yi)
xi[unit] = np.nan
yi[unit] = np.nan
return (xi, yi) | line_intersection((a, b), (c, d)) yields the intersection point between the lines that pass
through the given pairs of points. If any lines are parallel, (numpy.nan, numpy.nan) is
returned; note that a, b, c, and d can all be 2 x n matrices of x and y coordinate row-vectors. | Below is the instruction that describes the task:
### Input:
line_intersection((a, b), (c, d)) yields the intersection point between the lines that pass
through the given pairs of points. If any lines are parallel, (numpy.nan, numpy.nan) is
returned; note that a, b, c, and d can all be 2 x n matrices of x and y coordinate row-vectors.
### Response:
def line_intersection_2D(abarg, cdarg):
'''
line_intersection((a, b), (c, d)) yields the intersection point between the lines that pass
through the given pairs of points. If any lines are parallel, (numpy.nan, numpy.nan) is
returned; note that a, b, c, and d can all be 2 x n matrices of x and y coordinate row-vectors.
'''
((x1,y1),(x2,y2)) = abarg
((x3,y3),(x4,y4)) = cdarg
dx12 = (x1 - x2)
dx34 = (x3 - x4)
dy12 = (y1 - y2)
dy34 = (y3 - y4)
denom = dx12*dy34 - dy12*dx34
unit = np.isclose(denom, 0)
if unit is True: return (np.nan, np.nan)
denom = unit + denom
q12 = (x1*y2 - y1*x2) / denom
q34 = (x3*y4 - y3*x4) / denom
xi = q12*dx34 - q34*dx12
yi = q12*dy34 - q34*dy12
if unit is False: return (xi, yi)
elif unit is True: return (np.nan, np.nan)
else:
xi = np.asarray(xi)
yi = np.asarray(yi)
xi[unit] = np.nan
yi[unit] = np.nan
return (xi, yi) |
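A quick numerical check of line_intersection_2D: the line through (0, 0) and (2, 2) meets the line through (0, 2) and (2, 0) at (1, 1).
ab = ((0.0, 0.0), (2.0, 2.0))
cd = ((0.0, 2.0), (2.0, 0.0))
xi, yi = line_intersection_2D(ab, cd)
print(float(xi), float(yi))   # 1.0 1.0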
def pexpect_monkeypatch():
"""Patch pexpect to prevent unhandled exceptions at VM teardown.
Calling this function will monkeypatch the pexpect.spawn class and modify
its __del__ method to make it more robust in the face of failures that can
occur if it is called when the Python VM is shutting down.
Since Python may fire __del__ methods arbitrarily late, it's possible for
them to execute during the teardown of the Python VM itself. At this
point, various builtin modules have been reset to None. Thus, the call to
self.close() will trigger an exception because it tries to call os.close(),
and os is now None.
"""
if pexpect.__version__[:3] >= '2.2':
# No need to patch, fix is already the upstream version.
return
def __del__(self):
"""This makes sure that no system resources are left open.
Python only garbage collects Python objects. OS file descriptors
are not Python objects, so they must be handled explicitly.
If the child file descriptor was opened outside of this class
(passed to the constructor) then this does not close it.
"""
if not self.closed:
try:
self.close()
except AttributeError:
pass
pexpect.spawn.__del__ = __del__ | Patch pexpect to prevent unhandled exceptions at VM teardown.
Calling this function will monkeypatch the pexpect.spawn class and modify
its __del__ method to make it more robust in the face of failures that can
occur if it is called when the Python VM is shutting down.
Since Python may fire __del__ methods arbitrarily late, it's possible for
them to execute during the teardown of the Python VM itself. At this
point, various builtin modules have been reset to None. Thus, the call to
self.close() will trigger an exception because it tries to call os.close(),
and os is now None. | Below is the instruction that describes the task:
### Input:
Patch pexpect to prevent unhandled exceptions at VM teardown.
Calling this function will monkeypatch the pexpect.spawn class and modify
its __del__ method to make it more robust in the face of failures that can
occur if it is called when the Python VM is shutting down.
Since Python may fire __del__ methods arbitrarily late, it's possible for
them to execute during the teardown of the Python VM itself. At this
point, various builtin modules have been reset to None. Thus, the call to
self.close() will trigger an exception because it tries to call os.close(),
and os is now None.
### Response:
def pexpect_monkeypatch():
"""Patch pexpect to prevent unhandled exceptions at VM teardown.
Calling this function will monkeypatch the pexpect.spawn class and modify
its __del__ method to make it more robust in the face of failures that can
occur if it is called when the Python VM is shutting down.
Since Python may fire __del__ methods arbitrarily late, it's possible for
them to execute during the teardown of the Python VM itself. At this
point, various builtin modules have been reset to None. Thus, the call to
self.close() will trigger an exception because it tries to call os.close(),
and os is now None.
"""
if pexpect.__version__[:3] >= '2.2':
# No need to patch, fix is already the upstream version.
return
def __del__(self):
"""This makes sure that no system resources are left open.
Python only garbage collects Python objects. OS file descriptors
are not Python objects, so they must be handled explicitly.
If the child file descriptor was opened outside of this class
(passed to the constructor) then this does not close it.
"""
if not self.closed:
try:
self.close()
except AttributeError:
pass
pexpect.spawn.__del__ = __del__ |
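An illustrative use: apply the patch once, before any pexpect children are spawned (on pexpect >= 2.2 the call is a no-op).
import pexpect

pexpect_monkeypatch()
child = pexpect.spawn('cat', timeout=5)
child.sendline('hello')
child.expect('hello')
child.close()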
def get(self):
"""Get a task from the queue."""
tasks = self._get_avaliable_tasks()
if not tasks:
return None
name, data = tasks[0]
self._client.kv.delete(name)
return data | Get a task from the queue. | Below is the instruction that describes the task:
### Input:
Get a task from the queue.
### Response:
def get(self):
"""Get a task from the queue."""
tasks = self._get_avaliable_tasks()
if not tasks:
return None
name, data = tasks[0]
self._client.kv.delete(name)
return data |
def do_alias(self, arg):
"""alias [name [command [parameter parameter ...] ]]
Create an alias called 'name' that executes 'command'. The
command must *not* be enclosed in quotes. Replaceable
parameters can be indicated by %1, %2, and so on, while %* is
replaced by all the parameters. If no command is given, the
current alias for name is shown. If no name is given, all
aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is
recursively applied to the first word of the command line; all
other words in the line are left alone.
As an example, here are two useful aliases (especially when
placed in the .pdbrc file):
# Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])
# Print instance variables in self
alias ps pi self
"""
args = arg.split()
if len(args) == 0:
keys = sorted(self.aliases.keys())
for alias in keys:
self.message("%s = %s" % (alias, self.aliases[alias]))
return
if args[0] in self.aliases and len(args) == 1:
self.message("%s = %s" % (args[0], self.aliases[args[0]]))
else:
self.aliases[args[0]] = ' '.join(args[1:]) | alias [name [command [parameter parameter ...] ]]
Create an alias called 'name' that executes 'command'. The
command must *not* be enclosed in quotes. Replaceable
parameters can be indicated by %1, %2, and so on, while %* is
replaced by all the parameters. If no command is given, the
current alias for name is shown. If no name is given, all
aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is
recursively applied to the first word of the command line; all
other words in the line are left alone.
As an example, here are two useful aliases (especially when
placed in the .pdbrc file):
# Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])
# Print instance variables in self
alias ps pi self | Below is the instruction that describes the task:
### Input:
alias [name [command [parameter parameter ...] ]]
Create an alias called 'name' that executes 'command'. The
command must *not* be enclosed in quotes. Replaceable
parameters can be indicated by %1, %2, and so on, while %* is
replaced by all the parameters. If no command is given, the
current alias for name is shown. If no name is given, all
aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is
recursively applied to the first word of the command line; all
other words in the line are left alone.
As an example, here are two useful aliases (especially when
placed in the .pdbrc file):
# Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])
# Print instance variables in self
alias ps pi self
### Response:
def do_alias(self, arg):
"""alias [name [command [parameter parameter ...] ]]
Create an alias called 'name' that executes 'command'. The
command must *not* be enclosed in quotes. Replaceable
parameters can be indicated by %1, %2, and so on, while %* is
replaced by all the parameters. If no command is given, the
current alias for name is shown. If no name is given, all
aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is
recursively applied to the first word of the command line; all
other words in the line are left alone.
As an example, here are two useful aliases (especially when
placed in the .pdbrc file):
# Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])
# Print instance variables in self
alias ps pi self
"""
args = arg.split()
if len(args) == 0:
keys = sorted(self.aliases.keys())
for alias in keys:
self.message("%s = %s" % (alias, self.aliases[alias]))
return
if args[0] in self.aliases and len(args) == 1:
self.message("%s = %s" % (args[0], self.aliases[args[0]]))
else:
self.aliases[args[0]] = ' '.join(args[1:]) |
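A hedged sketch that drives do_alias programmatically on a Pdb instance to show what it stores; at the interactive (Pdb) prompt the same lines are typed without the do_ prefix.
import pdb

p = pdb.Pdb()
p.do_alias('pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])')
p.do_alias('ps pi self')
p.do_alias('')             # no arguments: lists every alias via self.message()
print(p.aliases['ps'])     # -> pi self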
def _set_datapath(self, datapath):
""" Set a datapath.
"""
if datapath:
self._datapath = datapath.rstrip(os.sep)
self._fifo = int(stat.S_ISFIFO(os.stat(self.datapath).st_mode))
else:
self._datapath = None
self._fifo = False | Set a datapath. | Below is the instruction that describes the task:
### Input:
Set a datapath.
### Response:
def _set_datapath(self, datapath):
""" Set a datapath.
"""
if datapath:
self._datapath = datapath.rstrip(os.sep)
self._fifo = int(stat.S_ISFIFO(os.stat(self.datapath).st_mode))
else:
self._datapath = None
self._fifo = False |
def count_snps(mat):
"""
get dstats from the count array and return as a float tuple
"""
## get [aabb, baba, abba, aaab]
snps = np.zeros(4, dtype=np.uint32)
## get concordant (aabb) pis sites
snps[0] = np.uint32(\
mat[0, 5] + mat[0, 10] + mat[0, 15] + \
mat[5, 0] + mat[5, 10] + mat[5, 15] + \
mat[10, 0] + mat[10, 5] + mat[10, 15] + \
mat[15, 0] + mat[15, 5] + mat[15, 10])
## get discordant (baba) sites
for i in range(16):
if i % 5:
snps[1] += mat[i, i]
## get discordant (abba) sites
snps[2] = mat[1, 4] + mat[2, 8] + mat[3, 12] +\
mat[4, 1] + mat[6, 9] + mat[7, 13] +\
mat[8, 2] + mat[9, 6] + mat[11, 14] +\
mat[12, 3] + mat[13, 7] + mat[14, 11]
## get autapomorphy sites
snps[3] = (mat.sum() - np.diag(mat).sum()) - snps[2]
return snps | get dstats from the count array and return as a float tuple | Below is the instruction that describes the task:
### Input:
get dstats from the count array and return as a float tuple
### Response:
def count_snps(mat):
"""
get dstats from the count array and return as a float tuple
"""
## get [aabb, baba, abba, aaab]
snps = np.zeros(4, dtype=np.uint32)
## get concordant (aabb) pis sites
snps[0] = np.uint32(\
mat[0, 5] + mat[0, 10] + mat[0, 15] + \
mat[5, 0] + mat[5, 10] + mat[5, 15] + \
mat[10, 0] + mat[10, 5] + mat[10, 15] + \
mat[15, 0] + mat[15, 5] + mat[15, 10])
## get discordant (baba) sites
for i in range(16):
if i % 5:
snps[1] += mat[i, i]
## get discordant (abba) sites
snps[2] = mat[1, 4] + mat[2, 8] + mat[3, 12] +\
mat[4, 1] + mat[6, 9] + mat[7, 13] +\
mat[8, 2] + mat[9, 6] + mat[11, 14] +\
mat[12, 3] + mat[13, 7] + mat[14, 11]
## get autapomorphy sites
snps[3] = (mat.sum() - np.diag(mat).sum()) - snps[2]
return snps |
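A minimal sketch: count_snps takes a 16x16 site-count matrix, so seeding two baba-pattern sites and three abba-pattern sites should land in the second and third bins.
import numpy as np

mat = np.zeros((16, 16), dtype=np.uint32)
mat[1, 1] = 2     # baba pattern (a diagonal cell off the 0/5/10/15 entries)
mat[1, 4] = 3     # abba pattern
print(count_snps(mat))    # [0 2 3 0]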
def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0):
"""
Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.
Usage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints
"""
if (defaultreallimits != None):
if type(defaultreallimits) not in [ListType,TupleType] or len(defaultreallimits)==1: # only one limit given, assumed to be lower one & upper is calc'd
lowerreallimit = defaultreallimits
upperreallimit = 1.0001 * max(inlist)
else: # assume both limits given
lowerreallimit = defaultreallimits[0]
upperreallimit = defaultreallimits[1]
binsize = (upperreallimit-lowerreallimit)/float(numbins)
else: # no limits given for histogram, both must be calc'd
estbinwidth=(max(inlist)-min(inlist))/float(numbins) + 1 # 1=>cover all
binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins)
lowerreallimit = min(inlist) - binsize/2 #lower real limit,1st bin
bins = [0]*(numbins)
extrapoints = 0
for num in inlist:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit)/float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except:
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print('\nPoints outside given histogram range =',extrapoints)
return (bins, lowerreallimit, binsize, extrapoints) | Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.
Usage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints | Below is the instruction that describes the task:
### Input:
Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.
Usage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints
### Response:
def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0):
"""
Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.
Usage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints
"""
if (defaultreallimits != None):
if type(defaultreallimits) not in [ListType,TupleType] or len(defaultreallimits)==1: # only one limit given, assumed to be lower one & upper is calc'd
lowerreallimit = defaultreallimits
upperreallimit = 1.0001 * max(inlist)
else: # assume both limits given
lowerreallimit = defaultreallimits[0]
upperreallimit = defaultreallimits[1]
binsize = (upperreallimit-lowerreallimit)/float(numbins)
else: # no limits given for histogram, both must be calc'd
estbinwidth=(max(inlist)-min(inlist))/float(numbins) + 1 # 1=>cover all
binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins)
lowerreallimit = min(inlist) - binsize/2 #lower real limit,1st bin
bins = [0]*(numbins)
extrapoints = 0
for num in inlist:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit)/float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except:
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print('\nPoints outside given histogram range =',extrapoints)
return (bins, lowerreallimit, binsize, extrapoints) |
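A minimal call sketch, letting the routine pick its own (usually non-pretty) bin limits for ten integer scores.
scores = [1, 2, 2, 3, 3, 3, 4, 4, 5, 9]
bins, lowerlimit, binsize, extra = lhistogram(scores, numbins=5)
print(bins, lowerlimit, binsize, extra)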
def get_pip(mov=None, api=None, name=None):
"""get value of pip"""
# ~ check args
if mov is None and api is None:
logger.error("need at least one of those")
raise ValueError()
elif mov is not None and api is not None:
logger.error("mov and api are exclusive")
raise ValueError()
if api is not None:
if name is None:
logger.error("need a name")
raise ValueError()
mov = api.new_mov(name)
mov.open()
if mov is not None:
mov._check_open()
# find in the collection
try:
logger.debug(len(Glob().theCollector.collection))
pip = Glob().theCollector.collection['pip']
if name is not None:
pip_res = pip[name]
elif mov is not None:
pip_res = pip[mov.product]
logger.debug("pip found in the collection")
return pip_res
except KeyError:
logger.debug("pip not found in the collection")
# ~ vars
records = []
intervals = [10, 20, 30]
def _check_price(interval=10):
timeout = time.time() + interval
while time.time() < timeout:
records.append(mov.get_price())
time.sleep(0.5)
# find variation
for interval in intervals:
_check_price(interval)
if min(records) == max(records):
logger.debug("no variation in %d seconds" % interval)
if interval == intervals[-1]:
raise TimeoutError("no variation")
else:
break
# find longer price
for price in records:
if 'best_price' not in locals():
best_price = price
if len(str(price)) > len(str(best_price)):
logger.debug("found new best_price %f" % price)
best_price = price
# get pip
pip = get_number_unit(best_price)
Glob().pipHandler.add_val({mov.product: pip})
return pip | get value of pip | Below is the instruction that describes the task:
### Input:
get value of pip
### Response:
def get_pip(mov=None, api=None, name=None):
"""get value of pip"""
# ~ check args
if mov is None and api is None:
logger.error("need at least one of those")
raise ValueError()
elif mov is not None and api is not None:
logger.error("mov and api are exclusive")
raise ValueError()
if api is not None:
if name is None:
logger.error("need a name")
raise ValueError()
mov = api.new_mov(name)
mov.open()
if mov is not None:
mov._check_open()
# find in the collection
try:
logger.debug(len(Glob().theCollector.collection))
pip = Glob().theCollector.collection['pip']
if name is not None:
pip_res = pip[name]
elif mov is not None:
pip_res = pip[mov.product]
logger.debug("pip found in the collection")
return pip_res
except KeyError:
logger.debug("pip not found in the collection")
# ~ vars
records = []
intervals = [10, 20, 30]
def _check_price(interval=10):
timeout = time.time() + interval
while time.time() < timeout:
records.append(mov.get_price())
time.sleep(0.5)
# find variation
for interval in intervals:
_check_price(interval)
if min(records) == max(records):
logger.debug("no variation in %d seconds" % interval)
if interval == intervals[-1]:
raise TimeoutError("no variation")
else:
break
# find longer price
for price in records:
if 'best_price' not in locals():
best_price = price
if len(str(price)) > len(str(best_price)):
logger.debug("found new best_price %f" % price)
best_price = price
# get pip
pip = get_number_unit(best_price)
Glob().pipHandler.add_val({mov.product: pip})
return pip |
def _dict_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: Object._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
# make/extend the dictionary of content
if use_dict is None:
use_dict = as_class()
klasses = list(self.__class__.__mro__)
klasses.reverse()
# build a list of property identifiers "bottom up"
property_names = []
properties_seen = set()
for c in klasses:
for prop in getattr(c, 'properties', []):
if prop.identifier not in properties_seen:
property_names.append(prop.identifier)
properties_seen.add(prop.identifier)
# extract the values
for property_name in property_names:
# get the value
property_value = self._properties.get(property_name).ReadProperty(self)
if property_value is None:
continue
# if the value has a way to convert it to a dict, use it
if hasattr(property_value, "dict_contents"):
property_value = property_value.dict_contents(as_class=as_class)
# save the value
use_dict.__setitem__(property_name, property_value)
# return what we built/updated
return use_dict | Return the contents of an object as a dict. | Below is the instruction that describes the task:
### Input:
Return the contents of an object as a dict.
### Response:
def _dict_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: Object._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
# make/extend the dictionary of content
if use_dict is None:
use_dict = as_class()
klasses = list(self.__class__.__mro__)
klasses.reverse()
# build a list of property identifiers "bottom up"
property_names = []
properties_seen = set()
for c in klasses:
for prop in getattr(c, 'properties', []):
if prop.identifier not in properties_seen:
property_names.append(prop.identifier)
properties_seen.add(prop.identifier)
# extract the values
for property_name in property_names:
# get the value
property_value = self._properties.get(property_name).ReadProperty(self)
if property_value is None:
continue
# if the value has a way to convert it to a dict, use it
if hasattr(property_value, "dict_contents"):
property_value = property_value.dict_contents(as_class=as_class)
# save the value
use_dict.__setitem__(property_name, property_value)
# return what we built/updated
return use_dict |
def strip_docstrings(tokens):
"""Replace docstring tokens with NL tokens in a `tokenize` stream.
Any STRING token not part of an expression is deemed a docstring.
Indented docstrings are not yet recognised.
"""
stack = []
state = 'wait_string'
for t in tokens:
typ = t[0]
if state == 'wait_string':
if typ in (tokenize.NL, tokenize.COMMENT):
yield t
elif typ in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING):
stack.append(t)
elif typ == tokenize.NEWLINE:
stack.append(t)
start_line, end_line = stack[0][2][0], stack[-1][3][0]+1
for i in range(start_line, end_line):
yield tokenize.NL, '\n', (i, 0), (i,1), '\n'
for t in stack:
if t[0] in (tokenize.DEDENT, tokenize.INDENT):
yield t[0], t[1], (i+1, t[2][1]), (i+1, t[3][1]), t[4]
del stack[:]
else:
stack.append(t)
for t in stack: yield t
del stack[:]
state = 'wait_newline'
elif state == 'wait_newline':
if typ == tokenize.NEWLINE:
state = 'wait_string'
yield t | Replace docstring tokens with NL tokens in a `tokenize` stream.
Any STRING token not part of an expression is deemed a docstring.
Indented docstrings are not yet recognised. | Below is the instruction that describes the task:
### Input:
Replace docstring tokens with NL tokens in a `tokenize` stream.
Any STRING token not part of an expression is deemed a docstring.
Indented docstrings are not yet recognised.
### Response:
def strip_docstrings(tokens):
"""Replace docstring tokens with NL tokens in a `tokenize` stream.
Any STRING token not part of an expression is deemed a docstring.
Indented docstrings are not yet recognised.
"""
stack = []
state = 'wait_string'
for t in tokens:
typ = t[0]
if state == 'wait_string':
if typ in (tokenize.NL, tokenize.COMMENT):
yield t
elif typ in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING):
stack.append(t)
elif typ == tokenize.NEWLINE:
stack.append(t)
start_line, end_line = stack[0][2][0], stack[-1][3][0]+1
for i in range(start_line, end_line):
yield tokenize.NL, '\n', (i, 0), (i,1), '\n'
for t in stack:
if t[0] in (tokenize.DEDENT, tokenize.INDENT):
yield t[0], t[1], (i+1, t[2][1]), (i+1, t[3][1]), t[4]
del stack[:]
else:
stack.append(t)
for t in stack: yield t
del stack[:]
state = 'wait_newline'
elif state == 'wait_newline':
if typ == tokenize.NEWLINE:
state = 'wait_string'
yield t |
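An illustrative run over a standard tokenize stream for a tiny function; its docstring comes back as NL tokens while every other token passes through.
import io
import tokenize

source = 'def f():\n    """the docstring"""\n    return 1\n'
tokens = tokenize.generate_tokens(io.StringIO(source).readline)
for tok in strip_docstrings(tokens):
    print(tokenize.tok_name[tok[0]], repr(tok[1]))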
def get_brandings(self):
"""
Get all account brandings
@return List of brandings
"""
connection = Connection(self.token)
connection.set_url(self.production, self.BRANDINGS_URL)
return connection.get_request() | Get all account brandings
@return List of brandings | Below is the instruction that describes the task:
### Input:
Get all account brandings
@return List of brandings
### Response:
def get_brandings(self):
"""
Get all account brandings
@return List of brandings
"""
connection = Connection(self.token)
connection.set_url(self.production, self.BRANDINGS_URL)
return connection.get_request() |
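A hedged usage sketch; SignaturitLikeClient is a stand-in name for whatever client class defines this method, assumed to be constructed with the API token and production flag the method reads from self, and to return branding dicts.
client = SignaturitLikeClient(token='YOUR_TOKEN', production=False)
for branding in client.get_brandings():
    print(branding.get('id'))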
def CopyVcardFields(new_vcard, auth_vcard, field_names):
"""Copy vCard field values from an authoritative vCard into a new one."""
for field in field_names:
value_list = auth_vcard.contents.get(field)
new_vcard = SetVcardField(new_vcard, field, value_list)
return new_vcard | Copy vCard field values from an authoritative vCard into a new one. | Below is the instruction that describes the task:
### Input:
Copy vCard field values from an authoritative vCard into a new one.
### Response:
def CopyVcardFields(new_vcard, auth_vcard, field_names):
"""Copy vCard field values from an authoritative vCard into a new one."""
for field in field_names:
value_list = auth_vcard.contents.get(field)
new_vcard = SetVcardField(new_vcard, field, value_list)
return new_vcard |
def F_(self, X):
"""
computes h()
:param X:
:return:
"""
if self._interpol:
if not hasattr(self, '_F_interp'):
if self._lookup:
x = self._x_lookup
F_x = self._f_lookup
else:
x = np.linspace(0, self._max_interp_X, self._num_interp_X)
F_x = self._F(x)
self._F_interp = interp.interp1d(x, F_x, kind='linear', axis=-1, copy=False, bounds_error=False,
fill_value=0, assume_sorted=True)
return self._F_interp(X)
else:
return self._F(X) | computes h()
:param X:
:return: | Below is the instruction that describes the task:
### Input:
computes h()
:param X:
:return:
### Response:
def F_(self, X):
"""
computes h()
:param X:
:return:
"""
if self._interpol:
if not hasattr(self, '_F_interp'):
if self._lookup:
x = self._x_lookup
F_x = self._f_lookup
else:
x = np.linspace(0, self._max_interp_X, self._num_interp_X)
F_x = self._F(x)
self._F_interp = interp.interp1d(x, F_x, kind='linear', axis=-1, copy=False, bounds_error=False,
fill_value=0, assume_sorted=True)
return self._F_interp(X)
else:
return self._F(X) |
def write_info (self, url_data):
"""Write url_data.info."""
sep = u"<br/>"+os.linesep
text = sep.join(cgi.escape(x) for x in url_data.info)
self.writeln(u'<tr><td valign="top">' + self.part("info")+
u"</td><td>"+text+u"</td></tr>") | Write url_data.info. | Below is the the instruction that describes the task:
### Input:
Write url_data.info.
### Response:
def write_info (self, url_data):
"""Write url_data.info."""
sep = u"<br/>"+os.linesep
text = sep.join(cgi.escape(x) for x in url_data.info)
self.writeln(u'<tr><td valign="top">' + self.part("info")+
u"</td><td>"+text+u"</td></tr>") |
def status(deps=DEPENDENCIES, linesep=os.linesep):
"""Return a status of dependencies"""
maxwidth = 0
col1 = []
col2 = []
for dependency in deps:
title1 = dependency.modname
title1 += ' ' + dependency.required_version
col1.append(title1)
maxwidth = max([maxwidth, len(title1)])
col2.append(dependency.get_installed_version())
text = ""
for index in range(len(deps)):
text += col1[index].ljust(maxwidth) + ': ' + col2[index] + linesep
return text[:-1] | Return a status of dependencies | Below is the instruction that describes the task:
### Input:
Return a status of dependencies
### Response:
def status(deps=DEPENDENCIES, linesep=os.linesep):
"""Return a status of dependencies"""
maxwidth = 0
col1 = []
col2 = []
for dependency in deps:
title1 = dependency.modname
title1 += ' ' + dependency.required_version
col1.append(title1)
maxwidth = max([maxwidth, len(title1)])
col2.append(dependency.get_installed_version())
text = ""
for index in range(len(deps)):
text += col1[index].ljust(maxwidth) + ': ' + col2[index] + linesep
return text[:-1] |
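An illustrative call; the Dependency objects in DEPENDENCIES are assumed to expose modname, required_version and get_installed_version(), exactly as the loop above requires.
print(status())                             # one "name required: installed" line per dependency
print(status(deps=DEPENDENCIES[:2], linesep='\n'))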