code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k)
---|---|---|
def decoration(self):
""" The displayed icon.
Shows open icon when node was visited (children are fetched). This allows users
for instance to collapse a directory node but still see that it was visited, which
may be useful if there is a huge list of directories.
"""
rtiIconFactory = RtiIconFactory.singleton()
if self._exception:
return rtiIconFactory.getIcon(rtiIconFactory.ERROR, isOpen=False,
color=rtiIconFactory.COLOR_ERROR)
else:
return rtiIconFactory.getIcon(self.iconGlyph, isOpen=not self.canFetchChildren(),
color=self.iconColor) | The displayed icon.
Shows open icon when node was visited (children are fetched). This allows users
for instance to collapse a directory node but still see that it was visited, which
may be useful if there is a huge list of directories. | Below is the instruction that describes the task:
### Input:
The displayed icon.
Shows open icon when node was visited (children are fetched). This allows users
for instance to collapse a directory node but still see that it was visited, which
may be useful if there is a huge list of directories.
### Response:
def decoration(self):
""" The displayed icon.
Shows open icon when node was visited (children are fetched). This allows users
for instance to collapse a directory node but still see that it was visited, which
may be useful if there is a huge list of directories.
"""
rtiIconFactory = RtiIconFactory.singleton()
if self._exception:
return rtiIconFactory.getIcon(rtiIconFactory.ERROR, isOpen=False,
color=rtiIconFactory.COLOR_ERROR)
else:
return rtiIconFactory.getIcon(self.iconGlyph, isOpen=not self.canFetchChildren(),
color=self.iconColor) |
def find_actual_caller(self):
"""
Returns the full-qualified module name, full pathname, line number, and
function in which `StreamTeeLogger.write()` was called. For example,
if this instance is used to replace `sys.stdout`, this will return the
location of any print statement.
"""
# Gleaned from code in the logging module itself...
try:
f = sys._getframe(1)
##f = inspect.currentframe(1)
except Exception:
f = None
# On some versions of IronPython, currentframe() returns None if
# IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown module)", "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
mod = inspect.getmodule(f)
if mod is None:
modname = '__main__'
else:
modname = mod.__name__
if modname == __name__:
# Crawl back until the first frame outside of this module
f = f.f_back
continue
rv = (modname, filename, f.f_lineno, co.co_name)
break
return rv | Returns the full-qualified module name, full pathname, line number, and
function in which `StreamTeeLogger.write()` was called. For example,
if this instance is used to replace `sys.stdout`, this will return the
location of any print statement. | Below is the instruction that describes the task:
### Input:
Returns the full-qualified module name, full pathname, line number, and
function in which `StreamTeeLogger.write()` was called. For example,
if this instance is used to replace `sys.stdout`, this will return the
location of any print statement.
### Response:
def find_actual_caller(self):
"""
Returns the full-qualified module name, full pathname, line number, and
function in which `StreamTeeLogger.write()` was called. For example,
if this instance is used to replace `sys.stdout`, this will return the
location of any print statement.
"""
# Gleaned from code in the logging module itself...
try:
f = sys._getframe(1)
##f = inspect.currentframe(1)
except Exception:
f = None
# On some versions of IronPython, currentframe() returns None if
# IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown module)", "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
mod = inspect.getmodule(f)
if mod is None:
modname = '__main__'
else:
modname = mod.__name__
if modname == __name__:
# Crawl back until the first frame outside of this module
f = f.f_back
continue
rv = (modname, filename, f.f_lineno, co.co_name)
break
return rv |
def add_page(self, slug):
'''
Add new page.
'''
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
if MWiki.get_by_uid(slug):
self.set_status(400)
return False
else:
MWiki.create_page(slug, post_data)
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/page/{0}'.format(slug)) | Add new page. | Below is the instruction that describes the task:
### Input:
Add new page.
### Response:
def add_page(self, slug):
'''
Add new page.
'''
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
if MWiki.get_by_uid(slug):
self.set_status(400)
return False
else:
MWiki.create_page(slug, post_data)
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/page/{0}'.format(slug)) |
def _update_segmentation_mask_if_overlap(toupdate, other, id, otherid):
"""
Merges the segments specified by `id` (found in `toupdate`) and `otherid`
(found in `other`) if they overlap at all. Updates `toupdate` accordingly.
"""
# If there is any overlap or touching, merge the two, otherwise just return
yourmask = other == otherid
mymask = toupdate == id
overlap_exists = np.any(yourmask & mymask)
if not overlap_exists:
return
yourfidxs, yoursidxs = np.where(other == otherid)
toupdate[yourfidxs, yoursidxs] = id | Merges the segments specified by `id` (found in `toupdate`) and `otherid`
(found in `other`) if they overlap at all. Updates `toupdate` accordingly. | Below is the instruction that describes the task:
### Input:
Merges the segments specified by `id` (found in `toupdate`) and `otherid`
(found in `other`) if they overlap at all. Updates `toupdate` accordingly.
### Response:
def _update_segmentation_mask_if_overlap(toupdate, other, id, otherid):
"""
Merges the segments specified by `id` (found in `toupdate`) and `otherid`
(found in `other`) if they overlap at all. Updates `toupdate` accordingly.
"""
# If there is any overlap or touching, merge the two, otherwise just return
yourmask = other == otherid
mymask = toupdate == id
overlap_exists = np.any(yourmask & mymask)
if not overlap_exists:
return
yourfidxs, yoursidxs = np.where(other == otherid)
toupdate[yourfidxs, yoursidxs] = id |
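A minimal usage sketch of the function above, with toy NumPy label arrays (only numpy is assumed; the arrays and the ids 3 and 7 are invented for illustration). Segment 7 in `other` shares a pixel with segment 3 in `toupdate`, so all of its pixels are relabelled in place:
import numpy as np
toupdate = np.array([[3, 3, 0],
                     [0, 0, 0]])
other = np.array([[0, 7, 7],
                  [0, 0, 7]])
_update_segmentation_mask_if_overlap(toupdate, other, id=3, otherid=7)
print(toupdate)
# [[3 3 3]
#  [0 0 3]]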
def _group_kwargs_to_options(cls, obj, kwargs):
"Format option group kwargs into canonical options format"
groups = Options._option_groups
if set(kwargs.keys()) - set(groups):
raise Exception("Keyword options %s must be one of %s" % (groups,
','.join(repr(g) for g in groups)))
elif not all(isinstance(v, dict) for v in kwargs.values()):
raise Exception("The %s options must be specified using dictionary groups" %
','.join(repr(k) for k in kwargs.keys()))
# Check whether the user is specifying targets (such as 'Image.Foo')
targets = [grp and all(k[0].isupper() for k in grp) for grp in kwargs.values()]
if any(targets) and not all(targets):
raise Exception("Cannot mix target specification keys such as 'Image' with non-target keywords.")
elif not any(targets):
# No targets specified - add current object as target
sanitized_group = util.group_sanitizer(obj.group)
if obj.label:
identifier = ('%s.%s.%s' % (
obj.__class__.__name__, sanitized_group,
util.label_sanitizer(obj.label)))
elif sanitized_group != obj.__class__.__name__:
identifier = '%s.%s' % (obj.__class__.__name__, sanitized_group)
else:
identifier = obj.__class__.__name__
options = {identifier:{grp:kws for (grp,kws) in kwargs.items()}}
else:
dfltdict = defaultdict(dict)
for grp, entries in kwargs.items():
for identifier, kws in entries.items():
dfltdict[identifier][grp] = kws
options = dict(dfltdict)
return options | Format option group kwargs into canonical options format | Below is the instruction that describes the task:
### Input:
Format option group kwargs into canonical options format
### Response:
def _group_kwargs_to_options(cls, obj, kwargs):
"Format option group kwargs into canonical options format"
groups = Options._option_groups
if set(kwargs.keys()) - set(groups):
raise Exception("Keyword options %s must be one of %s" % (groups,
','.join(repr(g) for g in groups)))
elif not all(isinstance(v, dict) for v in kwargs.values()):
raise Exception("The %s options must be specified using dictionary groups" %
','.join(repr(k) for k in kwargs.keys()))
# Check whether the user is specifying targets (such as 'Image.Foo')
targets = [grp and all(k[0].isupper() for k in grp) for grp in kwargs.values()]
if any(targets) and not all(targets):
raise Exception("Cannot mix target specification keys such as 'Image' with non-target keywords.")
elif not any(targets):
# No targets specified - add current object as target
sanitized_group = util.group_sanitizer(obj.group)
if obj.label:
identifier = ('%s.%s.%s' % (
obj.__class__.__name__, sanitized_group,
util.label_sanitizer(obj.label)))
elif sanitized_group != obj.__class__.__name__:
identifier = '%s.%s' % (obj.__class__.__name__, sanitized_group)
else:
identifier = obj.__class__.__name__
options = {identifier:{grp:kws for (grp,kws) in kwargs.items()}}
else:
dfltdict = defaultdict(dict)
for grp, entries in kwargs.items():
for identifier, kws in entries.items():
dfltdict[identifier][grp] = kws
options = dict(dfltdict)
return options |
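The inversion performed in the target-specified branch can be shown in isolation. The sketch below is a simplified, standalone illustration (the 'style'/'plot' group names and the 'Image.Foo' target are hypothetical inputs, not taken from the snippet): group-keyed kwargs are flipped so that each target maps to its per-group keyword dictionaries.
from collections import defaultdict
def invert_group_kwargs(**kwargs):
    # mirrors the defaultdict inversion in _group_kwargs_to_options
    dfltdict = defaultdict(dict)
    for grp, entries in kwargs.items():
        for identifier, kws in entries.items():
            dfltdict[identifier][grp] = kws
    return dict(dfltdict)
print(invert_group_kwargs(style={'Image.Foo': {'cmap': 'viridis'}},
                          plot={'Image.Foo': {'width': 300}}))
# {'Image.Foo': {'style': {'cmap': 'viridis'}, 'plot': {'width': 300}}}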
def _connect_mitogen_su(spec):
"""
Return ContextService arguments for su as a first class connection.
"""
return {
'method': 'su',
'kwargs': {
'username': spec.remote_user(),
'password': spec.password(),
'python_path': spec.python_path(),
'su_path': spec.become_exe(),
'connect_timeout': spec.timeout(),
'remote_name': get_remote_name(spec),
}
} | Return ContextService arguments for su as a first class connection. | Below is the instruction that describes the task:
### Input:
Return ContextService arguments for su as a first class connection.
### Response:
def _connect_mitogen_su(spec):
"""
Return ContextService arguments for su as a first class connection.
"""
return {
'method': 'su',
'kwargs': {
'username': spec.remote_user(),
'password': spec.password(),
'python_path': spec.python_path(),
'su_path': spec.become_exe(),
'connect_timeout': spec.timeout(),
'remote_name': get_remote_name(spec),
}
} |
def connect(self, taskspec):
"""
Connect the *following* task to this one. In other words, the
given task is added as an output task.
:type taskspec: TaskSpec
:param taskspec: The new output task.
"""
self.outputs.append(taskspec)
taskspec._connect_notify(self) | Connect the *following* task to this one. In other words, the
given task is added as an output task.
:type taskspec: TaskSpec
:param taskspec: The new output task. | Below is the instruction that describes the task:
### Input:
Connect the *following* task to this one. In other words, the
given task is added as an output task.
:type taskspec: TaskSpec
:param taskspec: The new output task.
### Response:
def connect(self, taskspec):
"""
Connect the *following* task to this one. In other words, the
given task is added as an output task.
:type taskspec: TaskSpec
:param taskspec: The new output task.
"""
self.outputs.append(taskspec)
taskspec._connect_notify(self) |
def makeMigrator(context, portal_type, remove_old_value=True):
""" generate a migrator for the given at-based portal type """
meta_type = portal_type
class BlobMigrator(BaseInlineMigrator):
"""in-place migrator for archetypes based content that copies
file/image data from old non-blob fields to new fields with the same
name provided by archetypes.schemaextender.
see `plone3 to 4 migration guide`__
.. __: https://plone.org/documentation/manual/upgrade-guide/version
/upgrading-plone-3-x-to-4.0/updating-add-on-products-for-plone-4.0
/use-plone.app.blob-based-blob-storage
"""
src_portal_type = portal_type
src_meta_type = meta_type
dst_portal_type = portal_type
dst_meta_type = meta_type
fields = []
def getFields(self, obj):
if not self.fields:
# get the blob fields to migrate from the first object
for field in ISchema(obj).fields():
if IBlobField.providedBy(field):
self.fields.append(field.getName())
return self.fields
@property
def fields_map(self):
fields = self.getFields(None)
return dict([(name, None) for name in fields])
def migrate_data(self):
fields = self.getFields(self.obj)
for name in fields:
# access old field by not using schemaextender
oldfield = self.obj.schema[name]
is_imagefield = False
if hasattr(oldfield, 'removeScales'):
# clean up old image scales
is_imagefield = True
oldfield.removeScales(self.obj)
value = oldfield.get(self.obj)
if not value:
# no image/file data: don't copy it over to blob field
# this way it's save to run migration multiple times w/o
# overwriting existing data
continue
if isinstance(aq_base(value), BlobWrapper):
# already a blob field, no need to migrate it
continue
# access new field via schemaextender
field = self.obj.getField(name)
field.getMutator(self.obj)(value)
if remove_old_value:
# Remove data from old field to not end up with data
# stored twice - in ZODB and blobstorage
if is_imagefield:
oldfield.set(self.obj, 'DELETE_IMAGE')
else:
oldfield.set(self.obj, 'DELETE_FILE')
def last_migrate_reindex(self):
# The original method checks the modification date in order to
# keep the old one, but we don't care about it.
self.obj.reindexObject()
return BlobMigrator | generate a migrator for the given at-based portal type | Below is the instruction that describes the task:
### Input:
generate a migrator for the given at-based portal type
### Response:
def makeMigrator(context, portal_type, remove_old_value=True):
""" generate a migrator for the given at-based portal type """
meta_type = portal_type
class BlobMigrator(BaseInlineMigrator):
"""in-place migrator for archetypes based content that copies
file/image data from old non-blob fields to new fields with the same
name provided by archetypes.schemaextender.
see `plone3 to 4 migration guide`__
.. __: https://plone.org/documentation/manual/upgrade-guide/version
/upgrading-plone-3-x-to-4.0/updating-add-on-products-for-plone-4.0
/use-plone.app.blob-based-blob-storage
"""
src_portal_type = portal_type
src_meta_type = meta_type
dst_portal_type = portal_type
dst_meta_type = meta_type
fields = []
def getFields(self, obj):
if not self.fields:
# get the blob fields to migrate from the first object
for field in ISchema(obj).fields():
if IBlobField.providedBy(field):
self.fields.append(field.getName())
return self.fields
@property
def fields_map(self):
fields = self.getFields(None)
return dict([(name, None) for name in fields])
def migrate_data(self):
fields = self.getFields(self.obj)
for name in fields:
# access old field by not using schemaextender
oldfield = self.obj.schema[name]
is_imagefield = False
if hasattr(oldfield, 'removeScales'):
# clean up old image scales
is_imagefield = True
oldfield.removeScales(self.obj)
value = oldfield.get(self.obj)
if not value:
# no image/file data: don't copy it over to blob field
# this way it's safe to run migration multiple times w/o
# overwriting existing data
continue
if isinstance(aq_base(value), BlobWrapper):
# already a blob field, no need to migrate it
continue
# access new field via schemaextender
field = self.obj.getField(name)
field.getMutator(self.obj)(value)
if remove_old_value:
# Remove data from old field to not end up with data
# stored twice - in ZODB and blobstorage
if is_imagefield:
oldfield.set(self.obj, 'DELETE_IMAGE')
else:
oldfield.set(self.obj, 'DELETE_FILE')
def last_migrate_reindex(self):
# The original method checks the modification date in order to
# keep the old one, but we don't care about it.
self.obj.reindexObject()
return BlobMigrator |
def top2_full(votes):
"""
Description:
Top 2 alternatives 16 moment conditions values calculation
Parameters:
votes: ordinal preference data (numpy ndarray of integers)
"""
res = np.zeros(16)
for vote in votes:
# the top ranked alternative is in vote[0][0], second in vote[1][0]
if vote[0][0] == 0: # i.e. the first alt is ranked first
res[0] += 1
if vote[1][0] == 1: # i.e. the second alt is ranked second
res[4] += 1
elif vote[1][0] == 2:
res[5] += 1
elif vote[1][0] == 3:
res[6] += 1
elif vote[0][0] == 1:
res[1] += 1
if vote[1][0] == 0:
res[7] += 1
elif vote[1][0] == 2:
res[8] += 1
elif vote[1][0] == 3:
res[9] += 1
elif vote[0][0] == 2:
res[2] += 1
if vote[1][0] == 0:
res[10] += 1
elif vote[1][0] == 1:
res[11] += 1
elif vote[1][0] == 3:
res[12] += 1
elif vote[0][0] == 3:
res[3] += 1
if vote[1][0] == 0:
res[13] += 1
elif vote[1][0] == 1:
res[14] += 1
elif vote[1][0] == 2:
res[15] += 1
res /= len(votes)
return res | Description:
Top 2 alternatives 16 moment conditions values calculation
Parameters:
votes: ordinal preference data (numpy ndarray of integers) | Below is the instruction that describes the task:
### Input:
Description:
Top 2 alternatives 16 moment conditions values calculation
Parameters:
votes: ordinal preference data (numpy ndarray of integers)
### Response:
def top2_full(votes):
"""
Description:
Top 2 alternatives 16 moment conditions values calculation
Parameters:
votes: ordinal preference data (numpy ndarray of integers)
"""
res = np.zeros(16)
for vote in votes:
# the top ranked alternative is in vote[0][0], second in vote[1][0]
if vote[0][0] == 0: # i.e. the first alt is ranked first
res[0] += 1
if vote[1][0] == 1: # i.e. the second alt is ranked second
res[4] += 1
elif vote[1][0] == 2:
res[5] += 1
elif vote[1][0] == 3:
res[6] += 1
elif vote[0][0] == 1:
res[1] += 1
if vote[1][0] == 0:
res[7] += 1
elif vote[1][0] == 2:
res[8] += 1
elif vote[1][0] == 3:
res[9] += 1
elif vote[0][0] == 2:
res[2] += 1
if vote[1][0] == 0:
res[10] += 1
elif vote[1][0] == 1:
res[11] += 1
elif vote[1][0] == 3:
res[12] += 1
elif vote[0][0] == 3:
res[3] += 1
if vote[1][0] == 0:
res[13] += 1
elif vote[1][0] == 1:
res[14] += 1
elif vote[1][0] == 2:
res[15] += 1
res /= len(votes)
return res |
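For illustration, a small call with two hand-made votes. This assumes votes[i][k][0] holds the alternative that voter i ranks in position k (which is how the loop above indexes the array); the data itself is made up.
import numpy as np
votes = np.array([[[0], [1], [2], [3]],   # voter 0 ranks alt 0 first, alt 1 second
                  [[2], [0], [1], [3]]])  # voter 1 ranks alt 2 first, alt 0 second
res = top2_full(votes)
print(res[0], res[4])    # 0.5 0.5 -> alt 0 ranked first; alt 0 first with alt 1 second
print(res[2], res[10])   # 0.5 0.5 -> alt 2 ranked first; alt 2 first with alt 0 second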
def deserialize_frame(stream, header, verifier=None):
"""Deserializes a frame from a body.
:param stream: Source data stream
:type stream: io.BytesIO
:param header: Deserialized header
:type header: aws_encryption_sdk.structures.MessageHeader
:param verifier: Signature verifier object (optional)
:type verifier: aws_encryption_sdk.internal.crypto.Verifier
:returns: Deserialized frame and a boolean stating if this is the final frame
:rtype: :class:`aws_encryption_sdk.internal.structures.MessageFrameBody` and bool
"""
_LOGGER.debug("Starting frame deserialization")
frame_data = {}
final_frame = False
(sequence_number,) = unpack_values(">I", stream, verifier)
if sequence_number == SequenceIdentifier.SEQUENCE_NUMBER_END.value:
_LOGGER.debug("Deserializing final frame")
(sequence_number,) = unpack_values(">I", stream, verifier)
final_frame = True
else:
_LOGGER.debug("Deserializing frame sequence number %d", int(sequence_number))
frame_data["final_frame"] = final_frame
frame_data["sequence_number"] = sequence_number
(frame_iv,) = unpack_values(">{iv_len}s".format(iv_len=header.algorithm.iv_len), stream, verifier)
frame_data["iv"] = frame_iv
if final_frame is True:
(content_length,) = unpack_values(">I", stream, verifier)
if content_length >= header.frame_length:
raise SerializationError(
"Invalid final frame length: {final} >= {normal}".format(
final=content_length, normal=header.frame_length
)
)
else:
content_length = header.frame_length
(frame_content, frame_tag) = unpack_values(
">{content_len}s{auth_len}s".format(content_len=content_length, auth_len=header.algorithm.auth_len),
stream,
verifier,
)
frame_data["ciphertext"] = frame_content
frame_data["tag"] = frame_tag
return MessageFrameBody(**frame_data), final_frame | Deserializes a frame from a body.
:param stream: Source data stream
:type stream: io.BytesIO
:param header: Deserialized header
:type header: aws_encryption_sdk.structures.MessageHeader
:param verifier: Signature verifier object (optional)
:type verifier: aws_encryption_sdk.internal.crypto.Verifier
:returns: Deserialized frame and a boolean stating if this is the final frame
:rtype: :class:`aws_encryption_sdk.internal.structures.MessageFrameBody` and bool | Below is the instruction that describes the task:
### Input:
Deserializes a frame from a body.
:param stream: Source data stream
:type stream: io.BytesIO
:param header: Deserialized header
:type header: aws_encryption_sdk.structures.MessageHeader
:param verifier: Signature verifier object (optional)
:type verifier: aws_encryption_sdk.internal.crypto.Verifier
:returns: Deserialized frame and a boolean stating if this is the final frame
:rtype: :class:`aws_encryption_sdk.internal.structures.MessageFrameBody` and bool
### Response:
def deserialize_frame(stream, header, verifier=None):
"""Deserializes a frame from a body.
:param stream: Source data stream
:type stream: io.BytesIO
:param header: Deserialized header
:type header: aws_encryption_sdk.structures.MessageHeader
:param verifier: Signature verifier object (optional)
:type verifier: aws_encryption_sdk.internal.crypto.Verifier
:returns: Deserialized frame and a boolean stating if this is the final frame
:rtype: :class:`aws_encryption_sdk.internal.structures.MessageFrameBody` and bool
"""
_LOGGER.debug("Starting frame deserialization")
frame_data = {}
final_frame = False
(sequence_number,) = unpack_values(">I", stream, verifier)
if sequence_number == SequenceIdentifier.SEQUENCE_NUMBER_END.value:
_LOGGER.debug("Deserializing final frame")
(sequence_number,) = unpack_values(">I", stream, verifier)
final_frame = True
else:
_LOGGER.debug("Deserializing frame sequence number %d", int(sequence_number))
frame_data["final_frame"] = final_frame
frame_data["sequence_number"] = sequence_number
(frame_iv,) = unpack_values(">{iv_len}s".format(iv_len=header.algorithm.iv_len), stream, verifier)
frame_data["iv"] = frame_iv
if final_frame is True:
(content_length,) = unpack_values(">I", stream, verifier)
if content_length >= header.frame_length:
raise SerializationError(
"Invalid final frame length: {final} >= {normal}".format(
final=content_length, normal=header.frame_length
)
)
else:
content_length = header.frame_length
(frame_content, frame_tag) = unpack_values(
">{content_len}s{auth_len}s".format(content_len=content_length, auth_len=header.algorithm.auth_len),
stream,
verifier,
)
frame_data["ciphertext"] = frame_content
frame_data["tag"] = frame_tag
return MessageFrameBody(**frame_data), final_frame |
def install(
engine,
n_creatures=5,
n_sickles=3,
malaria_chance=.05,
mate_chance=.05,
mapsize=(1, 1),
startpos=(0, 0)
):
"""Natural Selection on Sickle Cell Anemia
If anyone carries a pair of sickle betaglobin genes, they die of
sickle cell anemia.
Individuals with 1x betaglobin, 1x sickle betaglobin are immune to
malaria.
"""
initmap = nx.grid_2d_graph(*mapsize)
phys = engine.new_character("physical", data=initmap)
species = engine.new_character(
"species",
mate_chance=mate_chance,
malaria_chance=malaria_chance,
n_creatures=n_creatures,
)
for n in range(0, n_creatures):
name = "critter" + str(n)
phys.add_thing(
name=name,
location=startpos,
sickle_a=(n < n_sickles),
sickle_b=False,
male=engine.coinflip(),
last_mate_turn=-1
)
assert name in phys.thing
assert name not in phys.place
assert name in phys.node, "couldn't add node {} to phys.node".format(name)
assert hasattr(phys.node[name], 'location')
species.add_avatar("physical", name)
assert hasattr(species.avatar['physical'][name], 'location')
# putting dieoff earlier in the code than mate means that dieoff will
# be followed before mate is
@species.avatar.rule
def dieoff(critter):
critter.delete()
assert (critter.name not in critter.character.node)
if critter['from_malaria']:
return 'malaria'
else:
return 'anemia'
@species.avatar.rule
def mate(critter):
"""If I share my location with another critter, attempt to mate"""
suitors = list(
oc for oc in critter.location.contents()
if oc['male'] != critter['male']
)
assert (len(suitors) > 0)
other_critter = critter.engine.choice(suitors)
sickles = [
critter['sickle_a'],
critter['sickle_b'],
other_critter['sickle_a'],
other_critter['sickle_b']
]
engine.shuffle(sickles)
name = "critter" + str(species.stat["n_creatures"])
species.stat["n_creatures"] += 1
engine.character["physical"].add_thing(
name,
critter["location"],
sickle_a=sickles.pop(),
sickle_b=sickles.pop(),
male=engine.coinflip(),
last_mate_turn=engine.turn
)
species.add_avatar("physical", name)
critter['last_mate_turn'] = other_critter['last_mate_turn'] = \
engine.turn
return 'mated'
@mate.prereq
def once_per_turn(critter):
return critter['last_mate_turn'] < critter.engine.turn
@mate.prereq
def mate_present(critter):
for oc in critter.location.contents():
if oc['male'] != critter['male']:
return True
return False
@mate.trigger
def in_the_mood(critter):
return critter.engine.random() < critter.user.stat['mate_chance']
@dieoff.trigger
def sickle2(critter):
r = critter['sickle_a'] and critter['sickle_b']
if r:
critter['from_malaria'] = False
return r
@dieoff.trigger
def malaria(critter):
r = (
critter.engine.random() < critter.user.stat['malaria_chance'] and not
(critter['sickle_a'] or critter['sickle_b'])
)
if r:
critter['from_malaria'] = True
return r
# it would make more sense to keep using species.avatar.rule, this
# is just a test
@phys.thing.rule
def wander(critter):
dests = list(critter.character.place.keys())
dests.remove(critter['location'])
dest = critter.engine.choice(dests)
critter.travel_to(dest)
@wander.trigger
def not_travelling(critter):
return critter.next_location is None
@wander.prereq
def big_map(critter):
return len(critter.character.place) > 1 | Natural Selection on Sickle Cell Anemia
If anyone carries a pair of sickle betaglobin genes, they die of
sickle cell anemia.
Individuals with 1x betaglobin, 1x sickle betaglobin are immune to
malaria. | Below is the instruction that describes the task:
### Input:
Natural Selection on Sickle Cell Anemia
If anyone carries a pair of sickle betaglobin genes, they die of
sickle cell anemia.
Individuals with 1x betaglobin, 1x sickle betaglobin are immune to
malaria.
### Response:
def install(
engine,
n_creatures=5,
n_sickles=3,
malaria_chance=.05,
mate_chance=.05,
mapsize=(1, 1),
startpos=(0, 0)
):
"""Natural Selection on Sickle Cell Anemia
If anyone carries a pair of sickle betaglobin genes, they die of
sickle cell anemia.
Individuals with 1x betaglobin, 1x sickle betaglobin are immune to
malaria.
"""
initmap = nx.grid_2d_graph(*mapsize)
phys = engine.new_character("physical", data=initmap)
species = engine.new_character(
"species",
mate_chance=mate_chance,
malaria_chance=malaria_chance,
n_creatures=n_creatures,
)
for n in range(0, n_creatures):
name = "critter" + str(n)
phys.add_thing(
name=name,
location=startpos,
sickle_a=(n < n_sickles),
sickle_b=False,
male=engine.coinflip(),
last_mate_turn=-1
)
assert name in phys.thing
assert name not in phys.place
assert name in phys.node, "couldn't add node {} to phys.node".format(name)
assert hasattr(phys.node[name], 'location')
species.add_avatar("physical", name)
assert hasattr(species.avatar['physical'][name], 'location')
# putting dieoff earlier in the code than mate means that dieoff will
# be followed before mate is
@species.avatar.rule
def dieoff(critter):
critter.delete()
assert (critter.name not in critter.character.node)
if critter['from_malaria']:
return 'malaria'
else:
return 'anemia'
@species.avatar.rule
def mate(critter):
"""If I share my location with another critter, attempt to mate"""
suitors = list(
oc for oc in critter.location.contents()
if oc['male'] != critter['male']
)
assert (len(suitors) > 0)
other_critter = critter.engine.choice(suitors)
sickles = [
critter['sickle_a'],
critter['sickle_b'],
other_critter['sickle_a'],
other_critter['sickle_b']
]
engine.shuffle(sickles)
name = "critter" + str(species.stat["n_creatures"])
species.stat["n_creatures"] += 1
engine.character["physical"].add_thing(
name,
critter["location"],
sickle_a=sickles.pop(),
sickle_b=sickles.pop(),
male=engine.coinflip(),
last_mate_turn=engine.turn
)
species.add_avatar("physical", name)
critter['last_mate_turn'] = other_critter['last_mate_turn'] = \
engine.turn
return 'mated'
@mate.prereq
def once_per_turn(critter):
return critter['last_mate_turn'] < critter.engine.turn
@mate.prereq
def mate_present(critter):
for oc in critter.location.contents():
if oc['male'] != critter['male']:
return True
return False
@mate.trigger
def in_the_mood(critter):
return critter.engine.random() < critter.user.stat['mate_chance']
@dieoff.trigger
def sickle2(critter):
r = critter['sickle_a'] and critter['sickle_b']
if r:
critter['from_malaria'] = False
return r
@dieoff.trigger
def malaria(critter):
r = (
critter.engine.random() < critter.user.stat['malaria_chance'] and not
(critter['sickle_a'] or critter['sickle_b'])
)
if r:
critter['from_malaria'] = True
return r
# it would make more sense to keep using species.avatar.rule, this
# is just a test
@phys.thing.rule
def wander(critter):
dests = list(critter.character.place.keys())
dests.remove(critter['location'])
dest = critter.engine.choice(dests)
critter.travel_to(dest)
@wander.trigger
def not_travelling(critter):
return critter.next_location is None
@wander.prereq
def big_map(critter):
return len(critter.character.place) > 1 |
def RelaxNGValidateCtxt(self, reader, options):
"""Use RelaxNG schema context to validate the document as it
is processed. Activation is only possible before the first
Read(). If @ctxt is None, then RelaxNG schema validation is
deactivated. """
if reader is None: reader__o = None
else: reader__o = reader._o
ret = libxml2mod.xmlTextReaderRelaxNGValidateCtxt(reader__o, self._o, options)
return ret | Use RelaxNG schema context to validate the document as it
is processed. Activation is only possible before the first
Read(). If @ctxt is None, then RelaxNG schema validation is
deactivated. | Below is the instruction that describes the task:
### Input:
Use RelaxNG schema context to validate the document as it
is processed. Activation is only possible before the first
Read(). If @ctxt is None, then RelaxNG schema validation is
deactivated.
### Response:
def RelaxNGValidateCtxt(self, reader, options):
"""Use RelaxNG schema context to validate the document as it
is processed. Activation is only possible before the first
Read(). If @ctxt is None, then RelaxNG schema validation is
deactivated. """
if reader is None: reader__o = None
else: reader__o = reader._o
ret = libxml2mod.xmlTextReaderRelaxNGValidateCtxt(reader__o, self._o, options)
return ret |
def _iter_unfolded_lines(self):
"""Iter input unfoled lines. Skip comments."""
line = self._input_file.readline()
while line:
self.line_counter += 1
self.byte_counter += len(line)
line = self._strip_line_sep(line)
nextline = self._input_file.readline()
while nextline and nextline[:1] == b' ':
line += self._strip_line_sep(nextline)[1:]
nextline = self._input_file.readline()
if not line.startswith(b'#'):
yield line
line = nextline | Iterate over unfolded input lines. Skip comments. | Below is the instruction that describes the task:
### Input:
Iterate over unfolded input lines. Skip comments.
### Response:
def _iter_unfolded_lines(self):
"""Iter input unfoled lines. Skip comments."""
line = self._input_file.readline()
while line:
self.line_counter += 1
self.byte_counter += len(line)
line = self._strip_line_sep(line)
nextline = self._input_file.readline()
while nextline and nextline[:1] == b' ':
line += self._strip_line_sep(nextline)[1:]
nextline = self._input_file.readline()
if not line.startswith(b'#'):
yield line
line = nextline |
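The unfolding rule can be exercised on its own. The sketch below re-implements it as a free generator over an io.BytesIO, using rstrip(b'\r\n') as a stand-in for the class's _strip_line_sep helper (an assumption about what that helper does):
import io
def iter_unfolded(fileobj):
    # continuation lines start with a space; lines starting with b'#' are comments
    line = fileobj.readline()
    while line:
        line = line.rstrip(b'\r\n')
        nextline = fileobj.readline()
        while nextline and nextline[:1] == b' ':
            line += nextline.rstrip(b'\r\n')[1:]
            nextline = fileobj.readline()
        if not line.startswith(b'#'):
            yield line
        line = nextline
data = io.BytesIO(b"Key: value\n continued\n# a comment\nOther: x\n")
print(list(iter_unfolded(data)))
# [b'Key: valuecontinued', b'Other: x']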
def get_select_items(items):
"""Return list of possible select items."""
option_items = list()
for item in items:
if isinstance(item, dict) and defs.VALUE in item and defs.LABEL in item:
option_items.append(item[defs.VALUE])
else:
raise exceptions.ParametersFieldError(item, "a dictionary with {} and {}"
.format(defs.LABEL, defs.VALUE))
return option_items | Return list of possible select items. | Below is the instruction that describes the task:
### Input:
Return list of possible select items.
### Response:
def get_select_items(items):
"""Return list of possible select items."""
option_items = list()
for item in items:
if isinstance(item, dict) and defs.VALUE in item and defs.LABEL in item:
option_items.append(item[defs.VALUE])
else:
raise exceptions.ParametersFieldError(item, "a dictionary with {} and {}"
.format(defs.LABEL, defs.VALUE))
return option_items |
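A hypothetical call, assuming defs.VALUE == 'value' and defs.LABEL == 'label' (the real constant values live in the package's defs module, so this only illustrates the expected item shape):
items = [{'value': 'red', 'label': 'Red'},
         {'value': 'blue', 'label': 'Blue'}]
print(get_select_items(items))
# ['red', 'blue']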
def _set_name_server(self, v, load=False):
"""
Setter method for name_server, mapped from YANG variable /ip/dns/name_server (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_name_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name_server() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name_server_ip",name_server.name_server, yang_name="name-server", rest_name="name-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-server-ip', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}), is_container='list', yang_name="name-server", rest_name="name-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-administration', defining_module='brocade-ip-administration', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name_server must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name_server_ip",name_server.name_server, yang_name="name-server", rest_name="name-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-server-ip', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}), is_container='list', yang_name="name-server", rest_name="name-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-administration', defining_module='brocade-ip-administration', yang_type='list', is_config=True)""",
})
self.__name_server = t
if hasattr(self, '_set'):
self._set() | Setter method for name_server, mapped from YANG variable /ip/dns/name_server (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_name_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name_server() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for name_server, mapped from YANG variable /ip/dns/name_server (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_name_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name_server() directly.
### Response:
def _set_name_server(self, v, load=False):
"""
Setter method for name_server, mapped from YANG variable /ip/dns/name_server (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_name_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name_server() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name_server_ip",name_server.name_server, yang_name="name-server", rest_name="name-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-server-ip', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}), is_container='list', yang_name="name-server", rest_name="name-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-administration', defining_module='brocade-ip-administration', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name_server must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name_server_ip",name_server.name_server, yang_name="name-server", rest_name="name-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-server-ip', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}), is_container='list', yang_name="name-server", rest_name="name-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'callpoint': u'IpadmNameSrvCallpoint', u'info': u' Name Server configurations', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-administration', defining_module='brocade-ip-administration', yang_type='list', is_config=True)""",
})
self.__name_server = t
if hasattr(self, '_set'):
self._set() |
def visitLexerBlock(self, ctx: jsgParser.LexerBlockContext):
""" lexerBlock: OPREN lexeraltList CPREN """
self._rulePattern += '('
self.visitChildren(ctx)
self._rulePattern += ')' | lexerBlock: OPREN lexeraltList CPREN | Below is the instruction that describes the task:
### Input:
lexerBlock: OPREN lexeraltList CPREN
### Response:
def visitLexerBlock(self, ctx: jsgParser.LexerBlockContext):
""" lexerBlock: OPREN lexeraltList CPREN """
self._rulePattern += '('
self.visitChildren(ctx)
self._rulePattern += ')' |
def from_cif_string(cif_string, transformations=None, primitive=True,
occupancy_tolerance=1.):
"""
Generates TransformedStructure from a cif string.
Args:
cif_string (str): Input cif string. Should contain only one
structure. For cifs containing multiple structures, please use
CifTransmuter.
transformations ([Transformations]): Sequence of transformations
to be applied to the input structure.
primitive (bool): Option to set if the primitive cell should be
extracted. Defaults to True. However, there are certain
instances where you might want to use a non-primitive cell,
e.g., if you are trying to generate all possible orderings of
partial removals or order a disordered structure.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
TransformedStructure
"""
parser = CifParser.from_string(cif_string, occupancy_tolerance)
raw_string = re.sub(r"'", "\"", cif_string)
cif_dict = parser.as_dict()
cif_keys = list(cif_dict.keys())
s = parser.get_structures(primitive)[0]
partial_cif = cif_dict[cif_keys[0]]
if "_database_code_ICSD" in partial_cif:
source = partial_cif["_database_code_ICSD"] + "-ICSD"
else:
source = "uploaded cif"
source_info = {"source": source,
"datetime": str(datetime.datetime.now()),
"original_file": raw_string,
"cif_data": cif_dict[cif_keys[0]]}
return TransformedStructure(s, transformations, history=[source_info]) | Generates TransformedStructure from a cif string.
Args:
cif_string (str): Input cif string. Should contain only one
structure. For cifs containing multiple structures, please use
CifTransmuter.
transformations ([Transformations]): Sequence of transformations
to be applied to the input structure.
primitive (bool): Option to set if the primitive cell should be
extracted. Defaults to True. However, there are certain
instances where you might want to use a non-primitive cell,
e.g., if you are trying to generate all possible orderings of
partial removals or order a disordered structure.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
TransformedStructure | Below is the instruction that describes the task:
### Input:
Generates TransformedStructure from a cif string.
Args:
cif_string (str): Input cif string. Should contain only one
structure. For cifs containing multiple structures, please use
CifTransmuter.
transformations ([Transformations]): Sequence of transformations
to be applied to the input structure.
primitive (bool): Option to set if the primitive cell should be
extracted. Defaults to True. However, there are certain
instances where you might want to use a non-primitive cell,
e.g., if you are trying to generate all possible orderings of
partial removals or order a disordered structure.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
TransformedStructure
### Response:
def from_cif_string(cif_string, transformations=None, primitive=True,
occupancy_tolerance=1.):
"""
Generates TransformedStructure from a cif string.
Args:
cif_string (str): Input cif string. Should contain only one
structure. For cifs containing multiple structures, please use
CifTransmuter.
transformations ([Transformations]): Sequence of transformations
to be applied to the input structure.
primitive (bool): Option to set if the primitive cell should be
extracted. Defaults to True. However, there are certain
instances where you might want to use a non-primitive cell,
e.g., if you are trying to generate all possible orderings of
partial removals or order a disordered structure.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
TransformedStructure
"""
parser = CifParser.from_string(cif_string, occupancy_tolerance)
raw_string = re.sub(r"'", "\"", cif_string)
cif_dict = parser.as_dict()
cif_keys = list(cif_dict.keys())
s = parser.get_structures(primitive)[0]
partial_cif = cif_dict[cif_keys[0]]
if "_database_code_ICSD" in partial_cif:
source = partial_cif["_database_code_ICSD"] + "-ICSD"
else:
source = "uploaded cif"
source_info = {"source": source,
"datetime": str(datetime.datetime.now()),
"original_file": raw_string,
"cif_data": cif_dict[cif_keys[0]]}
return TransformedStructure(s, transformations, history=[source_info]) |
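A hedged usage sketch, assuming pymatgen's public API for TransformedStructure and the standard transformations module; the file name and the Fe->Mn substitution are placeholders:
from pymatgen.alchemy.materials import TransformedStructure
from pymatgen.transformations.standard_transformations import SubstitutionTransformation
with open('my_structure.cif') as f:          # placeholder CIF file
    cif_text = f.read()
ts = TransformedStructure.from_cif_string(
    cif_text, transformations=[SubstitutionTransformation({'Fe': 'Mn'})])
print(ts.final_structure)                    # structure after the queued transformation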
def crypto_kx_seed_keypair(seed):
"""
Generate a keypair with a given seed.
This is functionally the same as crypto_box_seed_keypair, however
it uses the blake2b hash primitive instead of sha512.
It is included mainly for api consistency when using crypto_kx.
:param seed: random seed
:type seed: bytes
:return: (public_key, secret_key)
:rtype: (bytes, bytes)
"""
public_key = ffi.new("unsigned char[]", crypto_kx_PUBLIC_KEY_BYTES)
secret_key = ffi.new("unsigned char[]", crypto_kx_SECRET_KEY_BYTES)
ensure(isinstance(seed, bytes) and
len(seed) == crypto_kx_SEED_BYTES,
'Seed must be a {0} byte long bytes sequence'.format(
crypto_kx_SEED_BYTES),
raising=exc.TypeError)
res = lib.crypto_kx_seed_keypair(public_key, secret_key, seed)
ensure(res == 0, "Key generation failed.", raising=exc.CryptoError)
return (ffi.buffer(public_key, crypto_kx_PUBLIC_KEY_BYTES)[:],
ffi.buffer(secret_key, crypto_kx_SECRET_KEY_BYTES)[:]) | Generate a keypair with a given seed.
This is functionally the same as crypto_box_seed_keypair, however
it uses the blake2b hash primitive instead of sha512.
It is included mainly for api consistency when using crypto_kx.
:param seed: random seed
:type seed: bytes
:return: (public_key, secret_key)
:rtype: (bytes, bytes) | Below is the instruction that describes the task:
### Input:
Generate a keypair with a given seed.
This is functionally the same as crypto_box_seed_keypair, however
it uses the blake2b hash primitive instead of sha512.
It is included mainly for api consistency when using crypto_kx.
:param seed: random seed
:type seed: bytes
:return: (public_key, secret_key)
:rtype: (bytes, bytes)
### Response:
def crypto_kx_seed_keypair(seed):
"""
Generate a keypair with a given seed.
This is functionally the same as crypto_box_seed_keypair, however
it uses the blake2b hash primitive instead of sha512.
It is included mainly for api consistency when using crypto_kx.
:param seed: random seed
:type seed: bytes
:return: (public_key, secret_key)
:rtype: (bytes, bytes)
"""
public_key = ffi.new("unsigned char[]", crypto_kx_PUBLIC_KEY_BYTES)
secret_key = ffi.new("unsigned char[]", crypto_kx_SECRET_KEY_BYTES)
ensure(isinstance(seed, bytes) and
len(seed) == crypto_kx_SEED_BYTES,
'Seed must be a {0} byte long bytes sequence'.format(
crypto_kx_SEED_BYTES),
raising=exc.TypeError)
res = lib.crypto_kx_seed_keypair(public_key, secret_key, seed)
ensure(res == 0, "Key generation failed.", raising=exc.CryptoError)
return (ffi.buffer(public_key, crypto_kx_PUBLIC_KEY_BYTES)[:],
ffi.buffer(secret_key, crypto_kx_SECRET_KEY_BYTES)[:]) |
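An illustrative use via PyNaCl's low-level bindings, assuming the function and its size constants are re-exported from nacl.bindings (hedged; check your PyNaCl version). The same 32-byte seed always yields the same keypair:
import os
from nacl import bindings
seed = os.urandom(bindings.crypto_kx_SEED_BYTES)      # 32 random bytes
pk1, sk1 = bindings.crypto_kx_seed_keypair(seed)
pk2, sk2 = bindings.crypto_kx_seed_keypair(seed)
assert (pk1, sk1) == (pk2, sk2)                       # deterministic for a given seed
assert len(pk1) == bindings.crypto_kx_PUBLIC_KEY_BYTES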
def input_loop():
'''wait for user input'''
while mpstate.status.exit != True:
try:
if mpstate.status.exit != True:
line = input(mpstate.rl.prompt)
except EOFError:
mpstate.status.exit = True
sys.exit(1)
mpstate.input_queue.put(line) | wait for user input | Below is the instruction that describes the task:
### Input:
wait for user input
### Response:
def input_loop():
'''wait for user input'''
while mpstate.status.exit != True:
try:
if mpstate.status.exit != True:
line = input(mpstate.rl.prompt)
except EOFError:
mpstate.status.exit = True
sys.exit(1)
mpstate.input_queue.put(line) |
def merge_deployment_data(dict1: DeployedContracts, dict2: DeployedContracts) -> DeployedContracts:
""" Take contents of two deployment JSON files and merge them
The dictionary under 'contracts' key will be merged. The 'contracts'
contents from different JSON files must not overlap. The contents
under other keys must be identical.
"""
if not dict1:
return dict2
if not dict2:
return dict1
common_contracts: Dict[str, DeployedContract] = deepcopy(dict1['contracts'])
assert not common_contracts.keys() & dict2['contracts'].keys()
common_contracts.update(dict2['contracts'])
assert dict2['chain_id'] == dict1['chain_id']
assert dict2['contracts_version'] == dict1['contracts_version']
return {
'contracts': common_contracts,
'chain_id': dict1['chain_id'],
'contracts_version': dict1['contracts_version'],
} | Take contents of two deployment JSON files and merge them
The dictionary under 'contracts' key will be merged. The 'contracts'
contents from different JSON files must not overlap. The contents
under other keys must be identical. | Below is the instruction that describes the task:
### Input:
Take contents of two deployment JSON files and merge them
The dictionary under 'contracts' key will be merged. The 'contracts'
contents from different JSON files must not overlap. The contents
under other keys must be identical.
### Response:
def merge_deployment_data(dict1: DeployedContracts, dict2: DeployedContracts) -> DeployedContracts:
""" Take contents of two deployment JSON files and merge them
The dictionary under 'contracts' key will be merged. The 'contracts'
contents from different JSON files must not overlap. The contents
under other keys must be identical.
"""
if not dict1:
return dict2
if not dict2:
return dict1
common_contracts: Dict[str, DeployedContract] = deepcopy(dict1['contracts'])
assert not common_contracts.keys() & dict2['contracts'].keys()
common_contracts.update(dict2['contracts'])
assert dict2['chain_id'] == dict1['chain_id']
assert dict2['contracts_version'] == dict1['contracts_version']
return {
'contracts': common_contracts,
'chain_id': dict1['chain_id'],
'contracts_version': dict1['contracts_version'],
} |
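A minimal illustration with two hand-written deployment dictionaries (the contract names and addresses are placeholders; DeployedContracts is simply a dict of this shape):
dict1 = {'chain_id': 3, 'contracts_version': '0.4.0',
         'contracts': {'TokenNetworkRegistry': {'address': '0x11'}}}
dict2 = {'chain_id': 3, 'contracts_version': '0.4.0',
         'contracts': {'SecretRegistry': {'address': '0x22'}}}
merged = merge_deployment_data(dict1, dict2)
print(sorted(merged['contracts']))
# ['SecretRegistry', 'TokenNetworkRegistry']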
def transpose(self, name=None):
"""Returns matching `Conv1D` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.name`.
Returns:
`Conv1D` module.
"""
if name is None:
name = self.module_name + "_transpose"
if self._data_format == DATA_FORMAT_NWC:
stride = self._stride[1:-1]
else: # self._data_format == DATA_FORMAT_NCW
stride = self._stride[2:]
return Conv1D(output_channels=lambda: self.input_channels,
kernel_shape=self.kernel_shape,
stride=stride,
padding=self.padding,
use_bias=self._use_bias,
initializers=self.initializers,
partitioners=self.partitioners,
regularizers=self.regularizers,
data_format=self._data_format,
custom_getter=self._custom_getter,
name=name) | Returns matching `Conv1D` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.name`.
Returns:
`Conv1D` module. | Below is the instruction that describes the task:
### Input:
Returns matching `Conv1D` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.name`.
Returns:
`Conv1D` module.
### Response:
def transpose(self, name=None):
"""Returns matching `Conv1D` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.name`.
Returns:
`Conv1D` module.
"""
if name is None:
name = self.module_name + "_transpose"
if self._data_format == DATA_FORMAT_NWC:
stride = self._stride[1:-1]
else: # self._data_format == DATA_FORMAT_NCW
stride = self._stride[2:]
return Conv1D(output_channels=lambda: self.input_channels,
kernel_shape=self.kernel_shape,
stride=stride,
padding=self.padding,
use_bias=self._use_bias,
initializers=self.initializers,
partitioners=self.partitioners,
regularizers=self.regularizers,
data_format=self._data_format,
custom_getter=self._custom_getter,
name=name) |
def _mul8(ins):
""" Multiplies 2 las values from the stack.
Optimizations:
* If any of the ops is ZERO,
then do A = 0 ==> XOR A, cause A * 0 = 0 * A = 0
* If any of the ops is ONE, do NOTHING
A * 1 = 1 * A = A
"""
op1, op2 = tuple(ins.quad[2:])
if _int_ops(op1, op2) is not None:
op1, op2 = _int_ops(op1, op2)
output = _8bit_oper(op1)
if op2 == 1: # A * 1 = 1 * A = A
output.append('push af')
return output
if op2 == 0:
output.append('xor a')
output.append('push af')
return output
if op2 == 2: # A * 2 == A SLA 1
output.append('add a, a')
output.append('push af')
return output
if op2 == 4: # A * 4 == A SLA 2
output.append('add a, a')
output.append('add a, a')
output.append('push af')
return output
output.append('ld h, %i' % int8(op2))
else:
if op2[0] == '_': # stack optimization
op1, op2 = op2, op1
output = _8bit_oper(op1, op2)
output.append('call __MUL8_FAST') # Immediate
output.append('push af')
REQUIRES.add('mul8.asm')
return output | Multiplies the 2 last values from the stack.
Optimizations:
* If any of the ops is ZERO,
then do A = 0 ==> XOR A, cause A * 0 = 0 * A = 0
* If any of the ops is ONE, do NOTHING
A * 1 = 1 * A = A | Below is the instruction that describes the task:
### Input:
Multiplies the 2 last values from the stack.
Optimizations:
* If any of the ops is ZERO,
then do A = 0 ==> XOR A, cause A * 0 = 0 * A = 0
* If any of the ops is ONE, do NOTHING
A * 1 = 1 * A = A
### Response:
def _mul8(ins):
""" Multiplies 2 las values from the stack.
Optimizations:
* If any of the ops is ZERO,
then do A = 0 ==> XOR A, cause A * 0 = 0 * A = 0
* If any of the ops is ONE, do NOTHING
A * 1 = 1 * A = A
"""
op1, op2 = tuple(ins.quad[2:])
if _int_ops(op1, op2) is not None:
op1, op2 = _int_ops(op1, op2)
output = _8bit_oper(op1)
if op2 == 1: # A * 1 = 1 * A = A
output.append('push af')
return output
if op2 == 0:
output.append('xor a')
output.append('push af')
return output
if op2 == 2: # A * 2 == A SLA 1
output.append('add a, a')
output.append('push af')
return output
if op2 == 4: # A * 4 == A SLA 2
output.append('add a, a')
output.append('add a, a')
output.append('push af')
return output
output.append('ld h, %i' % int8(op2))
else:
if op2[0] == '_': # stack optimization
op1, op2 = op2, op1
output = _8bit_oper(op1, op2)
output.append('call __MUL8_FAST') # Inmmediate
output.append('push af')
REQUIRES.add('mul8.asm')
return output |
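The shift-style optimizations above rely on simple 8-bit identities; the loop below is only a sanity check of those identities in plain Python (not part of the code generator), with wrap-around at 256 mirroring the Z80 'add a, a' behaviour:
for a in range(256):
    double = (a + a) & 0xFF
    assert (a * 2) & 0xFF == double                    # A * 2 == A + A
    assert (a * 4) & 0xFF == (double + double) & 0xFF  # A * 4 == two doublings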
def create_iopub_stream(self, kernel_id):
"""Create a new iopub stream."""
self._check_kernel_id(kernel_id)
return super(MappingKernelManager, self).create_iopub_stream(kernel_id) | Create a new iopub stream. | Below is the instruction that describes the task:
### Input:
Create a new iopub stream.
### Response:
def create_iopub_stream(self, kernel_id):
"""Create a new iopub stream."""
self._check_kernel_id(kernel_id)
return super(MappingKernelManager, self).create_iopub_stream(kernel_id) |
def dotplot(adata, var_names, groupby=None, use_raw=None, log=False, num_categories=7,
expression_cutoff=0., mean_only_expressed=False, color_map='Reds', dot_max=None,
dot_min=None, figsize=None, dendrogram=False, gene_symbols=None,
var_group_positions=None, standard_scale=None, smallest_dot=0.,
var_group_labels=None, var_group_rotation=None, layer=None, show=None,
save=None, **kwds):
"""\
Makes a *dot plot* of the expression values of `var_names`.
For each var_name and each `groupby` category a dot is plotted. Each dot
represents two values: mean expression within each category (visualized by
color) and fraction of cells expressing the var_name in the
category (visualized by the size of the dot). If groupby is not given, the
dotplot assumes that all data belongs to a single category.
**Note**: A gene is considered expressed if the expression value in the adata
(or adata.raw) is above the specified threshold which is zero by default.
An example of dotplot usage is to visualize, for multiple marker genes,
the mean value and the percentage of cells expressing the gene across multiple clusters.
Parameters
----------
{common_plot_args}
expression_cutoff : `float` (default: `0.`)
Expression cutoff that is used for binarizing the gene expression and determining the fraction
of cells expressing given genes. A gene is expressed only if the expression value is greater than
this threshold.
mean_only_expressed : `bool` (default: `False`)
If True, gene expression is averaged only over the cells expressing the given genes.
color_map : `str`, optional (default: `Reds`)
String denoting matplotlib color map.
dot_max : `float` optional (default: `None`)
If none, the maximum dot size is set to the maximum fraction value found (e.g. 0.6). If given,
the value should be a number between 0 and 1. All fractions larger than dot_max are clipped to
this value.
dot_min : `float` optional (default: `None`)
If none, the minimum dot size is set to 0. If given,
the value should be a number between 0 and 1. All fractions smaller than dot_min are clipped to
this value.
standard_scale : {{'var', 'group'}}, optional (default: None)
Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group,
subtract the minimum and divide each by its maximum.
smallest_dot : `float` optional (default: 0.)
If none, the smallest dot has size 0. All expression levels with `dot_min` are plotted with
`smallest_dot` dot size.
{show_save_ax}
**kwds : keyword arguments
Are passed to `matplotlib.pyplot.scatter`.
Returns
-------
List of :class:`~matplotlib.axes.Axes`
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
... groupby='bulk_labels', dendrogram=True)
"""
if use_raw is None and adata.raw is not None: use_raw = True
if isinstance(var_names, str):
var_names = [var_names]
categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, num_categories,
layer=layer, gene_symbols=gene_symbols)
# for each category defined by groupby (if any), compute for each var_name
# 1. the fraction of cells in the category having a value > expression_cutoff
# 2. the mean value over the category
# 1. compute fraction of cells having value > expression_cutoff
# transform obs_tidy into boolean matrix using the expression_cutoff
obs_bool = obs_tidy > expression_cutoff
# compute the sum per group which in the boolean matrix this is the number
# of values > expression_cutoff, and divide the result by the total number of values
# in the group (given by `count()`)
fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count()
# 2. compute mean value
if mean_only_expressed:
mean_obs = obs_tidy.mask(~obs_bool).groupby(level=0).mean().fillna(0)
else:
mean_obs = obs_tidy.groupby(level=0).mean()
if standard_scale == 'group':
mean_obs = mean_obs.sub(mean_obs.min(1), axis=0)
mean_obs = mean_obs.div(mean_obs.max(1), axis=0).fillna(0)
elif standard_scale == 'var':
mean_obs -= mean_obs.min(0)
mean_obs = (mean_obs / mean_obs.max(0)).fillna(0)
elif standard_scale is None:
pass
else:
logg.warn('Unknown type for standard_scale, ignored')
dendro_width = 0.8 if dendrogram else 0
colorbar_width = 0.2
colorbar_width_spacer = 0.5
size_legend_width = 0.25
if figsize is None:
height = len(categories) * 0.3 + 1 # +1 for labels
# if the number of categories is small (eg 1 or 2) use
# a larger height
height = max([1.5, height])
heatmap_width = len(var_names) * 0.35
width = heatmap_width + colorbar_width + size_legend_width + dendro_width + colorbar_width_spacer
else:
width, height = figsize
heatmap_width = width - (colorbar_width + size_legend_width + dendro_width + colorbar_width_spacer)
# colorbar ax width should not change with differences in the width of the image
# otherwise can become too small
if var_group_positions is not None and len(var_group_positions) > 0:
# add some space in case 'brackets' want to be plotted on top of the image
height_ratios = [0.5, 10]
else:
height_ratios = [0, 10.5]
# define a layout of 2 rows x 5 columns
# first row is for 'brackets' (if no brackets needed, the height of this row is zero)
# second row is for main content. This second row
# is divided into 5 axes:
# first ax is for the main figure
# second ax is for dendrogram (if present)
# third ax is for the color bar legend
# fourth ax is for a spacer that prevents the ticks
# of the color bar from being hidden beneath the size legend axis
# fifth ax is to plot the size legend
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(nrows=2, ncols=5, wspace=0.02, hspace=0.04,
width_ratios=[heatmap_width, dendro_width, colorbar_width, colorbar_width_spacer, size_legend_width],
height_ratios=height_ratios)
if len(categories) < 4:
# when few categories are shown, the colorbar and size legend
# need to be larger than the main plot, otherwise they would look
# compressed. For this, the dotplot ax is split into two:
axs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 0],
height_ratios=[len(categories) * 0.3, 1])
dot_ax = fig.add_subplot(axs2[0])
else:
dot_ax = fig.add_subplot(axs[1, 0])
color_legend = fig.add_subplot(axs[1, 2])
if groupby is None or len(categories) <= 1:
# dendrogram can only be computed between groupby categories
dendrogram = False
if dendrogram:
dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram,
var_names=var_names,
var_group_labels=var_group_labels,
var_group_positions=var_group_positions)
var_group_labels = dendro_data['var_group_labels']
var_group_positions = dendro_data['var_group_positions']
# reorder matrix
if dendro_data['var_names_idx_ordered'] is not None:
# reorder columns (usually genes) if needed. This only happens when
# var_group_positions and var_group_labels is set
mean_obs = mean_obs.iloc[:,dendro_data['var_names_idx_ordered']]
fraction_obs = fraction_obs.iloc[:, dendro_data['var_names_idx_ordered']]
# reorder rows (categories) to match the dendrogram order
mean_obs = mean_obs.iloc[dendro_data['categories_idx_ordered'], :]
fraction_obs = fraction_obs.iloc[dendro_data['categories_idx_ordered'], :]
y_ticks = range(mean_obs.shape[0])
dendro_ax = fig.add_subplot(axs[1, 1], sharey=dot_ax)
_plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=y_ticks)
# to keep the size_legend at about the same height, irrespective
# of the number of categories, the fourth ax is subdivided into two parts
size_legend_height = min(1.3, height)
# wspace is proportional to the width but a constant value is
# needed such that the spacing is the same for thinner or wider images.
wspace = 10.5 / width
axs3 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 4], wspace=wspace,
height_ratios=[size_legend_height / height,
(height - size_legend_height) / height])
# make scatter plot in which
# x = var_names
# y = groupby category
# size = fraction
# color = mean expression
y, x = np.indices(mean_obs.shape)
y = y.flatten()
x = x.flatten()
frac = fraction_obs.values.flatten()
mean_flat = mean_obs.values.flatten()
cmap = pl.get_cmap(color_map)
if dot_max is None:
dot_max = np.ceil(max(frac) * 10) / 10
else:
if dot_max < 0 or dot_max > 1:
raise ValueError("`dot_max` value has to be between 0 and 1")
if dot_min is None:
dot_min = 0
else:
if dot_min < 0 or dot_min > 1:
raise ValueError("`dot_min` value has to be between 0 and 1")
if dot_min != 0 or dot_max != 1:
# clip frac between dot_min and dot_max
frac = np.clip(frac, dot_min, dot_max)
old_range = dot_max - dot_min
# re-scale frac between 0 and 1
frac = ((frac - dot_min) / old_range)
size = (frac * 10) ** 2
size += smallest_dot
import matplotlib.colors
normalize = matplotlib.colors.Normalize(vmin=kwds.get('vmin'), vmax=kwds.get('vmax'))
colors = cmap(normalize(mean_flat))
dot_ax.scatter(x, y, color=colors, s=size, cmap=cmap, norm=None, edgecolor='none', **kwds)
y_ticks = range(mean_obs.shape[0])
dot_ax.set_yticks(y_ticks)
dot_ax.set_yticklabels([mean_obs.index[idx] for idx in y_ticks])
x_ticks = range(mean_obs.shape[1])
dot_ax.set_xticks(x_ticks)
dot_ax.set_xticklabels([mean_obs.columns[idx] for idx in x_ticks], rotation=90)
dot_ax.tick_params(axis='both', labelsize='small')
dot_ax.grid(False)
dot_ax.set_xlim(-0.5, len(var_names) + 0.5)
dot_ax.set_ylabel(groupby)
# to be consistent with the heatmap plot, it is better to
# invert the order of the y-axis, such that the first group is on
# top
ymin, ymax = dot_ax.get_ylim()
dot_ax.set_ylim(ymax+0.5, ymin - 0.5)
dot_ax.set_xlim(-1, len(var_names))
# plot group legends on top of dot_ax (if given)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=dot_ax)
_plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions,
group_labels=var_group_labels,
rotation=var_group_rotation)
# plot colorbar
import matplotlib.colorbar
matplotlib.colorbar.ColorbarBase(color_legend, cmap=cmap, norm=normalize)
# for the dot size legend, use step between dot_max and dot_min
# based on how different they are.
diff = dot_max - dot_min
if 0.3 < diff <= 0.6:
step = 0.1
elif diff <= 0.3:
step = 0.05
else:
step = 0.2
# a descending range that is afterwards inverted is used
# to guarantee that dot_max is in the legend.
fracs_legends = np.arange(dot_max, dot_min, step * -1)[::-1]
if dot_min != 0 or dot_max != 1:
fracs_values = ((fracs_legends - dot_min) / old_range)
else:
fracs_values = fracs_legends
size = (fracs_values * 10) ** 2
size += smallest_dot
color = [cmap(normalize(value)) for value in np.repeat(max(mean_flat) * 0.7, len(size))]
# plot size bar
size_legend = fig.add_subplot(axs3[0])
size_legend.scatter(np.repeat(0, len(size)), range(len(size)), s=size, color=color)
size_legend.set_yticks(range(len(size)))
labels = ["{:.0%}".format(x) for x in fracs_legends]
if dot_max < 1:
labels[-1] = ">" + labels[-1]
size_legend.set_yticklabels(labels)
size_legend.tick_params(axis='y', left=False, labelleft=False, labelright=True)
# remove x ticks and labels
size_legend.tick_params(axis='x', bottom=False, labelbottom=False)
# remove surrounding lines
size_legend.spines['right'].set_visible(False)
size_legend.spines['top'].set_visible(False)
size_legend.spines['left'].set_visible(False)
size_legend.spines['bottom'].set_visible(False)
size_legend.grid(False)
ymin, ymax = size_legend.get_ylim()
size_legend.set_ylim(ymin, ymax+0.5)
utils.savefig_or_show('dotplot', show=show, save=save)
return axs | \
Makes a *dot plot* of the expression values of `var_names`.
For each var_name and each `groupby` category a dot is plotted. Each dot
represents two values: mean expression within each category (visualized by
color) and fraction of cells expressing the var_name in the
category (visualized by the size of the dot). If groupby is not given, the
dotplot assumes that all data belongs to a single category.
**Note**: A gene is considered expressed if the expression value in the adata
(or adata.raw) is above the specified threshold which is zero by default.
An example of dotplot usage is to visualize, for multiple marker genes,
the mean value and the percentage of cells expressing the gene across multiple clusters.
Parameters
----------
{common_plot_args}
expression_cutoff : `float` (default: `0.`)
Expression cutoff that is used for binarizing the gene expression and determining the fraction
of cells expressing given genes. A gene is expressed only if the expression value is greater than
this threshold.
mean_only_expressed : `bool` (default: `False`)
If True, gene expression is averaged only over the cells expressing the given genes.
color_map : `str`, optional (default: `Reds`)
String denoting matplotlib color map.
dot_max : `float` optional (default: `None`)
If none, the maximum dot size is set to the maximum fraction value found (e.g. 0.6). If given,
the value should be a number between 0 and 1. All fractions larger than dot_max are clipped to
this value.
dot_min : `float` optional (default: `None`)
If none, the minimum dot size is set to 0. If given,
the value should be a number between 0 and 1. All fractions smaller than dot_min are clipped to
this value.
standard_scale : {{'var', 'group'}}, optional (default: None)
Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group,
subtract the minimum and divide each by its maximum.
smallest_dot : `float` optional (default: 0.)
If none, the smallest dot has size 0. All expression levels with `dot_min` are plotted with
`smallest_dot` dot size.
{show_save_ax}
**kwds : keyword arguments
Are passed to `matplotlib.pyplot.scatter`.
Returns
-------
List of :class:`~matplotlib.axes.Axes`
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
... groupby='bulk_labels', dendrogram=True) | Below is the instruction that describes the task:
### Input:
\
Makes a *dot plot* of the expression values of `var_names`.
For each var_name and each `groupby` category a dot is plotted. Each dot
represents two values: mean expression within each category (visualized by
color) and fraction of cells expressing the var_name in the
category (visualized by the size of the dot). If groupby is not given, the
dotplot assumes that all data belongs to a single category.
**Note**: A gene is considered expressed if the expression value in the adata
(or adata.raw) is above the specified threshold which is zero by default.
An example of dotplot usage is to visualize, for multiple marker genes,
the mean value and the percentage of cells expressing the gene across multiple clusters.
Parameters
----------
{common_plot_args}
expression_cutoff : `float` (default: `0.`)
Expression cutoff that is used for binarizing the gene expression and determining the fraction
of cells expressing given genes. A gene is expressed only if the expression value is greater than
this threshold.
mean_only_expressed : `bool` (default: `False`)
If True, gene expression is averaged only over the cells expressing the given genes.
color_map : `str`, optional (default: `Reds`)
String denoting matplotlib color map.
dot_max : `float` optional (default: `None`)
If none, the maximum dot size is set to the maximum fraction value found (e.g. 0.6). If given,
the value should be a number between 0 and 1. All fractions larger than dot_max are clipped to
this value.
dot_min : `float` optional (default: `None`)
If none, the minimum dot size is set to 0. If given,
the value should be a number between 0 and 1. All fractions smaller than dot_min are clipped to
this value.
standard_scale : {{'var', 'group'}}, optional (default: None)
Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group,
subtract the minimum and divide each by its maximum.
smallest_dot : `float` optional (default: 0.)
If none, the smallest dot has size 0. All expression levels with `dot_min` are plotted with
`smallest_dot` dot size.
{show_save_ax}
**kwds : keyword arguments
Are passed to `matplotlib.pyplot.scatter`.
Returns
-------
List of :class:`~matplotlib.axes.Axes`
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
... groupby='bulk_labels', dendrogram=True)
### Response:
def dotplot(adata, var_names, groupby=None, use_raw=None, log=False, num_categories=7,
expression_cutoff=0., mean_only_expressed=False, color_map='Reds', dot_max=None,
dot_min=None, figsize=None, dendrogram=False, gene_symbols=None,
var_group_positions=None, standard_scale=None, smallest_dot=0.,
var_group_labels=None, var_group_rotation=None, layer=None, show=None,
save=None, **kwds):
"""\
Makes a *dot plot* of the expression values of `var_names`.
For each var_name and each `groupby` category a dot is plotted. Each dot
represents two values: mean expression within each category (visualized by
color) and fraction of cells expressing the var_name in the
category (visualized by the size of the dot). If groupby is not given, the
dotplot assumes that all data belongs to a single category.
**Note**: A gene is considered expressed if the expression value in the adata
(or adata.raw) is above the specified threshold which is zero by default.
An example of dotplot usage is to visualize, for multiple marker genes,
the mean value and the percentage of cells expressing the gene across multiple clusters.
Parameters
----------
{common_plot_args}
expression_cutoff : `float` (default: `0.`)
Expression cutoff that is used for binarizing the gene expression and determining the fraction
of cells expressing given genes. A gene is expressed only if the expression value is greater than
this threshold.
mean_only_expressed : `bool` (default: `False`)
If True, gene expression is averaged only over the cells expressing the given genes.
color_map : `str`, optional (default: `Reds`)
String denoting matplotlib color map.
dot_max : `float` optional (default: `None`)
If none, the maximum dot size is set to the maximum fraction value found (e.g. 0.6). If given,
the value should be a number between 0 and 1. All fractions larger than dot_max are clipped to
this value.
dot_min : `float` optional (default: `None`)
If none, the minimum dot size is set to 0. If given,
the value should be a number between 0 and 1. All fractions smaller than dot_min are clipped to
this value.
standard_scale : {{'var', 'group'}}, optional (default: None)
Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group,
subtract the minimum and divide each by its maximum.
smallest_dot : `float` optional (default: 0.)
If none, the smallest dot has size 0. All expression levels with `dot_min` are plotted with
`smallest_dot` dot size.
{show_save_ax}
**kwds : keyword arguments
Are passed to `matplotlib.pyplot.scatter`.
Returns
-------
List of :class:`~matplotlib.axes.Axes`
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
... groupby='bulk_labels', dendrogram=True)
"""
if use_raw is None and adata.raw is not None: use_raw = True
if isinstance(var_names, str):
var_names = [var_names]
categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, num_categories,
layer=layer, gene_symbols=gene_symbols)
# for each category defined by groupby (if any) compute for each var_name
# 1. the fraction of cells in the category having a value > expression_cutoff
# 2. the mean value over the category
# 1. compute fraction of cells having value > expression_cutoff
# transform obs_tidy into boolean matrix using the expression_cutoff
obs_bool = obs_tidy > expression_cutoff
# compute the sum per group; in the boolean matrix this is the number
# of values > expression_cutoff, and divide the result by the total number of values
# in the group (given by `count()`)
fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count()
# 2. compute mean value
if mean_only_expressed:
mean_obs = obs_tidy.mask(~obs_bool).groupby(level=0).mean().fillna(0)
else:
mean_obs = obs_tidy.groupby(level=0).mean()
if standard_scale == 'group':
mean_obs = mean_obs.sub(mean_obs.min(1), axis=0)
mean_obs = mean_obs.div(mean_obs.max(1), axis=0).fillna(0)
elif standard_scale == 'var':
mean_obs -= mean_obs.min(0)
mean_obs = (mean_obs / mean_obs.max(0)).fillna(0)
elif standard_scale is None:
pass
else:
logg.warn('Unknown type for standard_scale, ignored')
dendro_width = 0.8 if dendrogram else 0
colorbar_width = 0.2
colorbar_width_spacer = 0.5
size_legend_width = 0.25
if figsize is None:
height = len(categories) * 0.3 + 1 # +1 for labels
# if the number of categories is small (eg 1 or 2) use
# a larger height
height = max([1.5, height])
heatmap_width = len(var_names) * 0.35
width = heatmap_width + colorbar_width + size_legend_width + dendro_width + colorbar_width_spacer
else:
width, height = figsize
heatmap_width = width - (colorbar_width + size_legend_width + dendro_width + colorbar_width_spacer)
# colorbar ax width should not change with differences in the width of the image
# otherwise can become too small
if var_group_positions is not None and len(var_group_positions) > 0:
# add some space in case 'brackets' want to be plotted on top of the image
height_ratios = [0.5, 10]
else:
height_ratios = [0, 10.5]
# define a layout of 2 rows x 5 columns
# first row is for 'brackets' (if no brackets needed, the height of this row is zero)
# second row is for main content. This second row
# is divided into 5 axes:
# first ax is for the main figure
# second ax is for dendrogram (if present)
# third ax is for the color bar legend
# fourth ax is for a spacer that prevents the ticks
# of the color bar from being hidden beneath the size legend axis
# fifth ax is to plot the size legend
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(nrows=2, ncols=5, wspace=0.02, hspace=0.04,
width_ratios=[heatmap_width, dendro_width, colorbar_width, colorbar_width_spacer, size_legend_width],
height_ratios=height_ratios)
if len(categories) < 4:
# when few categories are shown, the colorbar and size legend
# need to be larger than the main plot, otherwise they would look
# compressed. For this, the dotplot ax is split into two:
axs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 0],
height_ratios=[len(categories) * 0.3, 1])
dot_ax = fig.add_subplot(axs2[0])
else:
dot_ax = fig.add_subplot(axs[1, 0])
color_legend = fig.add_subplot(axs[1, 2])
if groupby is None or len(categories) <= 1:
# dendrogram can only be computed between groupby categories
dendrogram = False
if dendrogram:
dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram,
var_names=var_names,
var_group_labels=var_group_labels,
var_group_positions=var_group_positions)
var_group_labels = dendro_data['var_group_labels']
var_group_positions = dendro_data['var_group_positions']
# reorder matrix
if dendro_data['var_names_idx_ordered'] is not None:
# reorder columns (usually genes) if needed. This only happens when
# var_group_positions and var_group_labels is set
mean_obs = mean_obs.iloc[:,dendro_data['var_names_idx_ordered']]
fraction_obs = fraction_obs.iloc[:, dendro_data['var_names_idx_ordered']]
# reorder rows (categories) to match the dendrogram order
mean_obs = mean_obs.iloc[dendro_data['categories_idx_ordered'], :]
fraction_obs = fraction_obs.iloc[dendro_data['categories_idx_ordered'], :]
y_ticks = range(mean_obs.shape[0])
dendro_ax = fig.add_subplot(axs[1, 1], sharey=dot_ax)
_plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=y_ticks)
# to keep the size_legend at about the same height, irrespective
# of the number of categories, the fourth ax is subdivided into two parts
size_legend_height = min(1.3, height)
# wspace is proportional to the width but a constant value is
# needed such that the spacing is the same for thinner or wider images.
wspace = 10.5 / width
axs3 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 4], wspace=wspace,
height_ratios=[size_legend_height / height,
(height - size_legend_height) / height])
# make scatter plot in which
# x = var_names
# y = groupby category
# size = fraction
# color = mean expression
y, x = np.indices(mean_obs.shape)
y = y.flatten()
x = x.flatten()
frac = fraction_obs.values.flatten()
mean_flat = mean_obs.values.flatten()
cmap = pl.get_cmap(color_map)
if dot_max is None:
dot_max = np.ceil(max(frac) * 10) / 10
else:
if dot_max < 0 or dot_max > 1:
raise ValueError("`dot_max` value has to be between 0 and 1")
if dot_min is None:
dot_min = 0
else:
if dot_min < 0 or dot_min > 1:
raise ValueError("`dot_min` value has to be between 0 and 1")
if dot_min != 0 or dot_max != 1:
# clip frac between dot_min and dot_max
frac = np.clip(frac, dot_min, dot_max)
old_range = dot_max - dot_min
# re-scale frac between 0 and 1
frac = ((frac - dot_min) / old_range)
size = (frac * 10) ** 2
size += smallest_dot
import matplotlib.colors
normalize = matplotlib.colors.Normalize(vmin=kwds.get('vmin'), vmax=kwds.get('vmax'))
colors = cmap(normalize(mean_flat))
dot_ax.scatter(x, y, color=colors, s=size, cmap=cmap, norm=None, edgecolor='none', **kwds)
y_ticks = range(mean_obs.shape[0])
dot_ax.set_yticks(y_ticks)
dot_ax.set_yticklabels([mean_obs.index[idx] for idx in y_ticks])
x_ticks = range(mean_obs.shape[1])
dot_ax.set_xticks(x_ticks)
dot_ax.set_xticklabels([mean_obs.columns[idx] for idx in x_ticks], rotation=90)
dot_ax.tick_params(axis='both', labelsize='small')
dot_ax.grid(False)
dot_ax.set_xlim(-0.5, len(var_names) + 0.5)
dot_ax.set_ylabel(groupby)
# to be consistent with the heatmap plot, it is better to
# invert the order of the y-axis, such that the first group is on
# top
ymin, ymax = dot_ax.get_ylim()
dot_ax.set_ylim(ymax+0.5, ymin - 0.5)
dot_ax.set_xlim(-1, len(var_names))
# plot group legends on top of dot_ax (if given)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=dot_ax)
_plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions,
group_labels=var_group_labels,
rotation=var_group_rotation)
# plot colorbar
import matplotlib.colorbar
matplotlib.colorbar.ColorbarBase(color_legend, cmap=cmap, norm=normalize)
# for the dot size legend, use step between dot_max and dot_min
# based on how different they are.
diff = dot_max - dot_min
if 0.3 < diff <= 0.6:
step = 0.1
elif diff <= 0.3:
step = 0.05
else:
step = 0.2
# a descending range that is afterwards inverted is used
# to guarantee that dot_max is in the legend.
fracs_legends = np.arange(dot_max, dot_min, step * -1)[::-1]
if dot_min != 0 or dot_max != 1:
fracs_values = ((fracs_legends - dot_min) / old_range)
else:
fracs_values = fracs_legends
size = (fracs_values * 10) ** 2
size += smallest_dot
color = [cmap(normalize(value)) for value in np.repeat(max(mean_flat) * 0.7, len(size))]
# plot size bar
size_legend = fig.add_subplot(axs3[0])
size_legend.scatter(np.repeat(0, len(size)), range(len(size)), s=size, color=color)
size_legend.set_yticks(range(len(size)))
labels = ["{:.0%}".format(x) for x in fracs_legends]
if dot_max < 1:
labels[-1] = ">" + labels[-1]
size_legend.set_yticklabels(labels)
size_legend.tick_params(axis='y', left=False, labelleft=False, labelright=True)
# remove x ticks and labels
size_legend.tick_params(axis='x', bottom=False, labelbottom=False)
# remove surrounding lines
size_legend.spines['right'].set_visible(False)
size_legend.spines['top'].set_visible(False)
size_legend.spines['left'].set_visible(False)
size_legend.spines['bottom'].set_visible(False)
size_legend.grid(False)
ymin, ymax = size_legend.get_ylim()
size_legend.set_ylim(ymin, ymax+0.5)
utils.savefig_or_show('dotplot', show=show, save=save)
return axs |
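To make the clipping and scaling options concrete, here is a minimal usage sketch; it assumes scanpy is imported as sc and reuses the pbmc68k_reduced() dataset and marker genes from the docstring above, while the parameter values themselves are only illustrative.

import scanpy as sc

adata = sc.datasets.pbmc68k_reduced()
markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
# Clip dot sizes to the 10%-50% expressing-fraction range and rescale each
# gene's mean expression to [0, 1] before mapping it onto the color map.
sc.pl.dotplot(adata, markers, groupby='bulk_labels',
              dot_min=0.1, dot_max=0.5, standard_scale='var',
              smallest_dot=10, dendrogram=True)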
def iter_starred(self, login=None, sort=None, direction=None, number=-1,
etag=None):
"""Iterate over repositories starred by ``login`` or the authenticated
user.
.. versionchanged:: 0.5
Added sort and direction parameters (optional) as per the change in
GitHub's API.
:param str login: (optional), name of user whose stars you want to see
:param str sort: (optional), either 'created' (when the star was
created) or 'updated' (when the repository was last pushed to)
:param str direction: (optional), either 'asc' or 'desc'. Default:
'desc'
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
if login:
return self.user(login).iter_starred(sort, direction)
params = {'sort': sort, 'direction': direction}
self._remove_none(params)
url = self._build_url('user', 'starred')
return self._iter(int(number), url, Repository, params, etag) | Iterate over repositories starred by ``login`` or the authenticated
user.
.. versionchanged:: 0.5
Added sort and direction parameters (optional) as per the change in
GitHub's API.
:param str login: (optional), name of user whose stars you want to see
:param str sort: (optional), either 'created' (when the star was
created) or 'updated' (when the repository was last pushed to)
:param str direction: (optional), either 'asc' or 'desc'. Default:
'desc'
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>` | Below is the instruction that describes the task:
### Input:
Iterate over repositories starred by ``login`` or the authenticated
user.
.. versionchanged:: 0.5
Added sort and direction parameters (optional) as per the change in
GitHub's API.
:param str login: (optional), name of user whose stars you want to see
:param str sort: (optional), either 'created' (when the star was
created) or 'updated' (when the repository was last pushed to)
:param str direction: (optional), either 'asc' or 'desc'. Default:
'desc'
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
### Response:
def iter_starred(self, login=None, sort=None, direction=None, number=-1,
etag=None):
"""Iterate over repositories starred by ``login`` or the authenticated
user.
.. versionchanged:: 0.5
Added sort and direction parameters (optional) as per the change in
GitHub's API.
:param str login: (optional), name of user whose stars you want to see
:param str sort: (optional), either 'created' (when the star was
created) or 'updated' (when the repository was last pushed to)
:param str direction: (optional), either 'asc' or 'desc'. Default:
'desc'
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
if login:
return self.user(login).iter_starred(sort, direction)
params = {'sort': sort, 'direction': direction}
self._remove_none(params)
url = self._build_url('user', 'starred')
return self._iter(int(number), url, Repository, params, etag) |
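A short usage sketch for the method above; it assumes the github3.py 0.x API this code belongs to, where github3.login() returns the GitHub client exposing iter_starred(), and the token is a placeholder.

import github3

gh = github3.login(token='<personal-access-token>')  # placeholder credentials
# Ten most recently pushed-to repositories starred by the authenticated user.
for repo in gh.iter_starred(sort='updated', direction='desc', number=10):
    print(repo.full_name)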
def set_consistent(self, consistent_config):
""" Indicates that the stream is the start of a consistent region.
Args:
consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region.
Returns:
Stream: Returns this stream.
.. versionadded:: 1.11
"""
# add job control plane if needed
self.topology._add_job_control_plane()
self.oport.operator.consistent(consistent_config)
return self._make_placeable() | Indicates that the stream is the start of a consistent region.
Args:
consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region.
Returns:
Stream: Returns this stream.
.. versionadded:: 1.11 | Below is the instruction that describes the task:
### Input:
Indicates that the stream is the start of a consistent region.
Args:
consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region.
Returns:
Stream: Returns this stream.
.. versionadded:: 1.11
### Response:
def set_consistent(self, consistent_config):
""" Indicates that the stream is the start of a consistent region.
Args:
consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region.
Returns:
Stream: Returns this stream.
.. versionadded:: 1.11
"""
# add job control plane if needed
self.topology._add_job_control_plane()
self.oport.operator.consistent(consistent_config)
return self._make_placeable() |
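A hedged sketch of calling set_consistent() from application code. The import path and the periodic() constructor are assumptions based on the streamsx package this method comes from; check the ConsistentRegionConfig documentation for the version you have.

from streamsx.topology.topology import Topology
from streamsx.topology.state import ConsistentRegionConfig  # assumed location

topo = Topology('consistent_example')
source = topo.source(lambda: range(100))
# Start a consistent region at the source; checkpoint every 30 seconds.
source.set_consistent(ConsistentRegionConfig.periodic(30))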
def delete_variable(self, name):
"""Deletes a variable from a DataFrame."""
del self.variables[name]
self.signal_variable_changed.emit(self, name, "delete") | Deletes a variable from a DataFrame. | Below is the instruction that describes the task:
### Input:
Deletes a variable from a DataFrame.
### Response:
def delete_variable(self, name):
"""Deletes a variable from a DataFrame."""
del self.variables[name]
self.signal_variable_changed.emit(self, name, "delete") |
def audit_1_1(self):
"""1.1 Avoid the use of the "root" account (Scored)"""
for row in self.credential_report:
if row["user"] == "<root_account>":
for field in "password_last_used", "access_key_1_last_used_date", "access_key_2_last_used_date":
if row[field] != "N/A" and self.parse_date(row[field]) > datetime.now(tzutc()) - timedelta(days=1):
raise Exception("Root account last used less than a day ago ({})".format(field)) | 1.1 Avoid the use of the "root" account (Scored) | Below is the instruction that describes the task:
### Input:
1.1 Avoid the use of the "root" account (Scored)
### Response:
def audit_1_1(self):
"""1.1 Avoid the use of the "root" account (Scored)"""
for row in self.credential_report:
if row["user"] == "<root_account>":
for field in "password_last_used", "access_key_1_last_used_date", "access_key_2_last_used_date":
if row[field] != "N/A" and self.parse_date(row[field]) > datetime.now(tzutc()) - timedelta(days=1):
raise Exception("Root account last used less than a day ago ({})".format(field)) |
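To show the shape of data this check expects, a small self-contained sketch of the same date test; the credential-report row is invented, and dateutil stands in for the parse_date helper that is not part of this record.

from datetime import datetime, timedelta
from dateutil import parser
from dateutil.tz import tzutc

# Hypothetical parsed credential report row, keyed like the code above.
row = {"user": "<root_account>",
       "password_last_used": "2019-01-01T12:00:00+00:00",
       "access_key_1_last_used_date": "N/A",
       "access_key_2_last_used_date": "N/A"}
one_day_ago = datetime.now(tzutc()) - timedelta(days=1)
for field in ("password_last_used", "access_key_1_last_used_date",
              "access_key_2_last_used_date"):
    if row[field] != "N/A" and parser.parse(row[field]) > one_day_ago:
        print("Root account used within the last day:", field)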
def add_lines(self, txt, indent=0):
"""Adds a list of lines.
The list can be indented with the optional argument 'indent'.
"""
for line in txt:
self.add_line(line, indent) | Adds a list of lines.
The list can be indented with the optional argument 'indent'. | Below is the instruction that describes the task:
### Input:
Adds a list of lines.
The list can be indented with the optional argument 'indent'.
### Response:
def add_lines(self, txt, indent=0):
"""Adds a list of lines.
The list can be indented with the optional argument 'indent'.
"""
for line in txt:
self.add_line(line, indent) |
def generate_message_doc(message_descriptor, locations, path, name_prefix=''):
"""Generate docs for message and nested messages and enums.
Args:
message_descriptor: descriptor_pb2.DescriptorProto instance for message
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the message definition.
name_prefix: Optional prefix for this message's name.
"""
# message_type is 4
prefixed_name = name_prefix + message_descriptor.name
print(make_subsection(prefixed_name))
location = locations[path]
if location.HasField('leading_comments'):
print(textwrap.dedent(location.leading_comments))
row_tuples = []
for field_index, field in enumerate(message_descriptor.field):
field_location = locations[path + (2, field_index)]
if field.type not in [11, 14]:
type_str = TYPE_TO_STR[field.type]
else:
type_str = make_link(field.type_name.lstrip('.'))
row_tuples.append((
make_code(field.name),
field.number,
type_str,
LABEL_TO_STR[field.label],
textwrap.fill(get_comment_from_location(field_location), INFINITY),
))
print_table(('Field', 'Number', 'Type', 'Label', 'Description'),
row_tuples)
# Generate nested messages
nested_types = enumerate(message_descriptor.nested_type)
for index, nested_message_desc in nested_types:
generate_message_doc(nested_message_desc, locations,
path + (3, index),
name_prefix=prefixed_name + '.')
# Generate nested enums
for index, nested_enum_desc in enumerate(message_descriptor.enum_type):
generate_enum_doc(nested_enum_desc, locations, path + (4, index),
name_prefix=prefixed_name + '.') | Generate docs for message and nested messages and enums.
Args:
message_descriptor: descriptor_pb2.DescriptorProto instance for message
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the message definition.
name_prefix: Optional prefix for this message's name. | Below is the instruction that describes the task:
### Input:
Generate docs for message and nested messages and enums.
Args:
message_descriptor: descriptor_pb2.DescriptorProto instance for message
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the message definition.
name_prefix: Optional prefix for this message's name.
### Response:
def generate_message_doc(message_descriptor, locations, path, name_prefix=''):
"""Generate docs for message and nested messages and enums.
Args:
message_descriptor: descriptor_pb2.DescriptorProto instance for message
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the message definition.
name_prefix: Optional prefix for this message's name.
"""
# message_type is 4
prefixed_name = name_prefix + message_descriptor.name
print(make_subsection(prefixed_name))
location = locations[path]
if location.HasField('leading_comments'):
print(textwrap.dedent(location.leading_comments))
row_tuples = []
for field_index, field in enumerate(message_descriptor.field):
field_location = locations[path + (2, field_index)]
if field.type not in [11, 14]:
type_str = TYPE_TO_STR[field.type]
else:
type_str = make_link(field.type_name.lstrip('.'))
row_tuples.append((
make_code(field.name),
field.number,
type_str,
LABEL_TO_STR[field.label],
textwrap.fill(get_comment_from_location(field_location), INFINITY),
))
print_table(('Field', 'Number', 'Type', 'Label', 'Description'),
row_tuples)
# Generate nested messages
nested_types = enumerate(message_descriptor.nested_type)
for index, nested_message_desc in nested_types:
generate_message_doc(nested_message_desc, locations,
path + (3, index),
name_prefix=prefixed_name + '.')
# Generate nested enums
for index, nested_enum_desc in enumerate(message_descriptor.enum_type):
generate_enum_doc(nested_enum_desc, locations, path + (4, index),
name_prefix=prefixed_name + '.') |
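A hedged sketch of how the locations mapping is usually assembled before calling the function above; it assumes a FileDescriptorSet serialized with source info (for example via protoc --include_source_info --descriptor_set_out), and the file name is a placeholder.

from google.protobuf import descriptor_pb2

with open('descriptors.pb', 'rb') as fh:  # placeholder descriptor-set file
    fds = descriptor_pb2.FileDescriptorSet.FromString(fh.read())
for proto_file in fds.file:
    # Map each source location path (a tuple of field numbers) to its Location.
    locations = {tuple(loc.path): loc
                 for loc in proto_file.source_code_info.location}
    for index, message_desc in enumerate(proto_file.message_type):
        # message_type is field number 4 in FileDescriptorProto, hence the (4, index) path.
        generate_message_doc(message_desc, locations, (4, index))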
def is_access_granted(self, agreement_id, did, consumer_address):
"""
Check permission for the agreement.
Verify on-chain that the `consumer_address` has permission to access the given asset `did`
according to the `agreement_id`.
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param consumer_address: ethereum account address of consumer, hex str
:return: bool True if user has permission
"""
agreement_consumer = self._keeper.escrow_access_secretstore_template.get_agreement_consumer(
agreement_id)
if agreement_consumer != consumer_address:
logger.warning(f'Invalid consumer address {consumer_address} and/or '
f'service agreement id {agreement_id} (did {did})'
f', agreement consumer is {agreement_consumer}')
return False
document_id = did_to_id(did)
return self._keeper.access_secret_store_condition.check_permissions(
document_id, consumer_address
) | Check permission for the agreement.
Verify on-chain that the `consumer_address` has permission to access the given asset `did`
according to the `agreement_id`.
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param consumer_address: ethereum account address of consumer, hex str
:return: bool True if user has permission | Below is the instruction that describes the task:
### Input:
Check permission for the agreement.
Verify on-chain that the `consumer_address` has permission to access the given asset `did`
according to the `agreement_id`.
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param consumer_address: ethereum account address of consumer, hex str
:return: bool True if user has permission
### Response:
def is_access_granted(self, agreement_id, did, consumer_address):
"""
Check permission for the agreement.
Verify on-chain that the `consumer_address` has permission to access the given asset `did`
according to the `agreement_id`.
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param consumer_address: ethereum account address of consumer, hex str
:return: bool True if user has permission
"""
agreement_consumer = self._keeper.escrow_access_secretstore_template.get_agreement_consumer(
agreement_id)
if agreement_consumer != consumer_address:
logger.warning(f'Invalid consumer address {consumer_address} and/or '
f'service agreement id {agreement_id} (did {did})'
f', agreement consumer is {agreement_consumer}')
return False
document_id = did_to_id(did)
return self._keeper.access_secret_store_condition.check_permissions(
document_id, consumer_address
) |
def as_sql(self):
"""Gets report as json
:return: json-formatted report
"""
labels, data = self._get_table()
table = SqlTable(labels, data, "{:.3f}", "\n")
return str(table) | Gets report as json
:return: json-formatted report | Below is the instruction that describes the task:
### Input:
Gets report as json
:return: json-formatted report
### Response:
def as_sql(self):
"""Gets report as json
:return: json-formatted report
"""
labels, data = self._get_table()
table = SqlTable(labels, data, "{:.3f}", "\n")
return str(table) |
def discover_base_dir(start_dir):
'''Return start_dir or the parent dir that has the s2 marker.
Starting from the specified directory, and going up the parent
chain, check each directory to see if it's a base_dir (contains
the "marker" directory *s2*) and return it. Otherwise, return
the start_dir.
'''
if is_base_dir(start_dir):
return start_dir
pcl = start_dir.split('/') #path component list
found_base_dir = None
for i in range(1, len(pcl)+1):
d2c = '/'.join(pcl[:-i])
if (d2c == ''):
d2c = '/'
if is_base_dir(d2c):
found_base_dir = d2c
break
return found_base_dir | Return start_dir or the parent dir that has the s2 marker.
Starting from the specified directory, and going up the parent
chain, check each directory to see if it's a base_dir (contains
the "marker" directory *s2*) and return it. Otherwise, return
the start_dir. | Below is the instruction that describes the task:
### Input:
Return start_dir or the parent dir that has the s2 marker.
Starting from the specified directory, and going up the parent
chain, check each directory to see if it's a base_dir (contains
the "marker" directory *s2*) and return it. Otherwise, return
the start_dir.
### Response:
def discover_base_dir(start_dir):
'''Return start_dir or the parent dir that has the s2 marker.
Starting from the specified directory, and going up the parent
chain, check each directory to see if it's a base_dir (contains
the "marker" directory *s2*) and return it. Otherwise, return
the start_dir.
'''
if is_base_dir(start_dir):
return start_dir
pcl = start_dir.split('/') #path component list
found_base_dir = None
for i in range(1, len(pcl)+1):
d2c = '/'.join(pcl[:-i])
if (d2c == ''):
d2c = '/'
if is_base_dir(d2c):
found_base_dir = d2c
break
return found_base_dir |
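A self-contained sketch of the parent-walk behaviour above. is_base_dir is not part of this record, so a stand-in that simply looks for an s2 marker directory is defined here purely for illustration, and the path is hypothetical.

import os

def is_base_dir(path):
    # Stand-in for the real helper: a base dir is one containing the "s2" marker dir.
    return os.path.isdir(os.path.join(path, 's2'))

# Walks up from the start directory and returns the first directory carrying
# the marker; the code returns None when no ancestor carries it.
print(discover_base_dir('/home/user/project/src/pkg'))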
def login():
""" Enables the user to login to the remote GMQL service.
If both username and password are None, the user will be connected as guest.
"""
from .RemoteConnection.RemoteManager import RemoteManager
global __remote_manager, __session_manager
logger = logging.getLogger()
remote_address = get_remote_address()
res = __session_manager.get_session(remote_address)
if res is None:
# there is no session for this address, let's login as guest
warnings.warn("There is no active session for address {}. Logging as Guest user".format(remote_address))
rm = RemoteManager(address=remote_address)
rm.login()
session_type = "guest"
else:
# there is a previous session for this address, let's do an auto login
# using that access token
logger.info("Logging using stored authentication token")
rm = RemoteManager(address=remote_address, auth_token=res[1])
# if the access token is not valid anymore (therefore we are in guest mode)
# the auto_login function will perform a guest login from scratch
session_type = rm.auto_login(how=res[2])
# store the new session
__remote_manager = rm
access_time = int(time.time())
auth_token = rm.auth_token
__session_manager.add_session(remote_address, auth_token, access_time, session_type) | Enables the user to login to the remote GMQL service.
If both username and password are None, the user will be connected as guest. | Below is the instruction that describes the task:
### Input:
Enables the user to login to the remote GMQL service.
If both username and password are None, the user will be connected as guest.
### Response:
def login():
""" Enables the user to login to the remote GMQL service.
If both username and password are None, the user will be connected as guest.
"""
from .RemoteConnection.RemoteManager import RemoteManager
global __remote_manager, __session_manager
logger = logging.getLogger()
remote_address = get_remote_address()
res = __session_manager.get_session(remote_address)
if res is None:
# there is no session for this address, let's login as guest
warnings.warn("There is no active session for address {}. Logging as Guest user".format(remote_address))
rm = RemoteManager(address=remote_address)
rm.login()
session_type = "guest"
else:
# there is a previous session for this address, let's do an auto login
# using that access token
logger.info("Logging using stored authentication token")
rm = RemoteManager(address=remote_address, auth_token=res[1])
# if the access token is not valid anymore (therefore we are in guest mode)
# the auto_login function will perform a guest login from scratch
session_type = rm.auto_login(how=res[2])
# store the new session
__remote_manager = rm
access_time = int(time.time())
auth_token = rm.auth_token
__session_manager.add_session(remote_address, auth_token, access_time, session_type) |
def _build_jss_object_list(self, response, obj_class):
"""Build a JSSListData object from response."""
response_objects = [item for item in response
if item is not None and
item.tag != "size"]
objects = [
JSSListData(obj_class, {i.tag: i.text for i in response_object},
self) for response_object in response_objects]
return JSSObjectList(self, obj_class, objects) | Build a JSSListData object from response. | Below is the instruction that describes the task:
### Input:
Build a JSSListData object from response.
### Response:
def _build_jss_object_list(self, response, obj_class):
"""Build a JSSListData object from response."""
response_objects = [item for item in response
if item is not None and
item.tag != "size"]
objects = [
JSSListData(obj_class, {i.tag: i.text for i in response_object},
self) for response_object in response_objects]
return JSSObjectList(self, obj_class, objects) |
def detach_events(self, *events):
"""Detach one or more events from the bot instance"""
reg = self.registry
delete = defaultdict(list)
# remove from self.events
all_events = reg.events
for e in events:
regexp = getattr(e.regexp, 're', e.regexp)
iotype = e.iotype
if e in all_events[iotype].get(regexp, []):
all_events[iotype][regexp].remove(e)
if not all_events[iotype][regexp]:
del all_events[iotype][regexp]
# need to delete from self.events_re
delete[iotype].append(regexp)
# delete from events_re
for iotype, regexps in delete.items():
reg.events_re[iotype] = [r for r in reg.events_re[iotype]
if r[0] not in regexps] | Detach one or more events from the bot instance | Below is the instruction that describes the task:
### Input:
Detach one or more events from the bot instance
### Response:
def detach_events(self, *events):
"""Detach one or more events from the bot instance"""
reg = self.registry
delete = defaultdict(list)
# remove from self.events
all_events = reg.events
for e in events:
regexp = getattr(e.regexp, 're', e.regexp)
iotype = e.iotype
if e in all_events[iotype].get(regexp, []):
all_events[iotype][regexp].remove(e)
if not all_events[iotype][regexp]:
del all_events[iotype][regexp]
# need to delete from self.events_re
delete[iotype].append(regexp)
# delete from events_re
for iotype, regexps in delete.items():
reg.events_re[iotype] = [r for r in reg.events_re[iotype]
if r[0] not in regexps] |
def _collect_potential_merges(dag, barriers):
"""
Returns a dict of DAGNode : Barrier objects, where the barrier needs to be
inserted where the corresponding DAGNode appears in the main DAG
"""
# if only got 1 or 0 barriers then can't merge
if len(barriers) < 2:
return None
# mapping from the node that will be the main barrier to the
# barrier object that gets built up
node_to_barrier_qubits = {}
# Start from the first barrier
current_barrier = barriers[0]
end_of_barrier = current_barrier
current_barrier_nodes = [current_barrier]
current_qubits = set(current_barrier.qargs)
current_ancestors = dag.ancestors(current_barrier)
current_descendants = dag.descendants(current_barrier)
barrier_to_add = Barrier(len(current_qubits))
for next_barrier in barriers[1:]:
# Remove all barriers that have already been included in this new barrier from the set
# of ancestors/descendants as they will be removed from the new DAG when it is created
next_ancestors = {nd for nd in dag.ancestors(next_barrier)
if nd not in current_barrier_nodes}
next_descendants = {nd for nd in dag.descendants(next_barrier)
if nd not in current_barrier_nodes}
next_qubits = set(next_barrier.qargs)
if (
not current_qubits.isdisjoint(next_qubits)
and current_ancestors.isdisjoint(next_descendants)
and current_descendants.isdisjoint(next_ancestors)
):
# can be merged
current_ancestors = current_ancestors | next_ancestors
current_descendants = current_descendants | next_descendants
current_qubits = current_qubits | next_qubits
# update the barrier that will be added back to include this barrier
barrier_to_add = Barrier(len(current_qubits))
else:
# store the previously made barrier
if barrier_to_add:
node_to_barrier_qubits[end_of_barrier] = current_qubits
# reset the properties
current_qubits = set(next_barrier.qargs)
current_ancestors = dag.ancestors(next_barrier)
current_descendants = dag.descendants(next_barrier)
barrier_to_add = Barrier(len(current_qubits))
current_barrier_nodes = []
end_of_barrier = next_barrier
current_barrier_nodes.append(end_of_barrier)
if barrier_to_add:
node_to_barrier_qubits[end_of_barrier] = current_qubits
return node_to_barrier_qubits | Returns a dict of DAGNode : Barrier objects, where the barrier needs to be
inserted where the corresponding DAGNode appears in the main DAG | Below is the instruction that describes the task:
### Input:
Returns a dict of DAGNode : Barrier objects, where the barrier needs to be
inserted where the corresponding DAGNode appears in the main DAG
### Response:
def _collect_potential_merges(dag, barriers):
"""
Returns a dict of DAGNode : Barrier objects, where the barrier needs to be
inserted where the corresponding DAGNode appears in the main DAG
"""
# if only got 1 or 0 barriers then can't merge
if len(barriers) < 2:
return None
# mapping from the node that will be the main barrier to the
# barrier object that gets built up
node_to_barrier_qubits = {}
# Start from the first barrier
current_barrier = barriers[0]
end_of_barrier = current_barrier
current_barrier_nodes = [current_barrier]
current_qubits = set(current_barrier.qargs)
current_ancestors = dag.ancestors(current_barrier)
current_descendants = dag.descendants(current_barrier)
barrier_to_add = Barrier(len(current_qubits))
for next_barrier in barriers[1:]:
# Remove all barriers that have already been included in this new barrier from the set
# of ancestors/descendants as they will be removed from the new DAG when it is created
next_ancestors = {nd for nd in dag.ancestors(next_barrier)
if nd not in current_barrier_nodes}
next_descendants = {nd for nd in dag.descendants(next_barrier)
if nd not in current_barrier_nodes}
next_qubits = set(next_barrier.qargs)
if (
not current_qubits.isdisjoint(next_qubits)
and current_ancestors.isdisjoint(next_descendants)
and current_descendants.isdisjoint(next_ancestors)
):
# can be merged
current_ancestors = current_ancestors | next_ancestors
current_descendants = current_descendants | next_descendants
current_qubits = current_qubits | next_qubits
# update the barrier that will be added back to include this barrier
barrier_to_add = Barrier(len(current_qubits))
else:
# store the previously made barrier
if barrier_to_add:
node_to_barrier_qubits[end_of_barrier] = current_qubits
# reset the properties
current_qubits = set(next_barrier.qargs)
current_ancestors = dag.ancestors(next_barrier)
current_descendants = dag.descendants(next_barrier)
barrier_to_add = Barrier(len(current_qubits))
current_barrier_nodes = []
end_of_barrier = next_barrier
current_barrier_nodes.append(end_of_barrier)
if barrier_to_add:
node_to_barrier_qubits[end_of_barrier] = current_qubits
return node_to_barrier_qubits |
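The decision of whether two barriers can merge reduces to three set checks; a DAG-free illustration with plain sets follows, where all qubit and node labels are invented.

# Barriers can merge when they share at least one qubit and neither one is
# an ancestor or descendant of the other (no ordering constraint between them).
current_qubits, next_qubits = {'q0', 'q1'}, {'q1', 'q2'}
current_ancestors, next_descendants = {'h_q0'}, {'cx_q1_q2'}
current_descendants, next_ancestors = {'measure_q0'}, {'h_q2'}
can_merge = (not current_qubits.isdisjoint(next_qubits)
             and current_ancestors.isdisjoint(next_descendants)
             and current_descendants.isdisjoint(next_ancestors))
print(can_merge)  # True for these invented sets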
def get_scoped_variable_m(self, data_port_id):
"""Returns the scoped variable model for the given data port id
:param data_port_id: The data port id to search for
:return: The model of the scoped variable with the given id
"""
for scoped_variable_m in self.scoped_variables:
if scoped_variable_m.scoped_variable.data_port_id == data_port_id:
return scoped_variable_m
return None | Returns the scoped variable model for the given data port id
:param data_port_id: The data port id to search for
:return: The model of the scoped variable with the given id | Below is the instruction that describes the task:
### Input:
Returns the scoped variable model for the given data port id
:param data_port_id: The data port id to search for
:return: The model of the scoped variable with the given id
### Response:
def get_scoped_variable_m(self, data_port_id):
"""Returns the scoped variable model for the given data port id
:param data_port_id: The data port id to search for
:return: The model of the scoped variable with the given id
"""
for scoped_variable_m in self.scoped_variables:
if scoped_variable_m.scoped_variable.data_port_id == data_port_id:
return scoped_variable_m
return None |
def ndhess(f, delta=DELTA):
"""
Returns numerical hessian function of given input function
Input: f, scalar function of an numpy array object
delta(optional), finite difference step
Output: hessian function object
"""
def hess_f(*args, **kwargs):
x = args[0]
hess_val = numpy.zeros(x.shape + x.shape)
it = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])
for xi in it:
i = it.multi_index
jt = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])
for xj in jt:
j = jt.multi_index
xi += delta/2
xj += delta/2
fpp = f(x)
xj -= delta
fpm = f(x)
xi -= delta
fmm = f(x)
xj += delta
fmp = f(x)
xi += delta/2
xj -= delta/2
hess_val[i + j] = (fpp + fmm - fpm - fmp)/delta**2
return hess_val
return hess_f | Returns numerical hessian function of given input function
Input: f, scalar function of an numpy array object
delta(optional), finite difference step
Output: hessian function object | Below is the instruction that describes the task:
### Input:
Returns numerical hessian function of given input function
Input: f, scalar function of an numpy array object
delta(optional), finite difference step
Output: hessian function object
### Response:
def ndhess(f, delta=DELTA):
"""
Returns numerical hessian function of given input function
Input: f, scalar function of an numpy array object
delta(optional), finite difference step
Output: hessian function object
"""
def hess_f(*args, **kwargs):
x = args[0]
hess_val = numpy.zeros(x.shape + x.shape)
it = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])
for xi in it:
i = it.multi_index
jt = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])
for xj in jt:
j = jt.multi_index
xi += delta/2
xj += delta/2
fpp = f(x)
xj -= delta
fpm = f(x)
xi -= delta
fmm = f(x)
xj += delta
fmp = f(x)
xi += delta/2
xj -= delta/2
hess_val[i + j] = (fpp + fmm - fpm - fmp)/delta**2
return hess_val
return hess_f |
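A quick numerical check of the finite-difference Hessian above on a quadratic form, where the exact answer is known to be A + A.T for f(x) = x.T A x; only numpy is required.

import numpy

A = numpy.array([[2.0, 1.0], [0.0, 3.0]])
f = lambda x: x @ A @ x
hess = ndhess(f)
print(hess(numpy.array([0.5, -1.0])))  # close to A + A.T
print(A + A.T)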
def parse_notifier_name(name):
"""Convert the name argument to a list of names.
Examples
--------
>>> parse_notifier_name('a')
['a']
>>> parse_notifier_name(['a','b'])
['a', 'b']
>>> parse_notifier_name(None)
['anytrait']
"""
if isinstance(name, str):
return [name]
elif name is None:
return ['anytrait']
elif isinstance(name, (list, tuple)):
for n in name:
assert isinstance(n, str), "names must be strings"
return name | Convert the name argument to a list of names.
Examples
--------
>>> parse_notifier_name('a')
['a']
>>> parse_notifier_name(['a','b'])
['a', 'b']
>>> parse_notifier_name(None)
['anytrait'] | Below is the instruction that describes the task:
### Input:
Convert the name argument to a list of names.
Examples
--------
>>> parse_notifier_name('a')
['a']
>>> parse_notifier_name(['a','b'])
['a', 'b']
>>> parse_notifier_name(None)
['anytrait']
### Response:
def parse_notifier_name(name):
"""Convert the name argument to a list of names.
Examples
--------
>>> parse_notifier_name('a')
['a']
>>> parse_notifier_name(['a','b'])
['a', 'b']
>>> parse_notifier_name(None)
['anytrait']
"""
if isinstance(name, str):
return [name]
elif name is None:
return ['anytrait']
elif isinstance(name, (list, tuple)):
for n in name:
assert isinstance(n, str), "names must be strings"
return name |
def calculate_heading_longpath(locator1, locator2):
"""calculates the heading from the first to the second locator (long path)
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Long path heading in deg
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the long path heading from locator1 to locator2
>>> from pyhamtools.locator import calculate_heading_longpath
>>> calculate_heading_longpath("JN48QM", "QF67bf")
254.3136
"""
heading = calculate_heading(locator1, locator2)
lp = (heading + 180)%360
return lp | calculates the heading from the first to the second locator (long path)
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Long path heading in deg
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the long path heading from locator1 to locator2
>>> from pyhamtools.locator import calculate_heading_longpath
>>> calculate_heading_longpath("JN48QM", "QF67bf")
254.3136 | Below is the instruction that describes the task:
### Input:
calculates the heading from the first to the second locator (long path)
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Long path heading in deg
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the long path heading from locator1 to locator2
>>> from pyhamtools.locator import calculate_heading_longpath
>>> calculate_heading_longpath("JN48QM", "QF67bf")
254.3136
### Response:
def calculate_heading_longpath(locator1, locator2):
"""calculates the heading from the first to the second locator (long path)
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Long path heading in deg
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the long path heading from locator1 to locator2
>>> from pyhamtools.locator import calculate_heading_longpath
>>> calculate_heading_longpath("JN48QM", "QF67bf")
254.3136
"""
heading = calculate_heading(locator1, locator2)
lp = (heading + 180)%360
return lp |
def value_attr(attr_name):
"""
Creates a getter that will retrieve value's attribute
with specified name.
@param attr_name: the name of an attribute belonging to the value.
@type attr_name: str
"""
def value_attr(value, context, **_params):
value = getattr(value, attr_name)
return _attr(value)
return value_attr | Creates a getter that will retrieve value's attribute
with specified name.
@param attr_name: the name of an attribute belonging to the value.
@type attr_name: str | Below is the the instruction that describes the task:
### Input:
Creates a getter that will retrieve value's attribute
with specified name.
@param attr_name: the name of an attribute belonging to the value.
@type attr_name: str
### Response:
def value_attr(attr_name):
"""
Creates a getter that will retrieve value's attribute
with specified name.
@param attr_name: the name of an attribute belonging to the value.
@type attr_name: str
"""
def value_attr(value, context, **_params):
value = getattr(value, attr_name)
return _attr(value)
return value_attr |
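A minimal standalone sketch of how a getter factory like the one above can be used; the make_getter name, the _attr stand-in and the Point class are illustrative assumptions, since the real _attr helper is defined elsewhere.

from collections import namedtuple

def _attr(value):
    # Hypothetical stand-in for the library's _attr wrapper (assumption).
    return value

def make_getter(attr_name):
    # Same pattern as value_attr above: close over the attribute name.
    def getter(value, context, **_params):
        return _attr(getattr(value, attr_name))
    return getter

Point = namedtuple("Point", ["x", "y"])
get_x = make_getter("x")
print(get_x(Point(3, 4), context=None))  # -> 3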
def uniqualize(l,**kwargs):
'''
from elist.elist import *
l = [1, 2, 2]
new = uniqualize(l)
new
id(l)
id(new)
####
l = [1, 2, 2]
rslt = uniqualize(l,mode="original")
rslt
id(l)
id(rslt)
'''
if('mode' in kwargs):
mode = kwargs['mode']
else:
mode = 'new'
pt = copy.deepcopy(l)
seqs =[]
freq = {}
for i in range(0,pt.__len__()):
v = pt[i]
if(v in freq):
freq[v] = freq[v] + 1
else:
freq[v] = 0
seqs.append(i)
    ##### The part below is the performance bottleneck; append is particularly time-consuming
npt = select_seqs(pt,seqs)
########################
pt = npt
if(mode == 'new'):
return(npt)
else:
l.clear()
l.extend(npt)
return(l) | from elist.elist import *
l = [1, 2, 2]
new = uniqualize(l)
new
id(l)
id(new)
####
l = [1, 2, 2]
rslt = uniqualize(l,mode="original")
rslt
id(l)
id(rslt) | Below is the the instruction that describes the task:
### Input:
from elist.elist import *
l = [1, 2, 2]
new = uniqualize(l)
new
id(l)
id(new)
####
l = [1, 2, 2]
rslt = uniqualize(l,mode="original")
rslt
id(l)
id(rslt)
### Response:
def uniqualize(l,**kwargs):
'''
from elist.elist import *
l = [1, 2, 2]
new = uniqualize(l)
new
id(l)
id(new)
####
l = [1, 2, 2]
rslt = uniqualize(l,mode="original")
rslt
id(l)
id(rslt)
'''
if('mode' in kwargs):
mode = kwargs['mode']
else:
mode = 'new'
pt = copy.deepcopy(l)
seqs =[]
freq = {}
for i in range(0,pt.__len__()):
v = pt[i]
if(v in freq):
freq[v] = freq[v] + 1
else:
freq[v] = 0
seqs.append(i)
    ##### The part below is the performance bottleneck; append is particularly time-consuming
npt = select_seqs(pt,seqs)
########################
pt = npt
if(mode == 'new'):
return(npt)
else:
l.clear()
l.extend(npt)
return(l) |
def list_build_records(page_size=200, page_index=0, sort="", q=""):
"""
List all BuildRecords
"""
data = list_build_records_raw(page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) | List all BuildRecords | Below is the the instruction that describes the task:
### Input:
List all BuildRecords
### Response:
def list_build_records(page_size=200, page_index=0, sort="", q=""):
"""
List all BuildRecords
"""
data = list_build_records_raw(page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) |
def setup(app):
""" Initializer for Sphinx extension API.
See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
"""
lexer = MarkdownLexer()
for alias in lexer.aliases:
app.add_lexer(alias, lexer)
return dict(version=__version__) | Initializer for Sphinx extension API.
See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions. | Below is the the instruction that describes the task:
### Input:
Initializer for Sphinx extension API.
See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
### Response:
def setup(app):
""" Initializer for Sphinx extension API.
See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
"""
lexer = MarkdownLexer()
for alias in lexer.aliases:
app.add_lexer(alias, lexer)
return dict(version=__version__) |
def generate_k(order, secexp, hash_func, data):
'''
order - order of the DSA generator used in the signature
secexp - secure exponent (private key) in numeric form
hash_func - reference to the same hash function used for generating hash
data - hash in binary form of the signing data
'''
qlen = bit_length(order)
holen = hash_func().digest_size
rolen = (qlen + 7) / 8
bx = number_to_string(secexp, order) + bits2octets(data, order)
# Step B
v = b('\x01') * holen
# Step C
k = b('\x00') * holen
# Step D
k = hmac.new(k, v+b('\x00')+bx, hash_func).digest()
# Step E
v = hmac.new(k, v, hash_func).digest()
# Step F
k = hmac.new(k, v+b('\x01')+bx, hash_func).digest()
# Step G
v = hmac.new(k, v, hash_func).digest()
# Step H
while True:
# Step H1
t = b('')
# Step H2
while len(t) < rolen:
v = hmac.new(k, v, hash_func).digest()
t += v
# Step H3
secret = bits2int(t, qlen)
if secret >= 1 and secret < order:
return secret
k = hmac.new(k, v+b('\x00'), hash_func).digest()
v = hmac.new(k, v, hash_func).digest() | order - order of the DSA generator used in the signature
secexp - secure exponent (private key) in numeric form
hash_func - reference to the same hash function used for generating hash
data - hash in binary form of the signing data | Below is the the instruction that describes the task:
### Input:
order - order of the DSA generator used in the signature
secexp - secure exponent (private key) in numeric form
hash_func - reference to the same hash function used for generating hash
data - hash in binary form of the signing data
### Response:
def generate_k(order, secexp, hash_func, data):
'''
order - order of the DSA generator used in the signature
secexp - secure exponent (private key) in numeric form
hash_func - reference to the same hash function used for generating hash
data - hash in binary form of the signing data
'''
qlen = bit_length(order)
holen = hash_func().digest_size
rolen = (qlen + 7) / 8
bx = number_to_string(secexp, order) + bits2octets(data, order)
# Step B
v = b('\x01') * holen
# Step C
k = b('\x00') * holen
# Step D
k = hmac.new(k, v+b('\x00')+bx, hash_func).digest()
# Step E
v = hmac.new(k, v, hash_func).digest()
# Step F
k = hmac.new(k, v+b('\x01')+bx, hash_func).digest()
# Step G
v = hmac.new(k, v, hash_func).digest()
# Step H
while True:
# Step H1
t = b('')
# Step H2
while len(t) < rolen:
v = hmac.new(k, v, hash_func).digest()
t += v
# Step H3
secret = bits2int(t, qlen)
if secret >= 1 and secret < order:
return secret
k = hmac.new(k, v+b('\x00'), hash_func).digest()
v = hmac.new(k, v, hash_func).digest() |
def as_dict(self, join='.'):
"""
Returns the error as a path to message dictionary. Paths are joined
with the ``join`` string.
"""
if self.path:
path = [str(node) for node in self.path]
else:
path = ''
return { join.join(path): self.message } | Returns the error as a path to message dictionary. Paths are joined
with the ``join`` string. | Below is the the instruction that describes the task:
### Input:
Returns the error as a path to message dictionary. Paths are joined
with the ``join`` string.
### Response:
def as_dict(self, join='.'):
"""
Returns the error as a path to message dictionary. Paths are joined
with the ``join`` string.
"""
if self.path:
path = [str(node) for node in self.path]
else:
path = ''
return { join.join(path): self.message } |
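A small standalone illustration of the path-joining behaviour described above; the SimpleNamespace stand-in for the error object is an assumption.

from types import SimpleNamespace

err = SimpleNamespace(path=["user", "address", 0], message="is required")
path = [str(node) for node in err.path]
print({".".join(path): err.message})  # -> {'user.address.0': 'is required'}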
def add_row(self, data: list=None):
"""
Add a row of data to the current widget, add a <Tab> \
binding to the last element of the last row, and set \
the focus at the beginning of the next row.
:param data: a row of data
:return: None
"""
# validation
if self.headers and data:
if len(self.headers) != len(data):
raise ValueError
offset = 0 if not self.headers else 1
row = list()
if data:
for i, element in enumerate(data):
contents = '' if element is None else str(element)
entry = ttk.Entry(self)
entry.insert(0, contents)
entry.grid(row=len(self._rows) + offset,
column=i,
sticky='E,W')
row.append(entry)
else:
for i in range(self.num_of_columns):
entry = ttk.Entry(self)
entry.grid(row=len(self._rows) + offset,
column=i,
sticky='E,W')
row.append(entry)
self._rows.append(row)
# clear all bindings
for row in self._rows:
for widget in row:
widget.unbind('<Tab>')
def add(e):
self.add_row()
last_entry = self._rows[-1][-1]
last_entry.bind('<Tab>', add)
e = self._rows[-1][0]
e.focus_set()
self._redraw() | Add a row of data to the current widget, add a <Tab> \
binding to the last element of the last row, and set \
the focus at the beginning of the next row.
:param data: a row of data
:return: None | Below is the the instruction that describes the task:
### Input:
Add a row of data to the current widget, add a <Tab> \
binding to the last element of the last row, and set \
the focus at the beginning of the next row.
:param data: a row of data
:return: None
### Response:
def add_row(self, data: list=None):
"""
Add a row of data to the current widget, add a <Tab> \
binding to the last element of the last row, and set \
the focus at the beginning of the next row.
:param data: a row of data
:return: None
"""
# validation
if self.headers and data:
if len(self.headers) != len(data):
raise ValueError
offset = 0 if not self.headers else 1
row = list()
if data:
for i, element in enumerate(data):
contents = '' if element is None else str(element)
entry = ttk.Entry(self)
entry.insert(0, contents)
entry.grid(row=len(self._rows) + offset,
column=i,
sticky='E,W')
row.append(entry)
else:
for i in range(self.num_of_columns):
entry = ttk.Entry(self)
entry.grid(row=len(self._rows) + offset,
column=i,
sticky='E,W')
row.append(entry)
self._rows.append(row)
# clear all bindings
for row in self._rows:
for widget in row:
widget.unbind('<Tab>')
def add(e):
self.add_row()
last_entry = self._rows[-1][-1]
last_entry.bind('<Tab>', add)
e = self._rows[-1][0]
e.focus_set()
self._redraw() |
def visit_For(self, node):
"""
Create For representation for Cxx generation.
Examples
--------
>> for i in xrange(10):
>> ... work ...
Becomes
>> typename returnable<decltype(__builtin__.xrange(10))>::type __iterX
= __builtin__.xrange(10);
>> ... possible container size reservation ...
>> for (auto&& i: __iterX)
>> ... the work ...
    This function also handles assignment for local variables.
    We can notice that three kinds of loops are possible:
- Normal for loop on iterator
- Autofor loop.
- Normal for loop using integer variable iteration
    The kind of loop used depends on OpenMP, yield use and variable scope.
"""
if not isinstance(node.target, ast.Name):
raise PythranSyntaxError(
"Using something other than an identifier as loop target",
node.target)
target = self.visit(node.target)
# Handle the body of the for loop
loop_body = Block([self.visit(stmt) for stmt in node.body])
# Declare local variables at the top of the loop body
loop_body = self.process_locals(node, loop_body, node.target.id)
iterable = self.visit(node.iter)
if self.can_use_c_for(node):
header, loop = self.gen_c_for(node, target, loop_body)
else:
if self.can_use_autofor(node):
header = []
self.ldecls.remove(node.target.id)
autofor = AutoFor(target, iterable, loop_body)
loop = [self.process_omp_attachements(node, autofor)]
else:
# Iterator declaration
local_iter = "__iter{0}".format(id(node))
local_iter_decl = self.types.builder.Assignable(
self.types[node.iter])
self.handle_omp_for(node, local_iter)
# Assign iterable
# For C loop, it avoids issues
# if the upper bound is assigned in the loop
asgnt = self.make_assign(local_iter_decl, local_iter, iterable)
header = [Statement(asgnt)]
loop = self.gen_for(node, target, local_iter, local_iter_decl,
loop_body)
# For xxxComprehension, it is replaced by a for loop. In this case,
# pre-allocate size of container.
for comp in metadata.get(node, metadata.Comprehension):
header.append(Statement("pythonic::utils::reserve({0},{1})".format(
comp.target,
iterable)))
return Block(header + loop) | Create For representation for Cxx generation.
Examples
--------
>> for i in xrange(10):
>> ... work ...
Becomes
>> typename returnable<decltype(__builtin__.xrange(10))>::type __iterX
= __builtin__.xrange(10);
>> ... possible container size reservation ...
>> for (auto&& i: __iterX)
>> ... the work ...
    This function also handles assignment for local variables.
    We can notice that three kinds of loops are possible:
- Normal for loop on iterator
- Autofor loop.
- Normal for loop using integer variable iteration
    The kind of loop used depends on OpenMP, yield use and variable scope. | Below is the the instruction that describes the task:
### Input:
Create For representation for Cxx generation.
Examples
--------
>> for i in xrange(10):
>> ... work ...
Becomes
>> typename returnable<decltype(__builtin__.xrange(10))>::type __iterX
= __builtin__.xrange(10);
>> ... possible container size reservation ...
>> for (auto&& i: __iterX)
>> ... the work ...
    This function also handles assignment for local variables.
    We can notice that three kinds of loops are possible:
- Normal for loop on iterator
- Autofor loop.
- Normal for loop using integer variable iteration
    The kind of loop used depends on OpenMP, yield use and variable scope.
### Response:
def visit_For(self, node):
"""
Create For representation for Cxx generation.
Examples
--------
>> for i in xrange(10):
>> ... work ...
Becomes
>> typename returnable<decltype(__builtin__.xrange(10))>::type __iterX
= __builtin__.xrange(10);
>> ... possible container size reservation ...
>> for (auto&& i: __iterX)
>> ... the work ...
    This function also handles assignment for local variables.
    We can notice that three kinds of loops are possible:
- Normal for loop on iterator
- Autofor loop.
- Normal for loop using integer variable iteration
    The kind of loop used depends on OpenMP, yield use and variable scope.
"""
if not isinstance(node.target, ast.Name):
raise PythranSyntaxError(
"Using something other than an identifier as loop target",
node.target)
target = self.visit(node.target)
# Handle the body of the for loop
loop_body = Block([self.visit(stmt) for stmt in node.body])
# Declare local variables at the top of the loop body
loop_body = self.process_locals(node, loop_body, node.target.id)
iterable = self.visit(node.iter)
if self.can_use_c_for(node):
header, loop = self.gen_c_for(node, target, loop_body)
else:
if self.can_use_autofor(node):
header = []
self.ldecls.remove(node.target.id)
autofor = AutoFor(target, iterable, loop_body)
loop = [self.process_omp_attachements(node, autofor)]
else:
# Iterator declaration
local_iter = "__iter{0}".format(id(node))
local_iter_decl = self.types.builder.Assignable(
self.types[node.iter])
self.handle_omp_for(node, local_iter)
# Assign iterable
# For C loop, it avoids issues
# if the upper bound is assigned in the loop
asgnt = self.make_assign(local_iter_decl, local_iter, iterable)
header = [Statement(asgnt)]
loop = self.gen_for(node, target, local_iter, local_iter_decl,
loop_body)
# For xxxComprehension, it is replaced by a for loop. In this case,
# pre-allocate size of container.
for comp in metadata.get(node, metadata.Comprehension):
header.append(Statement("pythonic::utils::reserve({0},{1})".format(
comp.target,
iterable)))
return Block(header + loop) |
def result(self):
"""Formats the result."""
self.__result.sort(cmp = self.__cmp, key = self.__key, reverse = self.__reverse)
return self.__result | Formats the result. | Below is the the instruction that describes the task:
### Input:
Formats the result.
### Response:
def result(self):
"""Formats the result."""
self.__result.sort(cmp = self.__cmp, key = self.__key, reverse = self.__reverse)
return self.__result |
def communicate(self, input=None, timeout=-1):
"""Communicate with the child and return its output.
If *input* is provided, it is sent to the client. Concurrent with
sending the input, the child's standard output and standard error are
read, until the child exits.
The return value is a tuple ``(stdout_data, stderr_data)`` containing
the data read from standard output and standard error.
"""
if self._process is None:
raise RuntimeError('no child process')
if timeout == -1:
timeout = self._timeout
output = [[], []]
def writer(stream, data):
offset = 0
while offset < len(data):
buf = data[offset:offset+4096]
stream.write(buf)
offset += len(buf)
stream.close()
def reader(stream, data):
while True:
if self._encoding:
buf = stream.read(4096)
else:
buf = stream.read1()
if not buf:
break
data.append(buf)
if self.stdin:
fibers.spawn(writer, self.stdin, input or b'')
if self.stdout:
fibers.spawn(reader, self.stdout, output[0])
if self.stderr:
fibers.spawn(reader, self.stderr, output[1])
self.wait(timeout)
empty = '' if self._encoding else b''
stdout_data = empty.join(output[0])
stderr_data = empty.join(output[1])
return (stdout_data, stderr_data) | Communicate with the child and return its output.
If *input* is provided, it is sent to the client. Concurrent with
sending the input, the child's standard output and standard error are
read, until the child exits.
The return value is a tuple ``(stdout_data, stderr_data)`` containing
the data read from standard output and standard error. | Below is the the instruction that describes the task:
### Input:
Communicate with the child and return its output.
If *input* is provided, it is sent to the client. Concurrent with
sending the input, the child's standard output and standard error are
read, until the child exits.
The return value is a tuple ``(stdout_data, stderr_data)`` containing
the data read from standard output and standard error.
### Response:
def communicate(self, input=None, timeout=-1):
"""Communicate with the child and return its output.
If *input* is provided, it is sent to the client. Concurrent with
sending the input, the child's standard output and standard error are
read, until the child exits.
The return value is a tuple ``(stdout_data, stderr_data)`` containing
the data read from standard output and standard error.
"""
if self._process is None:
raise RuntimeError('no child process')
if timeout == -1:
timeout = self._timeout
output = [[], []]
def writer(stream, data):
offset = 0
while offset < len(data):
buf = data[offset:offset+4096]
stream.write(buf)
offset += len(buf)
stream.close()
def reader(stream, data):
while True:
if self._encoding:
buf = stream.read(4096)
else:
buf = stream.read1()
if not buf:
break
data.append(buf)
if self.stdin:
fibers.spawn(writer, self.stdin, input or b'')
if self.stdout:
fibers.spawn(reader, self.stdout, output[0])
if self.stderr:
fibers.spawn(reader, self.stderr, output[1])
self.wait(timeout)
empty = '' if self._encoding else b''
stdout_data = empty.join(output[0])
stderr_data = empty.join(output[1])
return (stdout_data, stderr_data) |
def evaluate_cartesian(self, s, t, _verify=True):
r"""Compute a point on the surface.
Evaluates :math:`B\left(1 - s - t, s, t\right)` by calling
:meth:`evaluate_barycentric`:
This method acts as a (partial) inverse to :meth:`locate`.
.. testsetup:: surface-cartesian
import numpy as np
import bezier
.. doctest:: surface-cartesian
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 0.5, 1.0 , 0.0, 0.5, 0.25],
... [0.0, 0.5, 0.625, 0.5, 0.5, 1.0 ],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> point = surface.evaluate_cartesian(0.125, 0.375)
>>> point
array([[0.16015625],
[0.44726562]])
>>> surface.evaluate_barycentric(0.5, 0.125, 0.375)
array([[0.16015625],
[0.44726562]])
Args:
s (float): Parameter along the reference triangle.
t (float): Parameter along the reference triangle.
_verify (Optional[bool]): Indicates if the coordinates should be
verified inside of the reference triangle. Defaults to
:data:`True`.
Returns:
numpy.ndarray: The point on the surface (as a two dimensional
NumPy array).
"""
if _verify:
self._verify_cartesian(s, t)
return _surface_helpers.evaluate_barycentric(
self._nodes, self._degree, 1.0 - s - t, s, t
) | r"""Compute a point on the surface.
Evaluates :math:`B\left(1 - s - t, s, t\right)` by calling
:meth:`evaluate_barycentric`:
This method acts as a (partial) inverse to :meth:`locate`.
.. testsetup:: surface-cartesian
import numpy as np
import bezier
.. doctest:: surface-cartesian
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 0.5, 1.0 , 0.0, 0.5, 0.25],
... [0.0, 0.5, 0.625, 0.5, 0.5, 1.0 ],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> point = surface.evaluate_cartesian(0.125, 0.375)
>>> point
array([[0.16015625],
[0.44726562]])
>>> surface.evaluate_barycentric(0.5, 0.125, 0.375)
array([[0.16015625],
[0.44726562]])
Args:
s (float): Parameter along the reference triangle.
t (float): Parameter along the reference triangle.
_verify (Optional[bool]): Indicates if the coordinates should be
verified inside of the reference triangle. Defaults to
:data:`True`.
Returns:
numpy.ndarray: The point on the surface (as a two dimensional
NumPy array). | Below is the the instruction that describes the task:
### Input:
r"""Compute a point on the surface.
Evaluates :math:`B\left(1 - s - t, s, t\right)` by calling
:meth:`evaluate_barycentric`:
This method acts as a (partial) inverse to :meth:`locate`.
.. testsetup:: surface-cartesian
import numpy as np
import bezier
.. doctest:: surface-cartesian
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 0.5, 1.0 , 0.0, 0.5, 0.25],
... [0.0, 0.5, 0.625, 0.5, 0.5, 1.0 ],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> point = surface.evaluate_cartesian(0.125, 0.375)
>>> point
array([[0.16015625],
[0.44726562]])
>>> surface.evaluate_barycentric(0.5, 0.125, 0.375)
array([[0.16015625],
[0.44726562]])
Args:
s (float): Parameter along the reference triangle.
t (float): Parameter along the reference triangle.
_verify (Optional[bool]): Indicates if the coordinates should be
verified inside of the reference triangle. Defaults to
:data:`True`.
Returns:
numpy.ndarray: The point on the surface (as a two dimensional
NumPy array).
### Response:
def evaluate_cartesian(self, s, t, _verify=True):
r"""Compute a point on the surface.
Evaluates :math:`B\left(1 - s - t, s, t\right)` by calling
:meth:`evaluate_barycentric`:
This method acts as a (partial) inverse to :meth:`locate`.
.. testsetup:: surface-cartesian
import numpy as np
import bezier
.. doctest:: surface-cartesian
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 0.5, 1.0 , 0.0, 0.5, 0.25],
... [0.0, 0.5, 0.625, 0.5, 0.5, 1.0 ],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> point = surface.evaluate_cartesian(0.125, 0.375)
>>> point
array([[0.16015625],
[0.44726562]])
>>> surface.evaluate_barycentric(0.5, 0.125, 0.375)
array([[0.16015625],
[0.44726562]])
Args:
s (float): Parameter along the reference triangle.
t (float): Parameter along the reference triangle.
_verify (Optional[bool]): Indicates if the coordinates should be
verified inside of the reference triangle. Defaults to
:data:`True`.
Returns:
numpy.ndarray: The point on the surface (as a two dimensional
NumPy array).
"""
if _verify:
self._verify_cartesian(s, t)
return _surface_helpers.evaluate_barycentric(
self._nodes, self._degree, 1.0 - s - t, s, t
) |
def fetch(self):
"""
Fetch & return a new `SSHKey` object representing the SSH key's current
state
:rtype: SSHKey
:raises DOAPIError: if the API endpoint replies with an error (e.g., if
the SSH key no longer exists)
"""
api = self.doapi_manager
return api._ssh_key(api.request(self.url)["ssh_key"]) | Fetch & return a new `SSHKey` object representing the SSH key's current
state
:rtype: SSHKey
:raises DOAPIError: if the API endpoint replies with an error (e.g., if
the SSH key no longer exists) | Below is the the instruction that describes the task:
### Input:
Fetch & return a new `SSHKey` object representing the SSH key's current
state
:rtype: SSHKey
:raises DOAPIError: if the API endpoint replies with an error (e.g., if
the SSH key no longer exists)
### Response:
def fetch(self):
"""
Fetch & return a new `SSHKey` object representing the SSH key's current
state
:rtype: SSHKey
:raises DOAPIError: if the API endpoint replies with an error (e.g., if
the SSH key no longer exists)
"""
api = self.doapi_manager
return api._ssh_key(api.request(self.url)["ssh_key"]) |
def jaccard(seq1, seq2):
"""Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
"""
set1, set2 = set(seq1), set(seq2)
return 1 - len(set1 & set2) / float(len(set1 | set2)) | Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different. | Below is the the instruction that describes the task:
### Input:
Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
### Response:
def jaccard(seq1, seq2):
"""Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
"""
set1, set2 = set(seq1), set(seq2)
return 1 - len(set1 & set2) / float(len(set1 | set2)) |
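A quick standalone check of the distance above; the function is re-declared here only so the snippet runs on its own.

def jaccard(seq1, seq2):
    set1, set2 = set(seq1), set(seq2)
    return 1 - len(set1 & set2) / float(len(set1 | set2))

print(jaccard("abc", "abc"))  # 0.0 -> identical sets
print(jaccard("abc", "abd"))  # 0.5 -> half of the union is shared
print(jaccard("abc", "xyz"))  # 1.0 -> disjoint sets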
def has_tsm(self):
"""True if this particular server have a TSM based service processor
"""
if (self.oemid['manufacturer_id'] == 19046 and
self.oemid['device_id'] == 32):
try:
self.ipmicmd.xraw_command(netfn=0x3a, command=0xf)
except pygexc.IpmiException as ie:
if ie.ipmicode == 193:
return False
raise
return True
        return False | True if this particular server has a TSM based service processor | Below is the the instruction that describes the task:
### Input:
True if this particular server has a TSM based service processor
### Response:
def has_tsm(self):
"""True if this particular server have a TSM based service processor
"""
if (self.oemid['manufacturer_id'] == 19046 and
self.oemid['device_id'] == 32):
try:
self.ipmicmd.xraw_command(netfn=0x3a, command=0xf)
except pygexc.IpmiException as ie:
if ie.ipmicode == 193:
return False
raise
return True
return False |
def summary(processors, metrics, context):
"""Print the summary"""
# display aggregated metric values on language level
def display_header(processors, before='', after=''):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_header()
print(after)
def display_separator(processors, before='', after=''):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_separator()
print(after)
def display_metrics(processors, before='', after='', metrics=[]):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_metrics(metrics)
print(after)
summary = {}
for m in metrics:
lang = metrics[m]['language']
has_key = lang in summary
if not has_key:
summary[lang] = {'file_count': 0, 'language': lang}
summary[lang]['file_count'] += 1
for i in metrics[m]:
if i not in ['sloc', 'comments', 'mccabe']: # include metrics to be used
continue
if not has_key:
summary[lang][i] = 0
summary[lang][i] += metrics[m][i]
total = {'language': 'Total'}
for m in summary:
for i in summary[m]:
if i == 'language':
continue
if i not in total:
total[i] = 0
total[i] += summary[m][i]
print('Metrics Summary:')
display_header(processors, 'Files', '')
display_separator(processors, '-'*5, '')
for k in sorted(summary.keys(), key=str.lower):
display_metrics(processors, '%5d' %
summary[k]['file_count'], '', summary[k])
display_separator(processors, '-'*5, '')
display_metrics(processors, '%5d' % total['file_count'],
'', total) | Print the summary | Below is the the instruction that describes the task:
### Input:
Print the summary
### Response:
def summary(processors, metrics, context):
"""Print the summary"""
# display aggregated metric values on language level
def display_header(processors, before='', after=''):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_header()
print(after)
def display_separator(processors, before='', after=''):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_separator()
print(after)
def display_metrics(processors, before='', after='', metrics=[]):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_metrics(metrics)
print(after)
summary = {}
for m in metrics:
lang = metrics[m]['language']
has_key = lang in summary
if not has_key:
summary[lang] = {'file_count': 0, 'language': lang}
summary[lang]['file_count'] += 1
for i in metrics[m]:
if i not in ['sloc', 'comments', 'mccabe']: # include metrics to be used
continue
if not has_key:
summary[lang][i] = 0
summary[lang][i] += metrics[m][i]
total = {'language': 'Total'}
for m in summary:
for i in summary[m]:
if i == 'language':
continue
if i not in total:
total[i] = 0
total[i] += summary[m][i]
print('Metrics Summary:')
display_header(processors, 'Files', '')
display_separator(processors, '-'*5, '')
for k in sorted(summary.keys(), key=str.lower):
display_metrics(processors, '%5d' %
summary[k]['file_count'], '', summary[k])
display_separator(processors, '-'*5, '')
display_metrics(processors, '%5d' % total['file_count'],
'', total) |
def energies(self, samples_like, dtype=np.float):
"""Determine the energies of the given samples.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of NumPy's array_like
structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`):
The data type of the returned energies.
Returns:
:obj:`numpy.ndarray`: The energies.
"""
samples, labels = as_samples(samples_like)
if all(v == idx for idx, v in enumerate(labels)):
ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(dtype=dtype)
else:
ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(variable_order=labels, dtype=dtype)
energies = samples.dot(ldata) + (samples[:, irow]*samples[:, icol]).dot(qdata) + offset
return np.asarray(energies, dtype=dtype) | Determine the energies of the given samples.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of NumPy's array_like
structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`):
The data type of the returned energies.
Returns:
:obj:`numpy.ndarray`: The energies. | Below is the the instruction that describes the task:
### Input:
Determine the energies of the given samples.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of NumPy's array_like
structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`):
The data type of the returned energies.
Returns:
:obj:`numpy.ndarray`: The energies.
### Response:
def energies(self, samples_like, dtype=np.float):
"""Determine the energies of the given samples.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of NumPy's array_like
structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`):
The data type of the returned energies.
Returns:
:obj:`numpy.ndarray`: The energies.
"""
samples, labels = as_samples(samples_like)
if all(v == idx for idx, v in enumerate(labels)):
ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(dtype=dtype)
else:
ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(variable_order=labels, dtype=dtype)
energies = samples.dot(ldata) + (samples[:, irow]*samples[:, icol]).dot(qdata) + offset
return np.asarray(energies, dtype=dtype) |
def get_pubmed_for_beleditor(pmid: str) -> Mapping[str, Any]:
"""Get fully annotated pubmed doc with Pubtator and full entity/annotation_types
Args:
pmid: Pubmed PMID
Returns:
Mapping[str, Any]: pubmed dictionary
"""
pubmed = get_pubmed(pmid)
pubtator = get_pubtator(pmid)
pubmed["annotations"] = copy.deepcopy(pubtator["annotations"])
# Add entity types and annotation types to annotations
pubmed = enhance_pubmed_annotations(pubmed)
return pubmed | Get fully annotated pubmed doc with Pubtator and full entity/annotation_types
Args:
pmid: Pubmed PMID
Returns:
Mapping[str, Any]: pubmed dictionary | Below is the the instruction that describes the task:
### Input:
Get fully annotated pubmed doc with Pubtator and full entity/annotation_types
Args:
pmid: Pubmed PMID
Returns:
Mapping[str, Any]: pubmed dictionary
### Response:
def get_pubmed_for_beleditor(pmid: str) -> Mapping[str, Any]:
"""Get fully annotated pubmed doc with Pubtator and full entity/annotation_types
Args:
pmid: Pubmed PMID
Returns:
Mapping[str, Any]: pubmed dictionary
"""
pubmed = get_pubmed(pmid)
pubtator = get_pubtator(pmid)
pubmed["annotations"] = copy.deepcopy(pubtator["annotations"])
# Add entity types and annotation types to annotations
pubmed = enhance_pubmed_annotations(pubmed)
return pubmed |
def newLayer(self, effect=''):
"""
Creates a new :py:class:`Layer` and set that as the active.
        :param effect: A string with the blend mode for that layer that will be used during the rendering process. The accepted values are: :code:`'soft_light','lighten','screen','dodge','addition','darken','multiply','hard_light','difference','subtract','grain_extract','grain_merge','divide','overlay'`.
:rtype: Nothing.
"""
self.layers.append(Layer(effect = effect))
self.activeLayer = len(self.layers)-1 | Creates a new :py:class:`Layer` and set that as the active.
        :param effect: A string with the blend mode for that layer that will be used during the rendering process. The accepted values are: :code:`'soft_light','lighten','screen','dodge','addition','darken','multiply','hard_light','difference','subtract','grain_extract','grain_merge','divide','overlay'`.
:rtype: Nothing. | Below is the the instruction that describes the task:
### Input:
Creates a new :py:class:`Layer` and set that as the active.
        :param effect: A string with the blend mode for that layer that will be used during the rendering process. The accepted values are: :code:`'soft_light','lighten','screen','dodge','addition','darken','multiply','hard_light','difference','subtract','grain_extract','grain_merge','divide','overlay'`.
:rtype: Nothing.
### Response:
def newLayer(self, effect=''):
"""
Creates a new :py:class:`Layer` and set that as the active.
    :param effect: A string with the blend mode for that layer that will be used during the rendering process. The accepted values are: :code:`'soft_light','lighten','screen','dodge','addition','darken','multiply','hard_light','difference','subtract','grain_extract','grain_merge','divide','overlay'`.
:rtype: Nothing.
"""
self.layers.append(Layer(effect = effect))
self.activeLayer = len(self.layers)-1 |
def open(self) -> bool:
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
if self.timeout is not None:
connection_params['timeout'] = self.timeout
try:
self.connection = smtplib.SMTP(self.host, self.port,
**connection_params)
# TLS
context = ssl.SSLContext(self._protocol())
if self.ssl_certfile:
context.load_cert_chain(certfile=self.ssl_certfile,
keyfile=self.ssl_keyfile)
self.connection.ehlo()
self.connection.starttls(context=context)
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
log.debug("Successful SMTP connection/login")
else:
log.debug("Successful SMTP connection (without login)")
return True
except smtplib.SMTPException:
log.debug("SMTP connection and/or login failed")
if not self.fail_silently:
raise | Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False). | Below is the the instruction that describes the task:
### Input:
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
### Response:
def open(self) -> bool:
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
if self.timeout is not None:
connection_params['timeout'] = self.timeout
try:
self.connection = smtplib.SMTP(self.host, self.port,
**connection_params)
# TLS
context = ssl.SSLContext(self._protocol())
if self.ssl_certfile:
context.load_cert_chain(certfile=self.ssl_certfile,
keyfile=self.ssl_keyfile)
self.connection.ehlo()
self.connection.starttls(context=context)
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
log.debug("Successful SMTP connection/login")
else:
log.debug("Successful SMTP connection (without login)")
return True
except smtplib.SMTPException:
log.debug("SMTP connection and/or login failed")
if not self.fail_silently:
raise |
def Module(EPIC, campaign=None):
'''
Returns the module number for a given EPIC target.
'''
channel = Channel(EPIC, campaign=campaign)
nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25,
10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49,
16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73,
23: 77, 24: 81}
for c in [channel, channel - 1, channel - 2, channel - 3]:
if c in nums.values():
for mod, chan in nums.items():
if chan == c:
return mod
return None | Returns the module number for a given EPIC target. | Below is the the instruction that describes the task:
### Input:
Returns the module number for a given EPIC target.
### Response:
def Module(EPIC, campaign=None):
'''
Returns the module number for a given EPIC target.
'''
channel = Channel(EPIC, campaign=campaign)
nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25,
10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49,
16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73,
23: 77, 24: 81}
for c in [channel, channel - 1, channel - 2, channel - 3]:
if c in nums.values():
for mod, chan in nums.items():
if chan == c:
return mod
return None |
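A standalone sketch of the channel-to-module lookup used above; the trimmed nums table and the helper name are illustrative assumptions (each module owns a block of four consecutive channels starting at the listed value).

nums = {2: 1, 3: 5, 4: 9}  # module -> first channel of its four-channel block (subset)

def module_for_channel(channel):
    # Walk back up to three channels to reach the first channel of the block.
    for c in (channel, channel - 1, channel - 2, channel - 3):
        for mod, chan in nums.items():
            if chan == c:
                return mod
    return None

print(module_for_channel(7))  # -> 3, since channels 5-8 belong to module 3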
def determine_extended_chord5(chord, shorthand=False, no_inversions=False,
no_polychords=False):
"""Determine the names of an extended chord."""
if len(chord) != 5:
        # warning raise exception: not an extended chord
return False
def inversion_exhauster(chord, shorthand, tries, result, polychords):
"""Recursive helper function."""
def add_result(short):
result.append((short, tries, chord[0]))
triads = determine_triad(chord[:3], True, True)
sevenths = determine_seventh(chord[:4], True, True, True)
# Determine polychords
if tries == 1 and not no_polychords:
polychords += determine_polychords(chord, shorthand)
intval4 = intervals.determine(chord[0], chord[4])
for seventh in sevenths:
seventh = seventh[len(chord[0]):]
if seventh == 'M7':
if intval4 == 'major second':
add_result('M9')
elif seventh == 'm7':
if intval4 == 'major second':
add_result('m9')
elif intval4 == 'perfect fourth':
add_result('m11')
elif seventh == '7':
if intval4 == 'major second':
add_result('9')
elif intval4 == 'minor second':
add_result('7b9')
elif intval4 == 'augmented second':
add_result('7#9')
elif intval4 == 'minor third':
add_result('7b12')
elif intval4 == 'augmented fourth':
add_result('7#11')
elif intval4 == 'major sixth':
add_result('13')
elif seventh == 'M6':
if intval4 == 'major second':
add_result('6/9')
elif intval4 == 'minor seventh':
add_result('6/7')
if tries != 5 and not no_inversions:
return inversion_exhauster([chord[-1]] + chord[:-1], shorthand,
tries + 1, result, polychords)
else:
res = []
for r in result:
if shorthand:
res.append(r[2] + r[0])
else:
res.append(r[2] + chord_shorthand_meaning[r[0]]
+ int_desc(r[1]))
return res + polychords
return inversion_exhauster(chord, shorthand, 1, [], []) | Determine the names of an extended chord. | Below is the the instruction that describes the task:
### Input:
Determine the names of an extended chord.
### Response:
def determine_extended_chord5(chord, shorthand=False, no_inversions=False,
no_polychords=False):
"""Determine the names of an extended chord."""
if len(chord) != 5:
        # warning raise exception: not an extended chord
return False
def inversion_exhauster(chord, shorthand, tries, result, polychords):
"""Recursive helper function."""
def add_result(short):
result.append((short, tries, chord[0]))
triads = determine_triad(chord[:3], True, True)
sevenths = determine_seventh(chord[:4], True, True, True)
# Determine polychords
if tries == 1 and not no_polychords:
polychords += determine_polychords(chord, shorthand)
intval4 = intervals.determine(chord[0], chord[4])
for seventh in sevenths:
seventh = seventh[len(chord[0]):]
if seventh == 'M7':
if intval4 == 'major second':
add_result('M9')
elif seventh == 'm7':
if intval4 == 'major second':
add_result('m9')
elif intval4 == 'perfect fourth':
add_result('m11')
elif seventh == '7':
if intval4 == 'major second':
add_result('9')
elif intval4 == 'minor second':
add_result('7b9')
elif intval4 == 'augmented second':
add_result('7#9')
elif intval4 == 'minor third':
add_result('7b12')
elif intval4 == 'augmented fourth':
add_result('7#11')
elif intval4 == 'major sixth':
add_result('13')
elif seventh == 'M6':
if intval4 == 'major second':
add_result('6/9')
elif intval4 == 'minor seventh':
add_result('6/7')
if tries != 5 and not no_inversions:
return inversion_exhauster([chord[-1]] + chord[:-1], shorthand,
tries + 1, result, polychords)
else:
res = []
for r in result:
if shorthand:
res.append(r[2] + r[0])
else:
res.append(r[2] + chord_shorthand_meaning[r[0]]
+ int_desc(r[1]))
return res + polychords
return inversion_exhauster(chord, shorthand, 1, [], []) |
def _get_args_contents(self):
"""
Mimic the argument formatting behaviour of
ActionBase._execute_module().
"""
return ' '.join(
'%s=%s' % (key, shlex_quote(str(self.args[key])))
for key in self.args
) + ' ' | Mimic the argument formatting behaviour of
ActionBase._execute_module(). | Below is the the instruction that describes the task:
### Input:
Mimic the argument formatting behaviour of
ActionBase._execute_module().
### Response:
def _get_args_contents(self):
"""
Mimic the argument formatting behaviour of
ActionBase._execute_module().
"""
return ' '.join(
'%s=%s' % (key, shlex_quote(str(self.args[key])))
for key in self.args
) + ' ' |
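A standalone illustration of the same quoting behaviour on Python 3; the args dict is hypothetical, and shlex.quote stands in for the shlex_quote helper used above (how that name is imported is not shown here).

from shlex import quote as shlex_quote

args = {"path": "/tmp/my file", "mode": 644}
print(" ".join("%s=%s" % (k, shlex_quote(str(args[k]))) for k in args) + " ")
# -> path='/tmp/my file' mode=644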
def _missing_(cls, value):
"""Lookup function used when value is not found."""
if not (isinstance(value, int) and 0x0000 <= value <= 0xFFFF):
raise ValueError('%r is not a valid %s' % (value, cls.__name__))
if 0x0001 <= value <= 0x0BB8:
extend_enum(cls, 'Registered by Xerox [0x%s]' % hex(value)[2:].upper().zfill(4), value)
return cls(value)
if 0x0020 <= value <= 0x003F:
extend_enum(cls, 'Experimental [0x%s]' % hex(value)[2:].upper().zfill(4), value)
return cls(value)
if 0x0BB9 <= value <= 0xFFFF:
extend_enum(cls, 'Dynamically Assigned [0x%s]' % hex(value)[2:].upper().zfill(4), value)
return cls(value)
if 0x4000 <= value <= 0x4FFF:
extend_enum(cls, 'Dynamically Assigned Socket Numbers [0x%s]' % hex(value)[2:].upper().zfill(4), value)
return cls(value)
if 0x8000 <= value <= 0xFFFF:
extend_enum(cls, 'Statically Assigned Socket Numbers [0x%s]' % hex(value)[2:].upper().zfill(4), value)
return cls(value)
super()._missing_(value) | Lookup function used when value is not found. | Below is the the instruction that describes the task:
### Input:
Lookup function used when value is not found.
### Response:
def _missing_(cls, value):
"""Lookup function used when value is not found."""
if not (isinstance(value, int) and 0x0000 <= value <= 0xFFFF):
raise ValueError('%r is not a valid %s' % (value, cls.__name__))
if 0x0001 <= value <= 0x0BB8:
extend_enum(cls, 'Registered by Xerox [0x%s]' % hex(value)[2:].upper().zfill(4), value)
return cls(value)
if 0x0020 <= value <= 0x003F:
extend_enum(cls, 'Experimental [0x%s]' % hex(value)[2:].upper().zfill(4), value)
return cls(value)
if 0x0BB9 <= value <= 0xFFFF:
extend_enum(cls, 'Dynamically Assigned [0x%s]' % hex(value)[2:].upper().zfill(4), value)
return cls(value)
if 0x4000 <= value <= 0x4FFF:
extend_enum(cls, 'Dynamically Assigned Socket Numbers [0x%s]' % hex(value)[2:].upper().zfill(4), value)
return cls(value)
if 0x8000 <= value <= 0xFFFF:
extend_enum(cls, 'Statically Assigned Socket Numbers [0x%s]' % hex(value)[2:].upper().zfill(4), value)
return cls(value)
super()._missing_(value) |
def marks(value):
"""list or KeyedList of ``Mark`` : Mark definitions
Marks are the visual objects (such as lines, bars, etc.) that
represent the data in the visualization space. See the :class:`Mark`
class for details.
"""
for i, entry in enumerate(value):
_assert_is_type('marks[{0}]'.format(i), entry, Mark) | list or KeyedList of ``Mark`` : Mark definitions
Marks are the visual objects (such as lines, bars, etc.) that
represent the data in the visualization space. See the :class:`Mark`
class for details. | Below is the the instruction that describes the task:
### Input:
list or KeyedList of ``Mark`` : Mark definitions
Marks are the visual objects (such as lines, bars, etc.) that
represent the data in the visualization space. See the :class:`Mark`
class for details.
### Response:
def marks(value):
"""list or KeyedList of ``Mark`` : Mark definitions
Marks are the visual objects (such as lines, bars, etc.) that
represent the data in the visualization space. See the :class:`Mark`
class for details.
"""
for i, entry in enumerate(value):
_assert_is_type('marks[{0}]'.format(i), entry, Mark) |
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
Rz= R**2.+z**2.
sqrtRz= numpy.sqrt(Rz)
return R*(1./Rz/(self.a+sqrtRz)-numpy.log(1.+sqrtRz/self.a)/sqrtRz/Rz) | NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-09 - Written - Bovy (NYU) | Below is the the instruction that describes the task:
### Input:
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
### Response:
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
Rz= R**2.+z**2.
sqrtRz= numpy.sqrt(Rz)
return R*(1./Rz/(self.a+sqrtRz)-numpy.log(1.+sqrtRz/self.a)/sqrtRz/Rz) |
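For reference, the expression returned above is what one gets from an NFW-like potential written in natural units where \Phi(r) = -\ln(1 + r/a)/r with r^2 = R^2 + z^2 (that normalisation is an assumption here); differentiating gives

F_R = -\frac{\partial \Phi}{\partial R}
    = \frac{R}{r}\left[\frac{1}{r\,(a+r)} - \frac{\ln(1 + r/a)}{r^{2}}\right]
    = R\left[\frac{1}{r^{2}(a+r)} - \frac{\ln(1 + r/a)}{r^{3}}\right],

which matches the returned value with Rz = r^2 and sqrtRz = r.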
def guest_reset(self, userid):
"""Reset z/VM instance."""
LOG.info("Begin to reset vm %s", userid)
self._smtclient.guest_reset(userid)
LOG.info("Complete reset vm %s", userid) | Reset z/VM instance. | Below is the the instruction that describes the task:
### Input:
Reset z/VM instance.
### Response:
def guest_reset(self, userid):
"""Reset z/VM instance."""
LOG.info("Begin to reset vm %s", userid)
self._smtclient.guest_reset(userid)
LOG.info("Complete reset vm %s", userid) |
def calcEndOfPrdvPP(self):
'''
Calculates end-of-period marginal marginal value using a pre-defined
array of next period market resources in self.mNrmNext.
Parameters
----------
none
Returns
-------
EndOfPrdvPP : np.array
End-of-period marginal marginal value of assets at each value in
the grid of assets.
'''
EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*self.PermGroFac**(-self.CRRA-1.0)*\
np.sum(self.PermShkVals_temp**(-self.CRRA-1.0)*self.vPPfuncNext(self.mNrmNext)
*self.ShkPrbs_temp,axis=0)
return EndOfPrdvPP | Calculates end-of-period marginal marginal value using a pre-defined
array of next period market resources in self.mNrmNext.
Parameters
----------
none
Returns
-------
EndOfPrdvPP : np.array
End-of-period marginal marginal value of assets at each value in
the grid of assets. | Below is the the instruction that describes the task:
### Input:
Calculates end-of-period marginal marginal value using a pre-defined
array of next period market resources in self.mNrmNext.
Parameters
----------
none
Returns
-------
EndOfPrdvPP : np.array
End-of-period marginal marginal value of assets at each value in
the grid of assets.
### Response:
def calcEndOfPrdvPP(self):
'''
Calculates end-of-period marginal marginal value using a pre-defined
array of next period market resources in self.mNrmNext.
Parameters
----------
none
Returns
-------
EndOfPrdvPP : np.array
End-of-period marginal marginal value of assets at each value in
the grid of assets.
'''
EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*self.PermGroFac**(-self.CRRA-1.0)*\
np.sum(self.PermShkVals_temp**(-self.CRRA-1.0)*self.vPPfuncNext(self.mNrmNext)
*self.ShkPrbs_temp,axis=0)
return EndOfPrdvPP |
def getElements(self, zero_based=True, pared=False):
"""
Get the elements of the mesh as a list of point index list.
:param zero_based: use zero based index of points if true otherwise use 1-based index of points.
:param pared: use the pared down list of points
:return: A list of point index lists
"""
points = self._points[:]
elements = self._elements[:]
offset = 0
if not zero_based:
offset = 1
np = None
if pared:
np = NodePare()
np.addPoints(points)
np.parePoints()
if pared or not zero_based:
modified_elements = []
for element in elements:
modified_element = [index + offset if np is None else np.getParedIndex(index) + offset
for index in element]
modified_elements.append(modified_element)
elements = modified_elements
return elements | Get the elements of the mesh as a list of point index list.
:param zero_based: use zero based index of points if true otherwise use 1-based index of points.
:param pared: use the pared down list of points
:return: A list of point index lists | Below is the the instruction that describes the task:
### Input:
Get the elements of the mesh as a list of point index list.
:param zero_based: use zero based index of points if true otherwise use 1-based index of points.
:param pared: use the pared down list of points
:return: A list of point index lists
### Response:
def getElements(self, zero_based=True, pared=False):
"""
Get the elements of the mesh as a list of point index list.
:param zero_based: use zero based index of points if true otherwise use 1-based index of points.
:param pared: use the pared down list of points
:return: A list of point index lists
"""
points = self._points[:]
elements = self._elements[:]
offset = 0
if not zero_based:
offset = 1
np = None
if pared:
np = NodePare()
np.addPoints(points)
np.parePoints()
if pared or not zero_based:
modified_elements = []
for element in elements:
modified_element = [index + offset if np is None else np.getParedIndex(index) + offset
for index in element]
modified_elements.append(modified_element)
elements = modified_elements
return elements |
def render(self, template, **kwargs):
"""Renders the template
:param template: The template to render.
The template is actually a file, which is usually generated
by :class:`rtcclient.template.Templater.getTemplate`
and can also be modified by user accordingly.
:param kwargs: The `kwargs` dict is used to fill the template.
            These two parameters are mandatory:
* description
* title
            Some of the parameters below (which may not be included in some
            customized workitem types) are mandatory if `keep` (parameter in
:class:`rtcclient.template.Templater.getTemplate`) is set to
`False`; Optional for otherwise.
* teamArea (Team Area)
* ownedBy (Owned By)
* plannedFor(Planned For)
* severity(Severity)
* priority(Priority)
* filedAgainst(Filed Against)
Actually all these needed keywords/attributes/fields can be
retrieved by :class:`rtcclient.template.Templater.listFields`
:return: the :class:`string` object
:rtype: string
"""
try:
temp = self.environment.get_template(template)
return temp.render(**kwargs)
except AttributeError:
err_msg = "Invalid value for 'template'"
self.log.error(err_msg)
raise exception.BadValue(err_msg) | Renders the template
:param template: The template to render.
The template is actually a file, which is usually generated
by :class:`rtcclient.template.Templater.getTemplate`
and can also be modified by user accordingly.
:param kwargs: The `kwargs` dict is used to fill the template.
            These two parameters are mandatory:
* description
* title
            Some of the parameters below (which may not be included in some
            customized workitem types) are mandatory if `keep` (parameter in
:class:`rtcclient.template.Templater.getTemplate`) is set to
`False`; Optional for otherwise.
* teamArea (Team Area)
* ownedBy (Owned By)
* plannedFor(Planned For)
* severity(Severity)
* priority(Priority)
* filedAgainst(Filed Against)
Actually all these needed keywords/attributes/fields can be
retrieved by :class:`rtcclient.template.Templater.listFields`
:return: the :class:`string` object
:rtype: string | Below is the the instruction that describes the task:
### Input:
Renders the template
:param template: The template to render.
The template is actually a file, which is usually generated
by :class:`rtcclient.template.Templater.getTemplate`
and can also be modified by user accordingly.
:param kwargs: The `kwargs` dict is used to fill the template.
            These two parameters are mandatory:
* description
* title
            Some of the parameters below (which may not be included in some
            customized workitem types) are mandatory if `keep` (parameter in
:class:`rtcclient.template.Templater.getTemplate`) is set to
`False`; Optional for otherwise.
* teamArea (Team Area)
* ownedBy (Owned By)
* plannedFor(Planned For)
* severity(Severity)
* priority(Priority)
* filedAgainst(Filed Against)
Actually all these needed keywords/attributes/fields can be
retrieved by :class:`rtcclient.template.Templater.listFields`
:return: the :class:`string` object
:rtype: string
### Response:
def render(self, template, **kwargs):
"""Renders the template
:param template: The template to render.
The template is actually a file, which is usually generated
by :class:`rtcclient.template.Templater.getTemplate`
and can also be modified by user accordingly.
:param kwargs: The `kwargs` dict is used to fill the template.
        These two parameters are mandatory:
* description
* title
        Some of the parameters below (which may not be included in some
        customized workitem types) are mandatory if `keep` (parameter in
:class:`rtcclient.template.Templater.getTemplate`) is set to
`False`; Optional for otherwise.
* teamArea (Team Area)
* ownedBy (Owned By)
* plannedFor(Planned For)
* severity(Severity)
* priority(Priority)
* filedAgainst(Filed Against)
Actually all these needed keywords/attributes/fields can be
retrieved by :class:`rtcclient.template.Templater.listFields`
:return: the :class:`string` object
:rtype: string
"""
try:
temp = self.environment.get_template(template)
return temp.render(**kwargs)
except AttributeError:
err_msg = "Invalid value for 'template'"
self.log.error(err_msg)
raise exception.BadValue(err_msg) |
def register(cls, name: str, plugin: Type[ConnectionPlugin]) -> None:
"""Registers a connection plugin with a specified name
Args:
name: name of the connection plugin to register
plugin: defined connection plugin class
Raises:
:obj:`nornir.core.exceptions.ConnectionPluginAlreadyRegistered` if
another plugin with the specified name was already registered
"""
existing_plugin = cls.available.get(name)
if existing_plugin is None:
cls.available[name] = plugin
elif existing_plugin != plugin:
raise ConnectionPluginAlreadyRegistered(
f"Connection plugin {plugin.__name__} can't be registered as "
f"{name!r} because plugin {existing_plugin.__name__} "
f"was already registered under this name"
) | Registers a connection plugin with a specified name
Args:
name: name of the connection plugin to register
plugin: defined connection plugin class
Raises:
:obj:`nornir.core.exceptions.ConnectionPluginAlreadyRegistered` if
another plugin with the specified name was already registered | Below is the the instruction that describes the task:
### Input:
Registers a connection plugin with a specified name
Args:
name: name of the connection plugin to register
plugin: defined connection plugin class
Raises:
:obj:`nornir.core.exceptions.ConnectionPluginAlreadyRegistered` if
another plugin with the specified name was already registered
### Response:
def register(cls, name: str, plugin: Type[ConnectionPlugin]) -> None:
"""Registers a connection plugin with a specified name
Args:
name: name of the connection plugin to register
plugin: defined connection plugin class
Raises:
:obj:`nornir.core.exceptions.ConnectionPluginAlreadyRegistered` if
another plugin with the specified name was already registered
"""
existing_plugin = cls.available.get(name)
if existing_plugin is None:
cls.available[name] = plugin
elif existing_plugin != plugin:
raise ConnectionPluginAlreadyRegistered(
f"Connection plugin {plugin.__name__} can't be registered as "
f"{name!r} because plugin {existing_plugin.__name__} "
f"was already registered under this name"
) |
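The registration above is deliberately idempotent: re-registering the same class under the same name is a no-op, and only a genuine name clash raises. A minimal self-contained sketch of the same pattern, using a plain dict and a hypothetical PluginAlreadyRegistered exception instead of Nornir's own classes:

class PluginAlreadyRegistered(Exception):
    """Raised when a name is already bound to a different plugin."""

class Registry:
    available = {}

    @classmethod
    def register(cls, name, plugin):
        existing = cls.available.get(name)
        if existing is None:
            cls.available[name] = plugin      # first registration wins
        elif existing is not plugin:
            raise PluginAlreadyRegistered(
                "{!r} is already bound to {}".format(name, existing.__name__)
            )

class DummyPlugin:
    pass

Registry.register("dummy", DummyPlugin)
Registry.register("dummy", DummyPlugin)      # same class again: silently accepted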
def connections(self, wait):
"""
wait for connections to both rabbitmq and elasticsearch to be made
before binding a routing key to a channel and sending messages to
elasticsearch
"""
while wait:
try:
params = pika.ConnectionParameters(host=self.rmq_host,
port=self.rmq_port)
connection = pika.BlockingConnection(params)
self.channel = connection.channel()
self.channel.exchange_declare(exchange='topic_recs',
exchange_type='topic')
result = self.channel.queue_declare()
self.queue_name = result.method.queue
self.es_conn = Elasticsearch([{'host': self.es_host,
'port': self.es_port}])
wait = False
print('connected to rabbitmq and elasticsearch...')
except Exception as e: # pragma: no cover
print(str(e))
print('waiting for connection to rabbitmq...' + str(e))
time.sleep(2)
wait = True | wait for connections to both rabbitmq and elasticsearch to be made
before binding a routing key to a channel and sending messages to
    elasticsearch | Below is the instruction that describes the task:
### Input:
wait for connections to both rabbitmq and elasticsearch to be made
before binding a routing key to a channel and sending messages to
elasticsearch
### Response:
def connections(self, wait):
"""
wait for connections to both rabbitmq and elasticsearch to be made
before binding a routing key to a channel and sending messages to
elasticsearch
"""
while wait:
try:
params = pika.ConnectionParameters(host=self.rmq_host,
port=self.rmq_port)
connection = pika.BlockingConnection(params)
self.channel = connection.channel()
self.channel.exchange_declare(exchange='topic_recs',
exchange_type='topic')
result = self.channel.queue_declare()
self.queue_name = result.method.queue
self.es_conn = Elasticsearch([{'host': self.es_host,
'port': self.es_port}])
wait = False
print('connected to rabbitmq and elasticsearch...')
except Exception as e: # pragma: no cover
print(str(e))
print('waiting for connection to rabbitmq...' + str(e))
time.sleep(2)
wait = True |
def sph_coords_to_pose(theta, psi):
""" Convert spherical coordinates to a pose.
Parameters
----------
theta : float
azimuth angle
psi : float
elevation angle
Returns
-------
:obj:`RigidTransformation`
rigid transformation corresponding to rotation with no translation
"""
# rotate about the z and y axes individually
rot_z = RigidTransform.z_axis_rotation(theta)
rot_y = RigidTransform.y_axis_rotation(psi)
R = rot_y.dot(rot_z)
return RigidTransform(rotation=R) | Convert spherical coordinates to a pose.
Parameters
----------
theta : float
azimuth angle
psi : float
elevation angle
Returns
-------
:obj:`RigidTransformation`
        rigid transformation corresponding to rotation with no translation | Below is the instruction that describes the task:
### Input:
Convert spherical coordinates to a pose.
Parameters
----------
theta : float
azimuth angle
psi : float
elevation angle
Returns
-------
:obj:`RigidTransformation`
rigid transformation corresponding to rotation with no translation
### Response:
def sph_coords_to_pose(theta, psi):
""" Convert spherical coordinates to a pose.
Parameters
----------
theta : float
azimuth angle
psi : float
elevation angle
Returns
-------
:obj:`RigidTransformation`
rigid transformation corresponding to rotation with no translation
"""
# rotate about the z and y axes individually
rot_z = RigidTransform.z_axis_rotation(theta)
rot_y = RigidTransform.y_axis_rotation(psi)
R = rot_y.dot(rot_z)
return RigidTransform(rotation=R) |
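The composition order matters here: the azimuth rotation about z is applied first, then the elevation rotation about y, which is why R = rot_y.dot(rot_z). A self-contained numpy sketch of the same construction (the RigidTransform class used above is assumed to live elsewhere, so plain 3x3 matrices stand in for it):

import numpy as np

def z_axis_rotation(theta):
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.0],
                     [s,  c, 0.0],
                     [0.0, 0.0, 1.0]])

def y_axis_rotation(psi):
    c, s = np.cos(psi), np.sin(psi)
    return np.array([[c, 0.0, s],
                     [0.0, 1.0, 0.0],
                     [-s, 0.0, c]])

theta, psi = np.radians(30.0), np.radians(45.0)
R = y_axis_rotation(psi).dot(z_axis_rotation(theta))   # azimuth first, then elevation
assert np.allclose(R.dot(R.T), np.eye(3))              # result is still a rotation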
def corners(bounds):
"""
Given a pair of axis aligned bounds, return all
8 corners of the bounding box.
Parameters
----------
bounds : (2,3) or (2,2) float
Axis aligned bounds
Returns
----------
corners : (8,3) float
Corner vertices of the cube
"""
bounds = np.asanyarray(bounds, dtype=np.float64)
if util.is_shape(bounds, (2, 2)):
bounds = np.column_stack((bounds, [0, 0]))
elif not util.is_shape(bounds, (2, 3)):
raise ValueError('bounds must be (2,2) or (2,3)!')
minx, miny, minz, maxx, maxy, maxz = np.arange(6)
corner_index = np.array([minx, miny, minz,
maxx, miny, minz,
maxx, maxy, minz,
minx, maxy, minz,
minx, miny, maxz,
maxx, miny, maxz,
maxx, maxy, maxz,
minx, maxy, maxz]).reshape((-1, 3))
corners = bounds.reshape(-1)[corner_index]
return corners | Given a pair of axis aligned bounds, return all
8 corners of the bounding box.
Parameters
----------
bounds : (2,3) or (2,2) float
Axis aligned bounds
Returns
----------
corners : (8,3) float
      Corner vertices of the cube | Below is the instruction that describes the task:
### Input:
Given a pair of axis aligned bounds, return all
8 corners of the bounding box.
Parameters
----------
bounds : (2,3) or (2,2) float
Axis aligned bounds
Returns
----------
corners : (8,3) float
Corner vertices of the cube
### Response:
def corners(bounds):
"""
Given a pair of axis aligned bounds, return all
8 corners of the bounding box.
Parameters
----------
bounds : (2,3) or (2,2) float
Axis aligned bounds
Returns
----------
corners : (8,3) float
Corner vertices of the cube
"""
bounds = np.asanyarray(bounds, dtype=np.float64)
if util.is_shape(bounds, (2, 2)):
bounds = np.column_stack((bounds, [0, 0]))
elif not util.is_shape(bounds, (2, 3)):
raise ValueError('bounds must be (2,2) or (2,3)!')
minx, miny, minz, maxx, maxy, maxz = np.arange(6)
corner_index = np.array([minx, miny, minz,
maxx, miny, minz,
maxx, maxy, minz,
minx, maxy, minz,
minx, miny, maxz,
maxx, miny, maxz,
maxx, maxy, maxz,
minx, maxy, maxz]).reshape((-1, 3))
corners = bounds.reshape(-1)[corner_index]
return corners |
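The reshape trick works because bounds.reshape(-1) flattens to [minx, miny, minz, maxx, maxy, maxz], so the 8x3 index table simply picks the right min/max entry per axis. A standalone numpy check of that idea for a 1 x 2 x 3 box (no trimesh utilities assumed):

import numpy as np

bounds = np.array([[0.0, 0.0, 0.0],
                   [1.0, 2.0, 3.0]])          # [min_xyz, max_xyz]
minx, miny, minz, maxx, maxy, maxz = np.arange(6)
corner_index = np.array([minx, miny, minz,
                         maxx, miny, minz,
                         maxx, maxy, minz,
                         minx, maxy, minz,
                         minx, miny, maxz,
                         maxx, miny, maxz,
                         maxx, maxy, maxz,
                         minx, maxy, maxz]).reshape((-1, 3))
box_corners = bounds.reshape(-1)[corner_index]
assert box_corners.shape == (8, 3)
assert box_corners.max(axis=0).tolist() == [1.0, 2.0, 3.0]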
def _inject_into_mod(mod, name, value, force_lock=False):
'''
Inject a variable into a module. This is used to inject "globals" like
``__salt__``, ``__pillar``, or ``grains``.
Instead of injecting the value directly, a ``ThreadLocalProxy`` is created.
If such a proxy is already present under the specified name, it is updated
with the new value. This update only affects the current thread, so that
the same name can refer to different values depending on the thread of
execution.
This is important for data that is not truly global. For example, pillar
    data might be dynamically overridden through function parameters and thus
the actual values available in pillar might depend on the thread that is
calling a module.
mod:
module object into which the value is going to be injected.
name:
name of the variable that is injected into the module.
value:
value that is injected into the variable. The value is not injected
directly, but instead set as the new reference of the proxy that has
been created for the variable.
force_lock:
whether the lock should be acquired before checking whether a proxy
object for the specified name has already been injected into the
module. If ``False`` (the default), this function checks for the
module's variable without acquiring the lock and only acquires the lock
if a new proxy has to be created and injected.
'''
old_value = getattr(mod, name, None)
# We use a double-checked locking scheme in order to avoid taking the lock
# when a proxy object has already been injected.
# In most programming languages, double-checked locking is considered
# unsafe when used without explicit memory barriers because one might read
# an uninitialized value. In CPython it is safe due to the global
# interpreter lock (GIL). In Python implementations that do not have the
# GIL, it could be unsafe, but at least Jython also guarantees that (for
# Python objects) memory is not corrupted when writing and reading without
# explicit synchronization
# (http://www.jython.org/jythonbook/en/1.0/Concurrency.html).
# Please note that in order to make this code safe in a runtime environment
    # that does not make these guarantees, it is not sufficient. The
# ThreadLocalProxy must also be created with fallback_to_shared set to
# False or a lock must be added to the ThreadLocalProxy.
if force_lock:
with _inject_into_mod.lock:
if isinstance(old_value, ThreadLocalProxy):
ThreadLocalProxy.set_reference(old_value, value)
else:
setattr(mod, name, ThreadLocalProxy(value, True))
else:
if isinstance(old_value, ThreadLocalProxy):
ThreadLocalProxy.set_reference(old_value, value)
else:
_inject_into_mod(mod, name, value, True) | Inject a variable into a module. This is used to inject "globals" like
``__salt__``, ``__pillar``, or ``grains``.
Instead of injecting the value directly, a ``ThreadLocalProxy`` is created.
If such a proxy is already present under the specified name, it is updated
with the new value. This update only affects the current thread, so that
the same name can refer to different values depending on the thread of
execution.
This is important for data that is not truly global. For example, pillar
    data might be dynamically overridden through function parameters and thus
the actual values available in pillar might depend on the thread that is
calling a module.
mod:
module object into which the value is going to be injected.
name:
name of the variable that is injected into the module.
value:
value that is injected into the variable. The value is not injected
directly, but instead set as the new reference of the proxy that has
been created for the variable.
force_lock:
whether the lock should be acquired before checking whether a proxy
object for the specified name has already been injected into the
module. If ``False`` (the default), this function checks for the
module's variable without acquiring the lock and only acquires the lock
        if a new proxy has to be created and injected. | Below is the instruction that describes the task:
### Input:
Inject a variable into a module. This is used to inject "globals" like
``__salt__``, ``__pillar``, or ``grains``.
Instead of injecting the value directly, a ``ThreadLocalProxy`` is created.
If such a proxy is already present under the specified name, it is updated
with the new value. This update only affects the current thread, so that
the same name can refer to different values depending on the thread of
execution.
This is important for data that is not truly global. For example, pillar
    data might be dynamically overridden through function parameters and thus
the actual values available in pillar might depend on the thread that is
calling a module.
mod:
module object into which the value is going to be injected.
name:
name of the variable that is injected into the module.
value:
value that is injected into the variable. The value is not injected
directly, but instead set as the new reference of the proxy that has
been created for the variable.
force_lock:
whether the lock should be acquired before checking whether a proxy
object for the specified name has already been injected into the
module. If ``False`` (the default), this function checks for the
module's variable without acquiring the lock and only acquires the lock
if a new proxy has to be created and injected.
### Response:
def _inject_into_mod(mod, name, value, force_lock=False):
'''
Inject a variable into a module. This is used to inject "globals" like
``__salt__``, ``__pillar``, or ``grains``.
Instead of injecting the value directly, a ``ThreadLocalProxy`` is created.
If such a proxy is already present under the specified name, it is updated
with the new value. This update only affects the current thread, so that
the same name can refer to different values depending on the thread of
execution.
This is important for data that is not truly global. For example, pillar
    data might be dynamically overridden through function parameters and thus
the actual values available in pillar might depend on the thread that is
calling a module.
mod:
module object into which the value is going to be injected.
name:
name of the variable that is injected into the module.
value:
value that is injected into the variable. The value is not injected
directly, but instead set as the new reference of the proxy that has
been created for the variable.
force_lock:
whether the lock should be acquired before checking whether a proxy
object for the specified name has already been injected into the
module. If ``False`` (the default), this function checks for the
module's variable without acquiring the lock and only acquires the lock
if a new proxy has to be created and injected.
'''
old_value = getattr(mod, name, None)
# We use a double-checked locking scheme in order to avoid taking the lock
# when a proxy object has already been injected.
# In most programming languages, double-checked locking is considered
# unsafe when used without explicit memory barriers because one might read
# an uninitialized value. In CPython it is safe due to the global
# interpreter lock (GIL). In Python implementations that do not have the
# GIL, it could be unsafe, but at least Jython also guarantees that (for
# Python objects) memory is not corrupted when writing and reading without
# explicit synchronization
# (http://www.jython.org/jythonbook/en/1.0/Concurrency.html).
# Please note that in order to make this code safe in a runtime environment
    # that does not make these guarantees, it is not sufficient. The
# ThreadLocalProxy must also be created with fallback_to_shared set to
# False or a lock must be added to the ThreadLocalProxy.
if force_lock:
with _inject_into_mod.lock:
if isinstance(old_value, ThreadLocalProxy):
ThreadLocalProxy.set_reference(old_value, value)
else:
setattr(mod, name, ThreadLocalProxy(value, True))
else:
if isinstance(old_value, ThreadLocalProxy):
ThreadLocalProxy.set_reference(old_value, value)
else:
_inject_into_mod(mod, name, value, True) |
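The double-checked locking idiom described in the comments can be shown without Salt's ThreadLocalProxy. This hypothetical sketch injects a mutable holder into a module-like object, taking the lock only when the holder does not exist yet:

import threading

_lock = threading.Lock()

class _FakeMod:
    """Stand-in for a real module object."""

def inject(mod, name, value):
    holder = getattr(mod, name, None)
    if holder is not None:
        holder["ref"] = value            # fast path: holder already injected, no lock
        return holder
    with _lock:                          # slow path: create the holder under the lock
        holder = getattr(mod, name, None)
        if holder is None:               # re-check; another thread may have won the race
            holder = {"ref": value}
            setattr(mod, name, holder)
        else:
            holder["ref"] = value
    return holder

mod = _FakeMod()
inject(mod, "__salt__", {"test.ping": lambda: True})
assert mod.__salt__["ref"]["test.ping"]()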
def _attribute_is_magic(node, attrs, parents):
"""Checks that node is an attribute used inside one of allowed parents"""
if node.attrname not in attrs:
return False
if not node.last_child():
return False
try:
for cls in node.last_child().inferred():
if isinstance(cls, Super):
cls = cls._self_class # pylint: disable=protected-access
if node_is_subclass(cls, *parents) or cls.qname() in parents:
return True
except InferenceError:
pass
    return False | Checks that node is an attribute used inside one of allowed parents | Below is the instruction that describes the task:
### Input:
Checks that node is an attribute used inside one of allowed parents
### Response:
def _attribute_is_magic(node, attrs, parents):
"""Checks that node is an attribute used inside one of allowed parents"""
if node.attrname not in attrs:
return False
if not node.last_child():
return False
try:
for cls in node.last_child().inferred():
if isinstance(cls, Super):
cls = cls._self_class # pylint: disable=protected-access
if node_is_subclass(cls, *parents) or cls.qname() in parents:
return True
except InferenceError:
pass
return False |
def qteIsQtmacsWidget(widgetObj):
"""
Determine if a widget is part of Qtmacs widget hierarchy.
A widget belongs to the Qtmacs hierarchy if it, or one of its
parents, has a "_qteAdmin" attribute (added via ``qteAddWidget``).
Since every applet has this attribute is guaranteed that the
function returns **True** if the widget is embedded inside
somewhere.
|Args|
* ``widgetObj`` (**QWidget**): the widget to test.
|Returns|
* **bool**: **True** if the widget, or one of its ancestors
in the Qt hierarchy have a '_qteAdmin' attribute.
|Raises|
* **None**
"""
if widgetObj is None:
return False
if hasattr(widgetObj, '_qteAdmin'):
return True
# Keep track of the already visited objects to avoid infinite loops.
visited = [widgetObj]
# Traverse the hierarchy until a parent features the '_qteAdmin'
# attribute, the parent is None, or the parent is an already
# visited widget.
wid = widgetObj.parent()
while wid not in visited:
if hasattr(wid, '_qteAdmin'):
return True
elif wid is None:
return False
else:
visited.append(wid)
wid = wid.parent()
return False | Determine if a widget is part of Qtmacs widget hierarchy.
A widget belongs to the Qtmacs hierarchy if it, or one of its
parents, has a "_qteAdmin" attribute (added via ``qteAddWidget``).
    Since every applet has this attribute, it is guaranteed that the
    function returns **True** if the widget is embedded somewhere
    inside Qtmacs.
|Args|
* ``widgetObj`` (**QWidget**): the widget to test.
|Returns|
* **bool**: **True** if the widget, or one of its ancestors
in the Qt hierarchy have a '_qteAdmin' attribute.
|Raises|
    * **None** | Below is the instruction that describes the task:
### Input:
Determine if a widget is part of Qtmacs widget hierarchy.
A widget belongs to the Qtmacs hierarchy if it, or one of its
parents, has a "_qteAdmin" attribute (added via ``qteAddWidget``).
    Since every applet has this attribute, it is guaranteed that the
    function returns **True** if the widget is embedded somewhere
    inside Qtmacs.
|Args|
* ``widgetObj`` (**QWidget**): the widget to test.
|Returns|
* **bool**: **True** if the widget, or one of its ancestors
in the Qt hierarchy have a '_qteAdmin' attribute.
|Raises|
* **None**
### Response:
def qteIsQtmacsWidget(widgetObj):
"""
Determine if a widget is part of Qtmacs widget hierarchy.
A widget belongs to the Qtmacs hierarchy if it, or one of its
parents, has a "_qteAdmin" attribute (added via ``qteAddWidget``).
    Since every applet has this attribute, it is guaranteed that the
    function returns **True** if the widget is embedded somewhere
    inside Qtmacs.
|Args|
* ``widgetObj`` (**QWidget**): the widget to test.
|Returns|
* **bool**: **True** if the widget, or one of its ancestors
in the Qt hierarchy have a '_qteAdmin' attribute.
|Raises|
* **None**
"""
if widgetObj is None:
return False
if hasattr(widgetObj, '_qteAdmin'):
return True
# Keep track of the already visited objects to avoid infinite loops.
visited = [widgetObj]
# Traverse the hierarchy until a parent features the '_qteAdmin'
# attribute, the parent is None, or the parent is an already
# visited widget.
wid = widgetObj.parent()
while wid not in visited:
if hasattr(wid, '_qteAdmin'):
return True
elif wid is None:
return False
else:
visited.append(wid)
wid = wid.parent()
return False |
def retrieve(self, namespace, stream, start_time, end_time, start_id,
configuration, order=ResultOrder.ASCENDING, limit=sys.maxint):
"""
Retrieves all the events for `stream` from `start_time` (inclusive) till
`end_time` (inclusive). Alternatively to `start_time`, `start_id` can be
provided, and then all events from `start_id` (exclusive) till `end_time`
(inclusive) are returned. `start_id` should be used in cases when the client
got disconnected from the server before all the events in the requested
time window had been returned. `order` can be one of ResultOrder.ASCENDING
or ResultOrder.DESCENDING.
Returns an iterator over all JSON serialized (strings) events.
"""
if not start_id:
start_id = uuid_from_kronos_time(start_time, _type=UUIDType.LOWEST)
else:
start_id = TimeUUID(start_id)
if uuid_to_kronos_time(start_id) > end_time:
return []
return self._retrieve(namespace, stream, start_id, end_time, order, limit,
configuration) | Retrieves all the events for `stream` from `start_time` (inclusive) till
`end_time` (inclusive). Alternatively to `start_time`, `start_id` can be
provided, and then all events from `start_id` (exclusive) till `end_time`
(inclusive) are returned. `start_id` should be used in cases when the client
got disconnected from the server before all the events in the requested
time window had been returned. `order` can be one of ResultOrder.ASCENDING
or ResultOrder.DESCENDING.
    Returns an iterator over all JSON serialized (strings) events. | Below is the instruction that describes the task:
### Input:
Retrieves all the events for `stream` from `start_time` (inclusive) till
`end_time` (inclusive). Alternatively to `start_time`, `start_id` can be
provided, and then all events from `start_id` (exclusive) till `end_time`
(inclusive) are returned. `start_id` should be used in cases when the client
got disconnected from the server before all the events in the requested
time window had been returned. `order` can be one of ResultOrder.ASCENDING
or ResultOrder.DESCENDING.
Returns an iterator over all JSON serialized (strings) events.
### Response:
def retrieve(self, namespace, stream, start_time, end_time, start_id,
configuration, order=ResultOrder.ASCENDING, limit=sys.maxint):
"""
Retrieves all the events for `stream` from `start_time` (inclusive) till
`end_time` (inclusive). Alternatively to `start_time`, `start_id` can be
provided, and then all events from `start_id` (exclusive) till `end_time`
(inclusive) are returned. `start_id` should be used in cases when the client
got disconnected from the server before all the events in the requested
time window had been returned. `order` can be one of ResultOrder.ASCENDING
or ResultOrder.DESCENDING.
Returns an iterator over all JSON serialized (strings) events.
"""
if not start_id:
start_id = uuid_from_kronos_time(start_time, _type=UUIDType.LOWEST)
else:
start_id = TimeUUID(start_id)
if uuid_to_kronos_time(start_id) > end_time:
return []
return self._retrieve(namespace, stream, start_id, end_time, order, limit,
configuration) |
def conditional_jit(function=None, **kwargs): # noqa: D202
"""Use numba's jit decorator if numba is installed.
Notes
-----
If called without arguments then return wrapped function.
@conditional_jit
def my_func():
return
else called with arguments
@conditional_jit(nopython=True)
def my_func():
return
"""
def wrapper(function):
try:
numba = importlib.import_module("numba")
return numba.jit(**kwargs)(function)
except ImportError:
return function
if function:
return wrapper(function)
else:
return wrapper | Use numba's jit decorator if numba is installed.
Notes
-----
If called without arguments then return wrapped function.
@conditional_jit
def my_func():
return
else called with arguments
@conditional_jit(nopython=True)
def my_func():
        return | Below is the instruction that describes the task:
### Input:
Use numba's jit decorator if numba is installed.
Notes
-----
If called without arguments then return wrapped function.
@conditional_jit
def my_func():
return
else called with arguments
@conditional_jit(nopython=True)
def my_func():
return
### Response:
def conditional_jit(function=None, **kwargs): # noqa: D202
"""Use numba's jit decorator if numba is installed.
Notes
-----
If called without arguments then return wrapped function.
@conditional_jit
def my_func():
return
else called with arguments
@conditional_jit(nopython=True)
def my_func():
return
"""
def wrapper(function):
try:
numba = importlib.import_module("numba")
return numba.jit(**kwargs)(function)
except ImportError:
return function
if function:
return wrapper(function)
else:
return wrapper |
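The function=None trick is what lets the decorator be used both bare and with keyword arguments. The same shape works for any decorator; a standalone example (unrelated to numba) that times calls and accepts an optional label:

import functools
import time

def timed(function=None, label=None):
    def wrapper(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            start = time.perf_counter()
            try:
                return func(*args, **kwargs)
            finally:
                print("{} took {:.6f}s".format(label or func.__name__,
                                               time.perf_counter() - start))
        return inner
    if function is not None:      # bare form: @timed
        return wrapper(function)
    return wrapper                # parameterised form: @timed(label="slow step")

@timed
def f():
    return 1

@timed(label="g with label")
def g():
    return 2

f(); g()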
def check_and_mutate_row(
self,
table_name,
row_key,
app_profile_id=None,
predicate_filter=None,
true_mutations=None,
false_mutations=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Mutates a row atomically based on the output of a predicate Reader filter.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `row_key`:
>>> row_key = b''
>>>
>>> response = client.check_and_mutate_row(table_name, row_key)
Args:
table_name (str): The unique name of the table to which the conditional mutation should be
applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key (bytes): The key of the row to which the conditional mutation should be applied.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending
on whether or not any results are yielded, either ``true_mutations`` or
``false_mutations`` will be executed. If unset, checks that the row
contains any values at all.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` yields at least one cell when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``false_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` does not yield any cells when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``true_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "check_and_mutate_row" not in self._inner_api_calls:
self._inner_api_calls[
"check_and_mutate_row"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.check_and_mutate_row,
default_retry=self._method_configs["CheckAndMutateRow"].retry,
default_timeout=self._method_configs["CheckAndMutateRow"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.CheckAndMutateRowRequest(
table_name=table_name,
row_key=row_key,
app_profile_id=app_profile_id,
predicate_filter=predicate_filter,
true_mutations=true_mutations,
false_mutations=false_mutations,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["check_and_mutate_row"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Mutates a row atomically based on the output of a predicate Reader filter.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `row_key`:
>>> row_key = b''
>>>
>>> response = client.check_and_mutate_row(table_name, row_key)
Args:
table_name (str): The unique name of the table to which the conditional mutation should be
applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key (bytes): The key of the row to which the conditional mutation should be applied.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending
on whether or not any results are yielded, either ``true_mutations`` or
``false_mutations`` will be executed. If unset, checks that the row
contains any values at all.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` yields at least one cell when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``false_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` does not yield any cells when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``true_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid. | Below is the instruction that describes the task:
### Input:
Mutates a row atomically based on the output of a predicate Reader filter.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `row_key`:
>>> row_key = b''
>>>
>>> response = client.check_and_mutate_row(table_name, row_key)
Args:
table_name (str): The unique name of the table to which the conditional mutation should be
applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key (bytes): The key of the row to which the conditional mutation should be applied.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending
on whether or not any results are yielded, either ``true_mutations`` or
``false_mutations`` will be executed. If unset, checks that the row
contains any values at all.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` yields at least one cell when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``false_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` does not yield any cells when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``true_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def check_and_mutate_row(
self,
table_name,
row_key,
app_profile_id=None,
predicate_filter=None,
true_mutations=None,
false_mutations=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Mutates a row atomically based on the output of a predicate Reader filter.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `row_key`:
>>> row_key = b''
>>>
>>> response = client.check_and_mutate_row(table_name, row_key)
Args:
table_name (str): The unique name of the table to which the conditional mutation should be
applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key (bytes): The key of the row to which the conditional mutation should be applied.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending
on whether or not any results are yielded, either ``true_mutations`` or
``false_mutations`` will be executed. If unset, checks that the row
contains any values at all.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` yields at least one cell when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``false_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` does not yield any cells when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``true_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "check_and_mutate_row" not in self._inner_api_calls:
self._inner_api_calls[
"check_and_mutate_row"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.check_and_mutate_row,
default_retry=self._method_configs["CheckAndMutateRow"].retry,
default_timeout=self._method_configs["CheckAndMutateRow"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.CheckAndMutateRowRequest(
table_name=table_name,
row_key=row_key,
app_profile_id=app_profile_id,
predicate_filter=predicate_filter,
true_mutations=true_mutations,
false_mutations=false_mutations,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["check_and_mutate_row"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def tree_build(self):
"""Build a tree from the taxonomy data present in this `ClassificationsDataFrame` or
`SampleCollection`.
Returns
-------
`skbio.tree.TreeNode`, the root node of a tree that contains all the taxa in the current
analysis and their parents leading back to the root node.
"""
from skbio.tree import TreeNode
# build all the nodes
nodes = {}
for tax_id in self.taxonomy.index:
node = TreeNode(name=tax_id, length=1)
node.tax_name = self.taxonomy["name"][tax_id]
node.rank = self.taxonomy["rank"][tax_id]
node.parent_tax_id = self.taxonomy["parent_tax_id"][tax_id]
nodes[tax_id] = node
# generate all the links
for tax_id in self.taxonomy.index:
try:
parent = nodes[nodes[tax_id].parent_tax_id]
except KeyError:
if tax_id != "1":
warnings.warn(
"tax_id={} has parent_tax_id={} which is not in tree"
"".format(tax_id, nodes[tax_id].parent_tax_id)
)
continue
parent.append(nodes[tax_id])
return nodes["1"] | Build a tree from the taxonomy data present in this `ClassificationsDataFrame` or
`SampleCollection`.
Returns
-------
`skbio.tree.TreeNode`, the root node of a tree that contains all the taxa in the current
    analysis and their parents leading back to the root node. | Below is the instruction that describes the task:
### Input:
Build a tree from the taxonomy data present in this `ClassificationsDataFrame` or
`SampleCollection`.
Returns
-------
`skbio.tree.TreeNode`, the root node of a tree that contains all the taxa in the current
analysis and their parents leading back to the root node.
### Response:
def tree_build(self):
"""Build a tree from the taxonomy data present in this `ClassificationsDataFrame` or
`SampleCollection`.
Returns
-------
`skbio.tree.TreeNode`, the root node of a tree that contains all the taxa in the current
analysis and their parents leading back to the root node.
"""
from skbio.tree import TreeNode
# build all the nodes
nodes = {}
for tax_id in self.taxonomy.index:
node = TreeNode(name=tax_id, length=1)
node.tax_name = self.taxonomy["name"][tax_id]
node.rank = self.taxonomy["rank"][tax_id]
node.parent_tax_id = self.taxonomy["parent_tax_id"][tax_id]
nodes[tax_id] = node
# generate all the links
for tax_id in self.taxonomy.index:
try:
parent = nodes[nodes[tax_id].parent_tax_id]
except KeyError:
if tax_id != "1":
warnings.warn(
"tax_id={} has parent_tax_id={} which is not in tree"
"".format(tax_id, nodes[tax_id].parent_tax_id)
)
continue
parent.append(nodes[tax_id])
return nodes["1"] |
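The two-pass construction (first create every node, then attach each node to its parent and warn about orphans) is independent of scikit-bio. A minimal dict-based sketch with hypothetical taxonomy rows:

import warnings

parent_of = {"1": "1", "2": "1", "9606": "2", "99999": "12345"}   # 12345 is absent
nodes = {tax_id: {"id": tax_id, "children": []} for tax_id in parent_of}

for tax_id, parent_id in parent_of.items():
    if tax_id == "1":                      # the root points at itself
        continue
    if parent_id not in nodes:
        warnings.warn("tax_id={} has parent_tax_id={} which is not in tree"
                      "".format(tax_id, parent_id))
        continue
    nodes[parent_id]["children"].append(nodes[tax_id])

root = nodes["1"]
assert [child["id"] for child in root["children"]] == ["2"]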
def ephem(self, *args, **kwargs):
"""Create an Ephem object which is a subset of this one
Take the same keyword arguments as :py:meth:`ephemeris`
Return:
Ephem:
"""
return self.__class__(self.ephemeris(*args, **kwargs)) | Create an Ephem object which is a subset of this one
Take the same keyword arguments as :py:meth:`ephemeris`
Return:
        Ephem: | Below is the instruction that describes the task:
### Input:
Create an Ephem object which is a subset of this one
Take the same keyword arguments as :py:meth:`ephemeris`
Return:
Ephem:
### Response:
def ephem(self, *args, **kwargs):
"""Create an Ephem object which is a subset of this one
Take the same keyword arguments as :py:meth:`ephemeris`
Return:
Ephem:
"""
return self.__class__(self.ephemeris(*args, **kwargs)) |
def Print(self, output_writer):
"""Prints a human readable version of the filter.
Args:
output_writer (CLIOutputWriter): output writer.
"""
if self._names:
output_writer.Write('\tnames: {0:s}\n'.format(
', '.join(self._names))) | Prints a human readable version of the filter.
Args:
      output_writer (CLIOutputWriter): output writer. | Below is the instruction that describes the task:
### Input:
Prints a human readable version of the filter.
Args:
output_writer (CLIOutputWriter): output writer.
### Response:
def Print(self, output_writer):
"""Prints a human readable version of the filter.
Args:
output_writer (CLIOutputWriter): output writer.
"""
if self._names:
output_writer.Write('\tnames: {0:s}\n'.format(
', '.join(self._names))) |
def run_on_main_thread(self, func, args=None, kwargs=None):
"""
Runs the ``func`` callable on the main thread, by using the provided microservice
instance's IOLoop.
:param func: callable to run on the main thread
:param args: tuple or list with the positional arguments.
:param kwargs: dict with the keyword arguments.
:return:
"""
if not args:
args = ()
if not kwargs:
kwargs = {}
self.microservice.get_io_loop().add_callback(func, *args, **kwargs) | Runs the ``func`` callable on the main thread, by using the provided microservice
instance's IOLoop.
:param func: callable to run on the main thread
:param args: tuple or list with the positional arguments.
:param kwargs: dict with the keyword arguments.
    :return: | Below is the instruction that describes the task:
### Input:
Runs the ``func`` callable on the main thread, by using the provided microservice
instance's IOLoop.
:param func: callable to run on the main thread
:param args: tuple or list with the positional arguments.
:param kwargs: dict with the keyword arguments.
:return:
### Response:
def run_on_main_thread(self, func, args=None, kwargs=None):
"""
Runs the ``func`` callable on the main thread, by using the provided microservice
instance's IOLoop.
:param func: callable to run on the main thread
:param args: tuple or list with the positional arguments.
:param kwargs: dict with the keyword arguments.
:return:
"""
if not args:
args = ()
if not kwargs:
kwargs = {}
self.microservice.get_io_loop().add_callback(func, *args, **kwargs) |
def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + int(np.sign(step))
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step) | Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
    the slices sequentially | Below is the instruction that describes the task:
### Input:
Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
### Response:
def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + int(np.sign(step))
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step) |
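The key property is that the composed slice indexes the original data exactly like applying the two slices one after the other. A self-contained check of that claim (the private _expand_slice helper is not shown above, so it is re-created here with slice.indices):

import numpy as np

def expand_slice(slc, size):
    return np.arange(*slc.indices(size))

def compose_slices(old, applied, size):
    step = (old.step or 1) * (applied.step or 1)
    items = expand_slice(old, size)[applied]
    if len(items) > 0:
        start = items[0]
        stop = items[-1] + int(np.sign(step))
        if stop < 0:
            stop = None
    else:
        start = stop = 0
    return slice(start, stop, step)

data = np.arange(20)
old, applied = slice(2, 18, 2), slice(5, 1, -1)
combined = compose_slices(old, applied, 20)
assert (data[old][applied] == data[combined]).all()   # sequential == composed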
def get_task_cost(self, task_name):
"""
Get task cost
:param task_name: name of the task
:return: task cost
:rtype: Instance.TaskCost
:Example:
>>> cost = instance.get_task_cost(instance.get_task_names()[0])
>>> cost.cpu_cost
200
>>> cost.memory_cost
4096
>>> cost.input_size
0
"""
summary = self.get_task_summary(task_name)
if summary is None:
return None
if 'Cost' in summary:
task_cost = summary['Cost']
cpu_cost = task_cost.get('CPU')
memory = task_cost.get('Memory')
input_size = task_cost.get('Input')
return Instance.TaskCost(cpu_cost, memory, input_size) | Get task cost
:param task_name: name of the task
:return: task cost
:rtype: Instance.TaskCost
:Example:
>>> cost = instance.get_task_cost(instance.get_task_names()[0])
>>> cost.cpu_cost
200
>>> cost.memory_cost
4096
>>> cost.input_size
    0 | Below is the instruction that describes the task:
### Input:
Get task cost
:param task_name: name of the task
:return: task cost
:rtype: Instance.TaskCost
:Example:
>>> cost = instance.get_task_cost(instance.get_task_names()[0])
>>> cost.cpu_cost
200
>>> cost.memory_cost
4096
>>> cost.input_size
0
### Response:
def get_task_cost(self, task_name):
"""
Get task cost
:param task_name: name of the task
:return: task cost
:rtype: Instance.TaskCost
:Example:
>>> cost = instance.get_task_cost(instance.get_task_names()[0])
>>> cost.cpu_cost
200
>>> cost.memory_cost
4096
>>> cost.input_size
0
"""
summary = self.get_task_summary(task_name)
if summary is None:
return None
if 'Cost' in summary:
task_cost = summary['Cost']
cpu_cost = task_cost.get('CPU')
memory = task_cost.get('Memory')
input_size = task_cost.get('Input')
return Instance.TaskCost(cpu_cost, memory, input_size) |
def rnd_datetime_array(size, start=datetime(1970, 1, 1), end=None):
"""
Array or Matrix of random datetime generator.
:returns: 1d or 2d array of datetime.date
"""
if end is None:
end = datetime.now()
start = parser.parse_datetime(start)
end = parser.parse_datetime(end)
_assert_correct_start_end(start, end)
return _randn(size, _rnd_datetime, start, end) | Array or Matrix of random datetime generator.
    :returns: 1d or 2d array of datetime.date | Below is the instruction that describes the task:
### Input:
Array or Matrix of random datetime generator.
:returns: 1d or 2d array of datetime.date
### Response:
def rnd_datetime_array(size, start=datetime(1970, 1, 1), end=None):
"""
Array or Matrix of random datetime generator.
:returns: 1d or 2d array of datetime.date
"""
if end is None:
end = datetime.now()
start = parser.parse_datetime(start)
end = parser.parse_datetime(end)
_assert_correct_start_end(start, end)
return _randn(size, _rnd_datetime, start, end) |
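The per-element generator _rnd_datetime and the array helper _randn are not shown above, so the sampling idea is sketched here with the standard library alone: draw a uniform offset in seconds between start and end for each cell.

import random
from datetime import datetime, timedelta

def rnd_datetime(start, end):
    span = (end - start).total_seconds()
    return start + timedelta(seconds=random.uniform(0, span))

def rnd_datetime_matrix(size, start=datetime(1970, 1, 1), end=None):
    end = end or datetime.now()
    if isinstance(size, int):                          # 1d array
        return [rnd_datetime(start, end) for _ in range(size)]
    rows, cols = size                                  # 2d matrix, e.g. (3, 4)
    return [[rnd_datetime(start, end) for _ in range(cols)] for _ in range(rows)]

sample = rnd_datetime_matrix((2, 3))
assert len(sample) == 2 and len(sample[0]) == 3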
def groupby(self, io_select):
"""
SLICE THIS MATRIX INTO ONES WITH LESS DIMENSIONALITY
io_select - 1 IF GROUPING BY THIS DIMENSION, 0 IF FLATTENING
return -
"""
# offsets WILL SERVE TO MASK DIMS WE ARE NOT GROUPING BY, AND SERVE AS RELATIVE INDEX FOR EACH COORDINATE
offsets = []
new_dim = []
acc = 1
        for i, d in reversed(list(enumerate(self.dims))):
if not io_select[i]:
new_dim.insert(0, d)
offsets.insert(0, acc * io_select[i])
acc *= d
if not new_dim:
# WHEN groupby ALL DIMENSIONS, ONLY THE VALUES REMAIN
# RETURN AN ITERATOR OF PAIRS (c, v), WHERE
# c - COORDINATES INTO THE CUBE
# v - VALUE AT GIVEN COORDINATES
return ((c, self[c]) for c in self._all_combos())
else:
output = [[None, Matrix(dims=new_dim)] for i in range(acc)]
_groupby(self.cube, 0, offsets, 0, output, tuple(), [])
return output | SLICE THIS MATRIX INTO ONES WITH LESS DIMENSIONALITY
io_select - 1 IF GROUPING BY THIS DIMENSION, 0 IF FLATTENING
    return - | Below is the instruction that describes the task:
### Input:
SLICE THIS MATRIX INTO ONES WITH LESS DIMENSIONALITY
io_select - 1 IF GROUPING BY THIS DIMENSION, 0 IF FLATTENING
return -
### Response:
def groupby(self, io_select):
"""
SLICE THIS MATRIX INTO ONES WITH LESS DIMENSIONALITY
io_select - 1 IF GROUPING BY THIS DIMENSION, 0 IF FLATTENING
return -
"""
# offsets WILL SERVE TO MASK DIMS WE ARE NOT GROUPING BY, AND SERVE AS RELATIVE INDEX FOR EACH COORDINATE
offsets = []
new_dim = []
acc = 1
        for i, d in reversed(list(enumerate(self.dims))):
if not io_select[i]:
new_dim.insert(0, d)
offsets.insert(0, acc * io_select[i])
acc *= d
if not new_dim:
# WHEN groupby ALL DIMENSIONS, ONLY THE VALUES REMAIN
# RETURN AN ITERATOR OF PAIRS (c, v), WHERE
# c - COORDINATES INTO THE CUBE
# v - VALUE AT GIVEN COORDINATES
return ((c, self[c]) for c in self._all_combos())
else:
output = [[None, Matrix(dims=new_dim)] for i in range(acc)]
_groupby(self.cube, 0, offsets, 0, output, tuple(), [])
return output |
def named_tuple(
element_name, # type: Text
tuple_type, # type: Type[Tuple]
child_processors, # type: List[Processor]
required=True, # type: bool
alias=None, # type: Optional[Text]
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create a processor for namedtuple values.
:param tuple_type: The namedtuple type.
See also :func:`declxml.dictionary`
"""
converter = _named_tuple_converter(tuple_type)
processor = _Aggregate(element_name, converter, child_processors, required, alias)
return _processor_wrap_if_hooks(processor, hooks) | Create a processor for namedtuple values.
:param tuple_type: The namedtuple type.
    See also :func:`declxml.dictionary` | Below is the instruction that describes the task:
### Input:
Create a processor for namedtuple values.
:param tuple_type: The namedtuple type.
See also :func:`declxml.dictionary`
### Response:
def named_tuple(
element_name, # type: Text
tuple_type, # type: Type[Tuple]
child_processors, # type: List[Processor]
required=True, # type: bool
alias=None, # type: Optional[Text]
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create a processor for namedtuple values.
:param tuple_type: The namedtuple type.
See also :func:`declxml.dictionary`
"""
converter = _named_tuple_converter(tuple_type)
processor = _Aggregate(element_name, converter, child_processors, required, alias)
return _processor_wrap_if_hooks(processor, hooks) |
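The aggregate converter presumably just expands the dict of parsed child values into the namedtuple constructor. That conversion step on its own, sketched without declxml:

from collections import namedtuple

Author = namedtuple("Author", ["name", "revision"])

def dict_to_named_tuple(tuple_type, parsed_children):
    """Build a namedtuple from the child values gathered for an aggregate."""
    return tuple_type(**parsed_children)

author = dict_to_named_tuple(Author, {"name": "John Doe", "revision": 3})
assert author.name == "John Doe" and author.revision == 3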
def memcache(self, f):
"""Cache a function in memory using an internal dictionary."""
name = _fullname(f)
cache = self.load_memcache(name)
@wraps(f)
def memcached(*args):
"""Cache the function in memory."""
# The arguments need to be hashable. Much faster than using hash().
h = args
out = cache.get(h, None)
if out is None:
out = f(*args)
cache[h] = out
return out
    return memcached | Cache a function in memory using an internal dictionary. | Below is the instruction that describes the task:
### Input:
Cache a function in memory using an internal dictionary.
### Response:
def memcache(self, f):
"""Cache a function in memory using an internal dictionary."""
name = _fullname(f)
cache = self.load_memcache(name)
@wraps(f)
def memcached(*args):
"""Cache the function in memory."""
# The arguments need to be hashable. Much faster than using hash().
h = args
out = cache.get(h, None)
if out is None:
out = f(*args)
cache[h] = out
return out
return memcached |
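The wrapper relies on the positional-argument tuple being hashable so it can serve directly as the cache key. A standalone equivalent without the surrounding cache-manager class:

from functools import wraps

def memoize(f):
    cache = {}
    @wraps(f)
    def memoized(*args):
        out = cache.get(args)          # the args tuple itself is the key
        if out is None:
            out = f(*args)
            cache[args] = out
        return out
    return memoized

calls = []

@memoize
def slow_square(x):
    calls.append(x)
    return x * x

assert slow_square(4) == 16
assert slow_square(4) == 16            # second call served from the cache
assert calls == [4]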
def cleanupContainers(self):
"""
Cleans up all containers to the right of the current one.
"""
for i in range(self.count() - 1, self.currentIndex(), -1):
widget = self.widget(i)
widget.close()
widget.setParent(None)
            widget.deleteLater() | Cleans up all containers to the right of the current one. | Below is the instruction that describes the task:
### Input:
Cleans up all containers to the right of the current one.
### Response:
def cleanupContainers(self):
"""
Cleans up all containers to the right of the current one.
"""
for i in range(self.count() - 1, self.currentIndex(), -1):
widget = self.widget(i)
widget.close()
widget.setParent(None)
widget.deleteLater() |
def get_last_args(tp):
"""Get last arguments of (multiply) subscripted type.
Parameters for Callable are flattened. Examples::
get_last_args(int) == ()
get_last_args(Union) == ()
get_last_args(ClassVar[int]) == (int,)
get_last_args(Union[T, int]) == (T, int)
get_last_args(Iterable[Tuple[T, S]][int, T]) == (int, T)
get_last_args(Callable[[T], int]) == (T, int)
get_last_args(Callable[[], int]) == (int,)
"""
if NEW_TYPING:
raise ValueError('This function is only supported in Python 3.6,'
' use get_args instead')
if is_classvar(tp):
return (tp.__type__,) if tp.__type__ is not None else ()
if (
is_generic_type(tp) or is_union_type(tp) or
is_callable_type(tp) or is_tuple_type(tp)
):
return tp.__args__ if tp.__args__ is not None else ()
return () | Get last arguments of (multiply) subscripted type.
Parameters for Callable are flattened. Examples::
get_last_args(int) == ()
get_last_args(Union) == ()
get_last_args(ClassVar[int]) == (int,)
get_last_args(Union[T, int]) == (T, int)
get_last_args(Iterable[Tuple[T, S]][int, T]) == (int, T)
get_last_args(Callable[[T], int]) == (T, int)
    get_last_args(Callable[[], int]) == (int,) | Below is the instruction that describes the task:
### Input:
Get last arguments of (multiply) subscripted type.
Parameters for Callable are flattened. Examples::
get_last_args(int) == ()
get_last_args(Union) == ()
get_last_args(ClassVar[int]) == (int,)
get_last_args(Union[T, int]) == (T, int)
get_last_args(Iterable[Tuple[T, S]][int, T]) == (int, T)
get_last_args(Callable[[T], int]) == (T, int)
get_last_args(Callable[[], int]) == (int,)
### Response:
def get_last_args(tp):
"""Get last arguments of (multiply) subscripted type.
Parameters for Callable are flattened. Examples::
get_last_args(int) == ()
get_last_args(Union) == ()
get_last_args(ClassVar[int]) == (int,)
get_last_args(Union[T, int]) == (T, int)
get_last_args(Iterable[Tuple[T, S]][int, T]) == (int, T)
get_last_args(Callable[[T], int]) == (T, int)
get_last_args(Callable[[], int]) == (int,)
"""
if NEW_TYPING:
raise ValueError('This function is only supported in Python 3.6,'
' use get_args instead')
if is_classvar(tp):
return (tp.__type__,) if tp.__type__ is not None else ()
if (
is_generic_type(tp) or is_union_type(tp) or
is_callable_type(tp) or is_tuple_type(tp)
):
return tp.__args__ if tp.__args__ is not None else ()
return () |
def string(cls, name, description=None, unit='',
default=None, initial_status=None):
"""Instantiate a new string sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. May be the empty string
if there are no applicable units.
default : string
An initial value for the sensor. Defaults to the empty string.
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
Sensor.STATUSES
"""
return cls(cls.STRING, name, description, unit, None,
default, initial_status) | Instantiate a new string sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. May be the empty string
if there are no applicable units.
default : string
An initial value for the sensor. Defaults to the empty string.
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
        Sensor.STATUSES | Below is the instruction that describes the task:
### Input:
Instantiate a new string sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. May be the empty string
if there are no applicable units.
default : string
An initial value for the sensor. Defaults to the empty string.
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
Sensor.STATUSES
### Response:
def string(cls, name, description=None, unit='',
default=None, initial_status=None):
"""Instantiate a new string sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. May be the empty string
if there are no applicable units.
default : string
An initial value for the sensor. Defaults to the empty string.
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
Sensor.STATUSES
"""
return cls(cls.STRING, name, description, unit, None,
default, initial_status) |
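A hedged usage sketch for the factory above. The katcp import path and the attribute access are assumptions based on the sensor API the docstring describes; only the classmethod signature itself comes from the record.

# Assumption: the classmethod above belongs to a katcp-style Sensor class.
from katcp import Sensor  # adjust the import to wherever Sensor is defined

interface_sensor = Sensor.string(
    "device-interface",                          # sensor name
    description="Name of the active interface",  # short description
    unit="",                                     # no applicable units
    default="eth0",                              # overrides the empty-string default
)
print(interface_sensor.name)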
def list_packages(request):
"""
Retrieve a list of the package names registered with the package index.
Returns a list of name strings.
"""
session = DBSession()
names = [p.name for p in Package.all(session, order_by=Package.name)]
return names | Retrieve a list of the package names registered with the package index.
Returns a list of name strings. | Below is the the instruction that describes the task:
### Input:
Retrieve a list of the package names registered with the package index.
Returns a list of name strings.
### Response:
def list_packages(request):
"""
Retrieve a list of the package names registered with the package index.
Returns a list of name strings.
"""
session = DBSession()
names = [p.name for p in Package.all(session, order_by=Package.name)]
return names |
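Because list_packages is the standard package-index XML-RPC method, a client-side call is the clearest illustration; the server URL below is a placeholder for wherever this Pyramid application is mounted.

# Client-side sketch: the view is normally reached over XML-RPC.
import xmlrpc.client

index = xmlrpc.client.ServerProxy("http://localhost:6543/pypi")  # placeholder URL
for name in index.list_packages():
    print(name)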
def autoLayoutSelected( self,
padX = None,
padY = None,
direction = Qt.Horizontal,
layout = 'Layered',
animate = 0,
centerOn = None,
center = None):
"""
Automatically lays out all the selected nodes in the scene using the \
autoLayoutNodes method.
:param padX | <int> || None | default is 2 * cell width
padY | <int> || None | default is 2 * cell height
direction | <Qt.Direction>
layout | <str> | name of the layout plugin to use
animate | <int> | number of seconds to animate over
:return {<XNode>: <QRectF>, ..} | new rects per node
"""
nodes = self.selectedNodes()
return self.autoLayoutNodes(nodes,
padX,
padY,
direction,
layout,
animate,
centerOn,
center) | Automatically lays out all the selected nodes in the scene using the \
autoLayoutNodes method.
:param padX | <int> || None | default is 2 * cell width
padY | <int> || None | default is 2 * cell height
direction | <Qt.Direction>
layout | <str> | name of the layout plugin to use
animate | <int> | number of seconds to animate over
:return {<XNode>: <QRectF>, ..} | new rects per node | Below is the the instruction that describes the task:
### Input:
Automatically lays out all the selected nodes in the scene using the \
autoLayoutNodes method.
:param padX | <int> || None | default is 2 * cell width
padY | <int> || None | default is 2 * cell height
direction | <Qt.Direction>
layout | <str> | name of the layout plugin to use
animate | <int> | number of seconds to animate over
:return {<XNode>: <QRectF>, ..} | new rects per node
### Response:
def autoLayoutSelected( self,
padX = None,
padY = None,
direction = Qt.Horizontal,
layout = 'Layered',
animate = 0,
centerOn = None,
center = None):
"""
Automatically lays out all the selected nodes in the scene using the \
autoLayoutNodes method.
:param padX | <int> || None | default is 2 * cell width
padY | <int> || None | default is 2 * cell height
direction | <Qt.Direction>
layout | <str> | name of the layout plugin to use
animate | <int> | number of seconds to animate over
:return {<XNode>: <QRectF>, ..} | new rects per node
"""
nodes = self.selectedNodes()
return self.autoLayoutNodes(nodes,
padX,
padY,
direction,
layout,
animate,
centerOn,
center) |
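A hedged sketch of driving the method above from an existing node scene. The scene object, the Qt binding import, and the keyword values are all illustrative assumptions; the call itself just forwards the selected nodes to autoLayoutNodes.

# Assumption: `scene` is an existing node scene (projexui-style XNodeScene)
# with some nodes already selected; adjust the Qt import to your binding.
from PyQt5.QtCore import Qt

new_rects = scene.autoLayoutSelected(
    padX=40,
    padY=30,
    direction=Qt.Vertical,  # stack the selected nodes top-to-bottom
    layout='Layered',
    animate=1,              # animate the re-layout over one second
)
for node, rect in new_rects.items():
    print(node, rect)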
def to_netcdf(self, filename, compress=True):
"""Write InferenceData to file using netcdf4.
Parameters
----------
filename : str
Location to write to
compress : bool
Whether to compress result. Note this saves disk space, but may make
saving and loading somewhat slower (default: True).
Returns
-------
str
Location of netcdf file
"""
mode = "w" # overwrite first, then append
        if self._groups:  # checks whether a group is present or not.
for group in self._groups:
data = getattr(self, group)
kwargs = {}
if compress:
kwargs["encoding"] = {var_name: {"zlib": True} for var_name in data.variables}
data.to_netcdf(filename, mode=mode, group=group, **kwargs)
data.close()
mode = "a"
else: # creates a netcdf file for an empty InferenceData object.
empty_netcdf_file = nc.Dataset(filename, mode="w", format="NETCDF4")
empty_netcdf_file.close()
return filename | Write InferenceData to file using netcdf4.
Parameters
----------
filename : str
Location to write to
compress : bool
Whether to compress result. Note this saves disk space, but may make
saving and loading somewhat slower (default: True).
Returns
-------
str
Location of netcdf file | Below is the the instruction that describes the task:
### Input:
Write InferenceData to file using netcdf4.
Parameters
----------
filename : str
Location to write to
compress : bool
Whether to compress result. Note this saves disk space, but may make
saving and loading somewhat slower (default: True).
Returns
-------
str
Location of netcdf file
### Response:
def to_netcdf(self, filename, compress=True):
"""Write InferenceData to file using netcdf4.
Parameters
----------
filename : str
Location to write to
compress : bool
Whether to compress result. Note this saves disk space, but may make
saving and loading somewhat slower (default: True).
Returns
-------
str
Location of netcdf file
"""
mode = "w" # overwrite first, then append
        if self._groups:  # checks whether a group is present or not.
for group in self._groups:
data = getattr(self, group)
kwargs = {}
if compress:
kwargs["encoding"] = {var_name: {"zlib": True} for var_name in data.variables}
data.to_netcdf(filename, mode=mode, group=group, **kwargs)
data.close()
mode = "a"
else: # creates a netcdf file for an empty InferenceData object.
empty_netcdf_file = nc.Dataset(filename, mode="w", format="NETCDF4")
empty_netcdf_file.close()
return filename |
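This is ArviZ's InferenceData.to_netcdf, so a small round-trip example can be shown directly; the file name is arbitrary.

# Round-trip sketch with ArviZ, whose InferenceData defines to_netcdf above.
import numpy as np
import arviz as az

idata = az.from_dict(posterior={"mu": np.random.randn(4, 500)})
path = idata.to_netcdf("trace.nc")     # returns the location of the netcdf file
restored = az.from_netcdf(path)
print(restored.posterior["mu"].shape)  # (4, 500)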
def get_equipment(self, **kwargs):
"""
Return list environments related with environment vip
"""
uri = 'api/v3/equipment/'
uri = self.prepare_url(uri, kwargs)
return super(ApiEquipment, self).get(uri) | Return list environments related with environment vip | Below is the the instruction that describes the task:
### Input:
Return list environments related with environment vip
### Response:
def get_equipment(self, **kwargs):
"""
Return list environments related with environment vip
"""
uri = 'api/v3/equipment/'
uri = self.prepare_url(uri, kwargs)
return super(ApiEquipment, self).get(uri) |
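A minimal hedged sketch for the method above. How the ApiEquipment client is constructed depends on the surrounding GloboNetworkAPI client library, so that part is left as an assumption; the kwargs are simply serialized into the query string by prepare_url.

# Assumption: `equipment_api` is an already-constructed ApiEquipment instance.
# Keyword arguments become query-string parameters via prepare_url().
equipments = equipment_api.get_equipment(fields='id,name', search='{"extends_search": []}')
print(equipments)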
def display_latex(*objs, **kwargs):
"""Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
"""
raw = kwargs.pop('raw',False)
if raw:
for obj in objs:
publish_latex(obj)
else:
display(*objs, include=['text/plain','text/latex']) | Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False] | Below is the the instruction that describes the task:
### Input:
Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
### Response:
def display_latex(*objs, **kwargs):
"""Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
"""
raw = kwargs.pop('raw',False)
if raw:
for obj in objs:
publish_latex(obj)
else:
display(*objs, include=['text/plain','text/latex']) |
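This mirrors IPython.display.display_latex, so a notebook-oriented example is straightforward; it only renders in a frontend that understands text/latex output, and sympy is used merely as an object that knows how to format itself as LaTeX.

# Intended for an IPython/Jupyter frontend that can render text/latex.
from IPython.display import display_latex

display_latex(r"$e^{i\pi} + 1 = 0$", raw=True)  # raw LaTeX source, published as-is

import sympy
display_latex(sympy.sqrt(2))  # raw=False: formatted via the object's _repr_latex_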
def p_members(self, p):
"""members :
| members member VALUE_SEPARATOR
| members member"""
if len(p) == 1:
p[0] = list()
else:
p[1].append(p[2])
p[0] = p[1] | members :
| members member VALUE_SEPARATOR
| members member | Below is the the instruction that describes the task:
### Input:
members :
| members member VALUE_SEPARATOR
| members member
### Response:
def p_members(self, p):
"""members :
| members member VALUE_SEPARATOR
| members member"""
if len(p) == 1:
p[0] = list()
else:
p[1].append(p[2])
p[0] = p[1] |
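The rule above is a PLY yacc production (its docstring is the grammar), so a full parser is out of scope here; the snippet below is only a plain-Python illustration of the left-recursive list accumulation its semantic action performs.

# Illustration only: how the left-recursive `members` rule grows its list.
# Each call mirrors one reduction where len(p) > 1: p[1].append(p[2]); p[0] = p[1]
def reduce_members(members_so_far, member):
    members_so_far.append(member)
    return members_so_far

members = []  # mirrors the empty production (len(p) == 1)
for member in ['"a": 1', '"b": 2', '"c": 3']:
    members = reduce_members(members, member)
print(members)  # ['"a": 1', '"b": 2', '"c": 3']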
def silence(cls, *modules, **kwargs):
"""
Args:
*modules: Modules, or names of modules to silence (by setting their log level to WARNING or above)
**kwargs: Pass as kwargs due to python 2.7, would be level=logging.WARNING otherwise
"""
level = kwargs.pop("level", logging.WARNING)
for mod in modules:
name = mod.__name__ if hasattr(mod, "__name__") else mod
logging.getLogger(name).setLevel(level) | Args:
*modules: Modules, or names of modules to silence (by setting their log level to WARNING or above)
**kwargs: Pass as kwargs due to python 2.7, would be level=logging.WARNING otherwise | Below is the the instruction that describes the task:
### Input:
Args:
*modules: Modules, or names of modules to silence (by setting their log level to WARNING or above)
**kwargs: Pass as kwargs due to python 2.7, would be level=logging.WARNING otherwise
### Response:
def silence(cls, *modules, **kwargs):
"""
Args:
*modules: Modules, or names of modules to silence (by setting their log level to WARNING or above)
**kwargs: Pass as kwargs due to python 2.7, would be level=logging.WARNING otherwise
"""
level = kwargs.pop("level", logging.WARNING)
for mod in modules:
name = mod.__name__ if hasattr(mod, "__name__") else mod
logging.getLogger(name).setLevel(level) |
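A self-contained sketch of the classmethod above; `LogSetup` is a stand-in name for whatever class actually hosts it, so only the logging behaviour should be taken literally.

# Stand-alone sketch: LogSetup stands in for the class that owns silence().
import json
import logging

class LogSetup(object):
    @classmethod
    def silence(cls, *modules, **kwargs):
        level = kwargs.pop("level", logging.WARNING)
        for mod in modules:
            name = mod.__name__ if hasattr(mod, "__name__") else mod
            logging.getLogger(name).setLevel(level)

logging.basicConfig(level=logging.DEBUG)
LogSetup.silence("urllib3", "botocore", level=logging.ERROR)  # silence by name
LogSetup.silence(json)                                        # or by module object
print(logging.getLogger("urllib3").level)  # 40 == logging.ERROR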