code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def load_bioassembly_info_from_file(self, biomol_num):
"""Load metadata about a bioassembly (such as chains and their transformations) from a structure file.
"""
# current functionality is to take in a pre-assembled bioassembly file, parse its MODELs and get info from that
from Bio.PDB import PDBParser
p = PDBParser(PERMISSIVE=True, QUIET=True)
# Parse the pre-assembled bioassembly file found at this object's structure path
structure = p.get_structure('tmp', self.structure_path)
bioass_to_chain_stoich = defaultdict(int)
for model in structure:
for chain in model:
bioass_to_chain_stoich[chain.id] += 1
return dict(bioass_to_chain_stoich) | Load metadata about a bioassembly (such as chains and their transformations) from a structure file. | Below is the instruction that describes the task:
### Input:
Load metadata about a bioassembly (such as chains and their transformations) from a structure file.
### Response:
def load_bioassembly_info_from_file(self, biomol_num):
"""Load metadata about a bioassembly (such as chains and their transformations) from a structure file.
"""
# current functionality is to take in a pre-assembled bioassembly file, parse its MODELs and get info from that
from Bio.PDB import PDBParser
p = PDBParser(PERMISSIVE=True, QUIET=True)
# Parse the pre-assembled bioassembly file found at this object's structure path
structure = p.get_structure('tmp', self.structure_path)
bioass_to_chain_stoich = defaultdict(int)
for model in structure:
for chain in model:
bioass_to_chain_stoich[chain.id] += 1
return dict(bioass_to_chain_stoich) |
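The row above counts chain occurrences across the MODELs of a pre-assembled bioassembly file with Biopython. A minimal, self-contained sketch of the same idea outside the class follows; the function name, file path, and use of collections.Counter are illustrative assumptions, not part of the original code.

```python
from collections import Counter
from Bio.PDB import PDBParser

def chain_stoichiometry(pdb_path):
    """Count how many times each chain ID appears across the MODELs of a bioassembly file."""
    parser = PDBParser(PERMISSIVE=True, QUIET=True)
    structure = parser.get_structure('bioassembly', pdb_path)
    counts = Counter()
    for model in structure:          # each MODEL is one transformed copy of the asymmetric unit
        for chain in model:
            counts[chain.id] += 1    # repeated chain IDs across MODELs give the stoichiometry
    return dict(counts)

# chain_stoichiometry('1a2b_bioassembly.pdb')  ->  e.g. {'A': 2, 'B': 2}   (hypothetical file)
```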
def get_table(self, dbname, tbl_name):
"""
Parameters:
- dbname
- tbl_name
"""
self.send_get_table(dbname, tbl_name)
return self.recv_get_table() | Parameters:
- dbname
- tbl_name | Below is the instruction that describes the task:
### Input:
Parameters:
- dbname
- tbl_name
### Response:
def get_table(self, dbname, tbl_name):
"""
Parameters:
- dbname
- tbl_name
"""
self.send_get_table(dbname, tbl_name)
return self.recv_get_table() |
def apply(self, token, previous=(None, None), next=(None, None)):
""" Applies lexical rules to the given token, which is a [word, tag] list.
"""
w = token[0]
for r in self:
if r[1] in self._cmd: # Rule = ly hassuf 2 RB x
f, x, pos, cmd = bool(0), r[0], r[-2], r[1].lower()
if r[2] in self._cmd: # Rule = NN s fhassuf 1 NNS x
f, x, pos, cmd = bool(1), r[1], r[-2], r[2].lower().lstrip("f")
if f and token[1] != r[0]:
continue
if (cmd == "word" and x == w) \
or (cmd == "char" and x in w) \
or (cmd == "haspref" and w.startswith(x)) \
or (cmd == "hassuf" and w.endswith(x)) \
or (cmd == "addpref" and x + w in self.known) \
or (cmd == "addsuf" and w + x in self.known) \
or (cmd == "deletepref" and w.startswith(x) and w[len(x):] in self.known) \
or (cmd == "deletesuf" and w.endswith(x) and w[:-len(x)] in self.known) \
or (cmd == "goodleft" and x == next[0]) \
or (cmd == "goodright" and x == previous[0]):
token[1] = pos
return token | Applies lexical rules to the given token, which is a [word, tag] list. | Below is the instruction that describes the task:
### Input:
Applies lexical rules to the given token, which is a [word, tag] list.
### Response:
def apply(self, token, previous=(None, None), next=(None, None)):
""" Applies lexical rules to the given token, which is a [word, tag] list.
"""
w = token[0]
for r in self:
if r[1] in self._cmd: # Rule = ly hassuf 2 RB x
f, x, pos, cmd = bool(0), r[0], r[-2], r[1].lower()
if r[2] in self._cmd: # Rule = NN s fhassuf 1 NNS x
f, x, pos, cmd = bool(1), r[1], r[-2], r[2].lower().lstrip("f")
if f and token[1] != r[0]:
continue
if (cmd == "word" and x == w) \
or (cmd == "char" and x in w) \
or (cmd == "haspref" and w.startswith(x)) \
or (cmd == "hassuf" and w.endswith(x)) \
or (cmd == "addpref" and x + w in self.known) \
or (cmd == "addsuf" and w + x in self.known) \
or (cmd == "deletepref" and w.startswith(x) and w[len(x):] in self.known) \
or (cmd == "deletesuf" and w.endswith(x) and w[:-len(x)] in self.known) \
or (cmd == "goodleft" and x == next[0]) \
or (cmd == "goodright" and x == previous[0]):
token[1] = pos
return token |
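As a standalone illustration of the rule format the method above parses (e.g. ``ly hassuf 2 RB x``), the sketch below applies just the suffix command to a [word, tag] token. The rule tuple layout and names are assumptions modeled on the inline comments, not the pattern library itself.

```python
def apply_hassuf(token, rules):
    # Each rule mirrors the layout in the comment above: (affix, command, size, tag, 'x').
    word, _ = token
    for affix, cmd, _, tag, _ in rules:
        if cmd == "hassuf" and word.endswith(affix):
            token[1] = tag
    return token

print(apply_hassuf(["quickly", "NN"], [("ly", "hassuf", "2", "RB", "x")]))  # ['quickly', 'RB']
```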
def is_ready(self):
"""Is thread & ioloop ready.
:returns bool:
"""
if not self._thread:
return False
if not self._ready.is_set():
return False
return True | Is thread & ioloop ready.
:returns bool: | Below is the instruction that describes the task:
### Input:
Is thread & ioloop ready.
:returns bool:
### Response:
def is_ready(self):
"""Is thread & ioloop ready.
:returns bool:
"""
if not self._thread:
return False
if not self._ready.is_set():
return False
return True |
def copy(self):
"""
Create a deep copy of this sequence
Returns:
:obj:`.FileSequence`:
"""
fs = self.__class__.__new__(self.__class__)
fs.__dict__ = self.__dict__.copy()
fs._frameSet = None
if self._frameSet is not None:
fs._frameSet = self._frameSet.copy()
return fs | Create a deep copy of this sequence
Returns:
:obj:`.FileSequence`: | Below is the instruction that describes the task:
### Input:
Create a deep copy of this sequence
Returns:
:obj:`.FileSequence`:
### Response:
def copy(self):
"""
Create a deep copy of this sequence
Returns:
:obj:`.FileSequence`:
"""
fs = self.__class__.__new__(self.__class__)
fs.__dict__ = self.__dict__.copy()
fs._frameSet = None
if self._frameSet is not None:
fs._frameSet = self._frameSet.copy()
return fs |
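The copy() above uses a common Python pattern: bypass __init__ with __new__, shallow-copy the attribute dict, then explicitly re-copy the mutable members that must not be shared. A generic sketch of that pattern (class and attribute names are illustrative):

```python
class Box:
    def __init__(self, items):
        self.items = items

    def copy(self):
        clone = self.__class__.__new__(self.__class__)  # skip __init__ entirely
        clone.__dict__ = self.__dict__.copy()           # shallow-copy every attribute
        clone.items = list(self.items)                  # re-copy mutable state so it is not shared
        return clone

a = Box([1, 2])
b = a.copy()
b.items.append(3)
assert a.items == [1, 2]   # the original is unaffected
```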
def _check_flag_masks(self, ds, name):
'''
Check a variable's flag_masks attribute for compliance under CF
- flag_masks exists as an array
- flag_masks is the same dtype as the variable
- variable's dtype can support bit-field
- flag_masks is the same length as flag_meanings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
'''
variable = ds.variables[name]
flag_masks = variable.flag_masks
flag_meanings = getattr(ds, 'flag_meanings', None)
valid_masks = TestCtx(BaseCheck.HIGH, self.section_titles['3.5'])
valid_masks.assert_true(isinstance(flag_masks, np.ndarray),
"{}'s flag_masks must be an array of values not {}".format(name, type(flag_masks)))
if not isinstance(flag_masks, np.ndarray):
return valid_masks.to_result()
valid_masks.assert_true(variable.dtype.type == flag_masks.dtype.type,
"flag_masks ({}) must be the same data type as {} ({})"
"".format(flag_masks.dtype.type, name, variable.dtype.type))
type_ok = (np.issubdtype(variable.dtype, np.integer) or
np.issubdtype(variable.dtype, 'S') or
np.issubdtype(variable.dtype, 'b'))
valid_masks.assert_true(type_ok, "{}'s data type must be capable of bit-field expression".format(name))
if isinstance(flag_meanings, basestring):
flag_meanings = flag_meanings.split()
valid_masks.assert_true(len(flag_meanings) == len(flag_masks),
"{} flag_meanings and flag_masks should have the same number ".format(name)+\
"of elements.")
return valid_masks.to_result() | Check a variable's flag_masks attribute for compliance under CF
- flag_masks exists as an array
- flag_masks is the same dtype as the variable
- variable's dtype can support bit-field
- flag_masks is the same length as flag_meanings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result | Below is the instruction that describes the task:
### Input:
Check a variable's flag_masks attribute for compliance under CF
- flag_masks exists as an array
- flag_masks is the same dtype as the variable
- variable's dtype can support bit-field
- flag_masks is the same length as flag_meanings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
### Response:
def _check_flag_masks(self, ds, name):
'''
Check a variable's flag_masks attribute for compliance under CF
- flag_masks exists as an array
- flag_masks is the same dtype as the variable
- variable's dtype can support bit-field
- flag_masks is the same length as flag_meanings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
'''
variable = ds.variables[name]
flag_masks = variable.flag_masks
flag_meanings = getattr(ds, 'flag_meanings', None)
valid_masks = TestCtx(BaseCheck.HIGH, self.section_titles['3.5'])
valid_masks.assert_true(isinstance(flag_masks, np.ndarray),
"{}'s flag_masks must be an array of values not {}".format(name, type(flag_masks)))
if not isinstance(flag_masks, np.ndarray):
return valid_masks.to_result()
valid_masks.assert_true(variable.dtype.type == flag_masks.dtype.type,
"flag_masks ({}) must be the same data type as {} ({})"
"".format(flag_masks.dtype.type, name, variable.dtype.type))
type_ok = (np.issubdtype(variable.dtype, np.integer) or
np.issubdtype(variable.dtype, 'S') or
np.issubdtype(variable.dtype, 'b'))
valid_masks.assert_true(type_ok, "{}'s data type must be capable of bit-field expression".format(name))
if isinstance(flag_meanings, basestring):
flag_meanings = flag_meanings.split()
valid_masks.assert_true(len(flag_meanings) == len(flag_masks),
"{} flag_meanings and flag_masks should have the same number ".format(name)+\
"of elements.")
return valid_masks.to_result() |
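For reference, a minimal sketch of a netCDF variable that would satisfy these checks: flag_masks stored as an array with the variable's integer dtype and one flag_meanings token per mask. The file and variable names are hypothetical; per the CF conventions both attributes live on the variable.

```python
import numpy as np
from netCDF4 import Dataset

with Dataset('flags_example.nc', 'w') as nc:
    nc.createDimension('time', 4)
    status = nc.createVariable('sensor_status', 'i1', ('time',))
    status.flag_masks = np.array([1, 2, 4], dtype='i1')             # same dtype as the variable
    status.flag_meanings = 'low_battery sensor_fault out_of_range'  # one meaning per mask
    status[:] = np.array([0, 1, 3, 4], dtype='i1')
```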
def create_composite_loss(losses=None,
regularize=True,
include_marked=True,
name='cost'):
"""Creates a loss that is the sum of all specified losses.
Args:
losses: A sequence of losses to include.
regularize: Whether or not to include regularization losses.
include_marked: Whether or not to use the marked losses.
name: The name for this variable.
Returns:
A single tensor that is the sum of all losses.
"""
books = for_default_graph()
return books.create_composite_loss(losses,
regularize,
include_marked=include_marked,
name=name) | Creates a loss that is the sum of all specified losses.
Args:
losses: A sequence of losses to include.
regularize: Whether or not to include regularization losses.
include_marked: Whether or not to use the marked losses.
name: The name for this variable.
Returns:
A single tensor that is the sum of all losses. | Below is the instruction that describes the task:
### Input:
Creates a loss that is the sum of all specified losses.
Args:
losses: A sequence of losses to include.
regularize: Whether or not to include regularization losses.
include_marked: Whether or not to use the marked losses.
name: The name for this variable.
Returns:
A single tensor that is the sum of all losses.
### Response:
def create_composite_loss(losses=None,
regularize=True,
include_marked=True,
name='cost'):
"""Creates a loss that is the sum of all specified losses.
Args:
losses: A sequence of losses to include.
regularize: Whether or not to include regularization losses.
include_marked: Whether or not to use the marked losses.
name: The name for this variable.
Returns:
A single tensor that is the sum of all losses.
"""
books = for_default_graph()
return books.create_composite_loss(losses,
regularize,
include_marked=include_marked,
name=name) |
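This delegates to the graph's bookkeeper; the sketch below is not that implementation, just a plain TensorFlow 1.x illustration of what a composite loss amounts to: the given loss tensors, plus any collected regularization losses, summed into one tensor.

```python
import tensorflow as tf

def composite_loss_sketch(losses, regularize=True, name='cost'):
    terms = list(losses)
    if regularize:
        # Regularizers registered in the standard collection are folded in as well.
        terms += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    return tf.add_n(terms, name=name)
```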
def _resource_dump(pe, res):
"""Return the dump of the given resource."""
rva = res.data.struct.OffsetToData
size = res.data.struct.Size
dump = pe.get_data(rva, size)
return dump | Return the dump of the given resource. | Below is the instruction that describes the task:
### Input:
Return the dump of the given resource.
### Response:
def _resource_dump(pe, res):
"""Return the dump of the given resource."""
rva = res.data.struct.OffsetToData
size = res.data.struct.Size
dump = pe.get_data(rva, size)
return dump |
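A hedged usage sketch for the helper above: walking a PE file's resource tree with pefile down to the language leaves and dumping each one. The file name is hypothetical, and the type -> id -> language traversal is the usual pefile layout, assumed rather than taken from this repository.

```python
import pefile

pe = pefile.PE('example.exe')
if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
    for res_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:      # resource types (icons, manifests, ...)
        for res_id in res_type.directory.entries:             # named or numbered entries
            for res_lang in res_id.directory.entries:         # language-specific leaves
                data = _resource_dump(pe, res_lang)            # raw bytes of this resource
                print(res_type.id, res_id.id, res_lang.id, len(data))
```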
def get_ntstatus_code(self):
"""
@rtype: int
@return: NTSTATUS status code that caused the exception.
@note: This method is only meaningful for in-page memory error
exceptions.
@raise NotImplementedError: Not an in-page memory error.
"""
if self.get_exception_code() != win32.EXCEPTION_IN_PAGE_ERROR:
msg = "This method is only meaningful "\
"for in-page memory error exceptions."
raise NotImplementedError(msg)
return self.get_exception_information(2) | @rtype: int
@return: NTSTATUS status code that caused the exception.
@note: This method is only meaningful for in-page memory error
exceptions.
@raise NotImplementedError: Not an in-page memory error. | Below is the instruction that describes the task:
### Input:
@rtype: int
@return: NTSTATUS status code that caused the exception.
@note: This method is only meaningful for in-page memory error
exceptions.
@raise NotImplementedError: Not an in-page memory error.
### Response:
def get_ntstatus_code(self):
"""
@rtype: int
@return: NTSTATUS status code that caused the exception.
@note: This method is only meaningful for in-page memory error
exceptions.
@raise NotImplementedError: Not an in-page memory error.
"""
if self.get_exception_code() != win32.EXCEPTION_IN_PAGE_ERROR:
msg = "This method is only meaningful "\
"for in-page memory error exceptions."
raise NotImplementedError(msg)
return self.get_exception_information(2) |
def publishing_clone_relations(self, src_obj):
"""
Clone forward and reverse M2Ms.
This code is difficult to follow because the logic it applies is
confusing, but here is a summary that might help:
- when a draft object is published, the "current" and definitive
relationships are cloned to the published copy. The definitive
relationships are the draft-to-draft ones, as set in the admin.
- a "related draft" is the draft object at the other side of
a draft-to-draft M2M relationship
- if a related draft also has a published copy, a draft-to-
published relationship is added to that published copy. This
makes our newly-published item also "published" from the reverse
direction
- if our draft object has a related published copy without a
corresponding related draft -- that is, a draft-to-published
relation without a definitive draft-to-draft relation -- then
we remove that relation as it is no longer "current". This
makes our newly-published item "unpublished" from the reverse
direction when an admin removes the underlying relationship.
An example case:
- We have Event "E" (unpublished) and Program "P" (published)
- We add an M2M relationship from E to P. Until the relationship
change is published it only affects drafts. Relationships are:
E draft <-> P draft
- We publish E, applying the relationship to published copies on
both sides:
E draft <-> P draft
E published <-> P draft
P published <-> E draft
- We remove the M2M relationship between E and P (We could do this
from either side: remove E from P; or, remove P from E). The
draft-to-draft relation is removed but published copy
relationships are not affected:
E published <-> P draft
P published <-> E draft
- We publish P (or E) to apply the relationship removal to
published copies on both sides. No relationships remain.
To handle M2M relationships in general we iterate over entries in the
through-table relationship table to clone these entries, or remove
them, as appropriate. By processing the M2M relationships in this way
we can handle both kinds of M2M relationship:
- standard M2M relationships with no explicit through table defined
(these get an auto-generated through table) which are easier to
handle because we can add/remove items with the relationship's
queryset directly
- M2M relationships with an explicit through table defined, which
are more difficult to handle because we must use the through
model's manager to add/remove relationships.
See unit tests in ``TestPublishingOfM2MRelationships``.
"""
def clone_through_model_relationship(
manager, through_entry, dst_obj, rel_obj
):
dst_obj_filter = build_filter_for_through_field(
manager, manager.source_field_name, dst_obj)
rel_obj_filter = build_filter_for_through_field(
manager, manager.target_field_name, rel_obj)
if manager.through.objects \
.filter(**dst_obj_filter) \
.filter(**rel_obj_filter) \
.exists():
return
through_entry.pk = None
setattr(through_entry, manager.source_field_name, dst_obj)
setattr(through_entry, manager.target_field_name, rel_obj)
through_entry.save()
def delete_through_model_relationship(manager, src_obj, dst_obj):
src_obj_filter = build_filter_for_through_field(
manager, manager.source_field_name, src_obj)
dst_obj_filter = build_filter_for_through_field(
manager, manager.target_field_name, dst_obj)
manager.through.objects \
.filter(**src_obj_filter) \
.filter(**dst_obj_filter) \
.delete()
def build_filter_for_through_field(manager, field_name, obj):
# If the field is a `GenericForeignKey` we need to build
# a compatible filter dict against the field target's content type
# and PK...
field = getattr(manager.through, field_name)
if isinstance(field, GenericForeignKey):
field_filter = {
getattr(field, 'fk_field'): obj.pk,
getattr(field, 'ct_field'):
ContentType.objects.get_for_model(obj)
}
# ...otherwise standard FK fields can be handled simply
else:
field_filter = {field_name: obj}
return field_filter
def clone(src_manager):
if (
not hasattr(src_manager, 'source_field_name') or
not hasattr(src_manager, 'target_field_name')
):
raise PublishingException(
"Publishing requires many-to-many managers to have"
" 'source_field_name' and 'target_field_name' attributes"
" with the source and target field names that relate the"
" through model to the ends of the M2M relationship."
" If a non-standard manager does not provide these"
" attributes you must add them."
)
src_obj_source_field_filter = build_filter_for_through_field(
src_manager, src_manager.source_field_name, src_obj)
through_qs = src_manager.through.objects \
.filter(**src_obj_source_field_filter)
published_rel_objs_maybe_obsolete = []
current_draft_rel_pks = set()
for through_entry in through_qs:
rel_obj = getattr(through_entry, src_manager.target_field_name)
# If the object referenced by the M2M is publishable we only
# clone the relationship if it is to a draft copy, not if it is
# to a published copy. If it is not a publishable object at
# all then we always clone the relationship (True by default).
if getattr(rel_obj, 'publishing_is_draft', True):
clone_through_model_relationship(
src_manager, through_entry, self, rel_obj)
# If the related draft object also has a published copy,
# we need to make sure the published copy also knows about
# this newly-published draft.
try:
# Get published copy for related object, if any
rel_obj_published = rel_obj.publishing_linked
except AttributeError:
pass # Related item has no published copy
else:
if rel_obj_published:
clone_through_model_relationship(
src_manager, through_entry,
src_obj, rel_obj_published)
# Track IDs of related draft copies, so we can tell later
# whether relationships with published copies are obsolete
current_draft_rel_pks.add(rel_obj.pk)
else:
# Track related published copies, in case they have
# become obsolete
published_rel_objs_maybe_obsolete.append(rel_obj)
# If related published copies have no corresponding related
# draft after all the previous processing, the relationship is
# obsolete and must be removed.
for published_rel_obj in published_rel_objs_maybe_obsolete:
draft = published_rel_obj.get_draft()
if not draft or draft.pk not in current_draft_rel_pks:
delete_through_model_relationship(
src_manager, src_obj, published_rel_obj)
# Track the relationship through-tables we have processed to avoid
# processing the same relationships in both forward and reverse
# directions, which could otherwise happen in unusual cases like
# for SFMOMA event M2M inter-relationships which are explicitly
# defined both ways as a hack to expose form widgets.
seen_rel_through_tables = set()
# Forward.
for field in src_obj._meta.many_to_many:
src_manager = getattr(src_obj, field.name)
clone(src_manager)
seen_rel_through_tables.add(field.rel.through)
# Reverse.
for field in src_obj._meta.get_all_related_many_to_many_objects():
# Skip reverse relationship we have already seen
if field.field.rel.through in seen_rel_through_tables:
continue
field_accessor_name = field.get_accessor_name()
# M2M relationships with `self` don't have accessor names
if not field_accessor_name:
continue
src_manager = getattr(src_obj, field_accessor_name)
clone(src_manager) | Clone forward and reverse M2Ms.
This code is difficult to follow because the logic it applies is
confusing, but here is a summary that might help:
- when a draft object is published, the "current" and definitive
relationships are cloned to the published copy. The definitive
relationships are the draft-to-draft ones, as set in the admin.
- a "related draft" is the draft object at the other side of
a draft-to-draft M2M relationship
- if a related draft also has a published copy, a draft-to-
published relationship is added to that published copy. This
makes our newly-published item also "published" from the reverse
direction
- if our draft object has a related published copy without a
corresponding related draft -- that is, a draft-to-published
relation without a definitive draft-to-draft relation -- then
we remove that relation as it is no longer "current". This
makes our newly-published item "unpublished" from the reverse
direction when an admin removes the underlying relationship.
An example case:
- We have Event "E" (unpublished) and Program "P" (published)
- We add an M2M relationship from E to P. Until the relationship
change is published it only affects drafts. Relationships are:
E draft <-> P draft
- We publish E, applying the relationship to published copies on
both sides:
E draft <-> P draft
E published <-> P draft
P published <-> E draft
- We remove the M2M relationship between E and P (We could do this
from either side: remove E from P; or, remove P from E). The
draft-to-draft relation is removed but published copy
relationships are not affected:
E published <-> P draft
P published <-> E draft
- We publish P (or E) to apply the relationship removal to
published copies on both sides. No relationships remain.
To handle M2M relationships in general we iterate over entries in the
through-table relationship table to clone these entries, or remove
them, as appropriate. By processing the M2M relationships in this way
we can handle both kinds of M2M relationship:
- standard M2M relationships with no explicit through table defined
(these get an auto-generated through table) which are easier to
handle because we can add/remove items with the relationship's
queryset directly
- M2M relationships with an explicit through table defined, which
are more difficult to handle because we must use the through
model's manager to add/remove relationships.
See unit tests in ``TestPublishingOfM2MRelationships``. | Below is the instruction that describes the task:
### Input:
Clone forward and reverse M2Ms.
This code is difficult to follow because the logic it applies is
confusing, but here is a summary that might help:
- when a draft object is published, the "current" and definitive
relationships are cloned to the published copy. The definitive
relationships are the draft-to-draft ones, as set in the admin.
- a "related draft" is the draft object at the other side of
a draft-to-draft M2M relationship
- if a related draft also has a published copy, a draft-to-
published relationship is added to that published copy. This
makes our newly-published item also "published" from the reverse
direction
- if our draft object has a related published copy without a
corresponding related draft -- that is, a draft-to-published
relation without a definitive draft-to-draft relation -- then
we remove that relation as it is no longer "current". This
makes our newly-published item "unpublished" from the reverse
direction when an admin removes the underlying relationship.
An example case:
- We have Event "E" (unpublished) and Program "P" (published)
- We add an M2M relationship from E to P. Until the relationship
change is published it only affects drafts. Relationships are:
E draft <-> P draft
- We publish E, applying the relationship to published copies on
both sides:
E draft <-> P draft
E published <-> P draft
P published <-> E draft
- We remove the M2M relationship between E and P (We could do this
from either side: remove E from P; or, remove P from E). The
draft-to-draft relation is removed but published copy
relationships are not affected:
E published <-> P draft
P published <-> E draft
- We publish P (or E) to apply the relationship removal to
published copies on both sides. No relationships remain.
To handle M2M relationships in general we iterate over entries in the
through-table relationship table to clone these entries, or remove
them, as appropriate. By processing the M2M relationships in this way
we can handle both kinds of M2M relationship:
- standard M2M relationships with no explicit through table defined
(these get an auto-generated through table) which are easier to
handle because we can add/remove items with the relationship's
queryset directly
- M2M relationships with an explicit through table defined, which
are more difficult to handle because we must use the through
model's manager to add/remove relationships.
See unit tests in ``TestPublishingOfM2MRelationships``.
### Response:
def publishing_clone_relations(self, src_obj):
"""
Clone forward and reverse M2Ms.
This code is difficult to follow because the logic it applies is
confusing, but here is a summary that might help:
- when a draft object is published, the "current" and definitive
relationships are cloned to the published copy. The definitive
relationships are the draft-to-draft ones, as set in the admin.
- a "related draft" is the draft object at the other side of
a draft-to-draft M2M relationship
- if a related draft also has a published copy, a draft-to-
published relationship is added to that published copy. This
makes our newly-published item also "published" from the reverse
direction
- if our draft object has a related published copy without a
corresponding related draft -- that is, a draft-to-published
relation without a definitive draft-to-draft relation -- then
we remove that relation as it is no longer "current". This
makes our newly-published item "unpublished" from the reverse
direction when an admin removes the underlying relationship.
An example case:
- We have Event "E" (unpublished) and Program "P" (published)
- We add an M2M relationship from E to P. Until the relationship
change is published it only affects drafts. Relationships are:
E draft <-> P draft
- We publish E, applying the relationship to published copies on
both sides:
E draft <-> P draft
E published <-> P draft
P published <-> E draft
- We remove the M2M relationship between E and P (We could do this
from either side: remove E from P; or, remove P from E). The
draft-to-draft relation is removed but published copy
relationships are not affected:
E published <-> P draft
P published <-> E draft
- We publish P (or E) to apply the relationship removal to
published copies on both sides. No relationships remain.
To handle M2M relationships in general we iterate over entries in the
through-table relationship table to clone these entries, or remove
them, as appropriate. By processing the M2M relationships in this way
we can handle both kinds of M2M relationship:
- standard M2M relationships with no explicit through table defined
(these get an auto-generated through table) which are easier to
handle because we can add/remove items with the relationship's
queryset directly
- M2M relationships with an explicit through table defined, which
are more difficult to handle because we must use the through
model's manager to add/remove relationships.
See unit tests in ``TestPublishingOfM2MRelationships``.
"""
def clone_through_model_relationship(
manager, through_entry, dst_obj, rel_obj
):
dst_obj_filter = build_filter_for_through_field(
manager, manager.source_field_name, dst_obj)
rel_obj_filter = build_filter_for_through_field(
manager, manager.target_field_name, rel_obj)
if manager.through.objects \
.filter(**dst_obj_filter) \
.filter(**rel_obj_filter) \
.exists():
return
through_entry.pk = None
setattr(through_entry, manager.source_field_name, dst_obj)
setattr(through_entry, manager.target_field_name, rel_obj)
through_entry.save()
def delete_through_model_relationship(manager, src_obj, dst_obj):
src_obj_filter = build_filter_for_through_field(
manager, manager.source_field_name, src_obj)
dst_obj_filter = build_filter_for_through_field(
manager, manager.target_field_name, dst_obj)
manager.through.objects \
.filter(**src_obj_filter) \
.filter(**dst_obj_filter) \
.delete()
def build_filter_for_through_field(manager, field_name, obj):
# If the field is a `GenericForeignKey` we need to build
# a compatible filter dict against the field target's content type
# and PK...
field = getattr(manager.through, field_name)
if isinstance(field, GenericForeignKey):
field_filter = {
getattr(field, 'fk_field'): obj.pk,
getattr(field, 'ct_field'):
ContentType.objects.get_for_model(obj)
}
# ...otherwise standard FK fields can be handled simply
else:
field_filter = {field_name: obj}
return field_filter
def clone(src_manager):
if (
not hasattr(src_manager, 'source_field_name') or
not hasattr(src_manager, 'target_field_name')
):
raise PublishingException(
"Publishing requires many-to-many managers to have"
" 'source_field_name' and 'target_field_name' attributes"
" with the source and target field names that relate the"
" through model to the ends of the M2M relationship."
" If a non-standard manager does not provide these"
" attributes you must add them."
)
src_obj_source_field_filter = build_filter_for_through_field(
src_manager, src_manager.source_field_name, src_obj)
through_qs = src_manager.through.objects \
.filter(**src_obj_source_field_filter)
published_rel_objs_maybe_obsolete = []
current_draft_rel_pks = set()
for through_entry in through_qs:
rel_obj = getattr(through_entry, src_manager.target_field_name)
# If the object referenced by the M2M is publishable we only
# clone the relationship if it is to a draft copy, not if it is
# to a published copy. If it is not a publishable object at
# all then we always clone the relationship (True by default).
if getattr(rel_obj, 'publishing_is_draft', True):
clone_through_model_relationship(
src_manager, through_entry, self, rel_obj)
# If the related draft object also has a published copy,
# we need to make sure the published copy also knows about
# this newly-published draft.
try:
# Get published copy for related object, if any
rel_obj_published = rel_obj.publishing_linked
except AttributeError:
pass # Related item has no published copy
else:
if rel_obj_published:
clone_through_model_relationship(
src_manager, through_entry,
src_obj, rel_obj_published)
# Track IDs of related draft copies, so we can tell later
# whether relationships with published copies are obsolete
current_draft_rel_pks.add(rel_obj.pk)
else:
# Track related published copies, in case they have
# become obsolete
published_rel_objs_maybe_obsolete.append(rel_obj)
# If related published copies have no corresponding related
# draft after all the previous processing, the relationship is
# obsolete and must be removed.
for published_rel_obj in published_rel_objs_maybe_obsolete:
draft = published_rel_obj.get_draft()
if not draft or draft.pk not in current_draft_rel_pks:
delete_through_model_relationship(
src_manager, src_obj, published_rel_obj)
# Track the relationship through-tables we have processed to avoid
# processing the same relationships in both forward and reverse
# directions, which could otherwise happen in unusual cases like
# for SFMOMA event M2M inter-relationships which are explicitly
# defined both ways as a hack to expose form widgets.
seen_rel_through_tables = set()
# Forward.
for field in src_obj._meta.many_to_many:
src_manager = getattr(src_obj, field.name)
clone(src_manager)
seen_rel_through_tables.add(field.rel.through)
# Reverse.
for field in src_obj._meta.get_all_related_many_to_many_objects():
# Skip reverse relationship we have already seen
if field.field.rel.through in seen_rel_through_tables:
continue
field_accessor_name = field.get_accessor_name()
# M2M relationships with `self` don't have accessor names
if not field_accessor_name:
continue
src_manager = getattr(src_obj, field_accessor_name)
clone(src_manager) |
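The core trick in clone_through_model_relationship above is the standard Django idiom for duplicating a row: null the primary key, repoint a field, and save to INSERT a new record. A minimal sketch with a hypothetical through model:

```python
# Membership is an illustrative M2M through model (person <-> group), not from this codebase.
entry = Membership.objects.get(pk=42)
entry.pk = None            # a None primary key makes save() INSERT a new row
entry.group = other_group  # repoint one end of the relationship before saving
entry.save()               # the original row is untouched; a cloned relationship now exists
```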
def schedule_ping_frequency(self): # pragma: no cover
"Send a ping message to slack every 20 seconds"
ping = crontab('* * * * * */20', func=self.send_ping, start=False)
ping.start() | Send a ping message to slack every 20 seconds | Below is the instruction that describes the task:
### Input:
Send a ping message to slack every 20 seconds
### Response:
def schedule_ping_frequency(self): # pragma: no cover
"Send a ping message to slack every 20 seconds"
ping = crontab('* * * * * */20', func=self.send_ping, start=False)
ping.start() |
def unmasked_for_shape_and_pixel_scale(cls, shape, pixel_scale, invert=False):
"""Setup a mask where all pixels are unmasked.
Parameters
----------
shape : (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scale: float
The arc-second to pixel conversion factor of each pixel.
"""
mask = np.full(tuple(map(lambda d: int(d), shape)), False)
if invert: mask = np.invert(mask)
return cls(array=mask, pixel_scale=pixel_scale) | Setup a mask where all pixels are unmasked.
Parameters
----------
shape : (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scale: float
The arc-second to pixel conversion factor of each pixel. | Below is the instruction that describes the task:
### Input:
Setup a mask where all pixels are unmasked.
Parameters
----------
shape : (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scale: float
The arc-second to pixel conversion factor of each pixel.
### Response:
def unmasked_for_shape_and_pixel_scale(cls, shape, pixel_scale, invert=False):
"""Setup a mask where all pixels are unmasked.
Parameters
----------
shape : (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scale: float
The arc-second to pixel conversion factor of each pixel.
"""
mask = np.full(tuple(map(lambda d: int(d), shape)), False)
if invert: mask = np.invert(mask)
return cls(array=mask, pixel_scale=pixel_scale) |
def get_seed(self):
"""
Collects the required information to generate a data structure
that can be used to recreate exactly the same geometry object
via *\*\*kwargs*.
:returns: Object's sufficient info to initialize it.
:rtype: dict
"""
return {'polygons': [poly.points for poly in self],
'holes': [hole.points for hole in self.holes]} | Collects the required information to generate a data structure
that can be used to recreate exactly the same geometry object
via *\*\*kwargs*.
:returns: Object's sufficient info to initialize it.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Collects the required information to generate a data structure
that can be used to recreate exactly the same geometry object
via *\*\*kwargs*.
:returns: Object's sufficient info to initialize it.
:rtype: dict
### Response:
def get_seed(self):
"""
Collects the required information to generate a data structure
that can be used to recreate exactly the same geometry object
via *\*\*kwargs*.
:returns: Object's sufficient info to initialize it.
:rtype: dict
"""
return {'polygons': [poly.points for poly in self],
'holes': [hole.points for hole in self.holes]} |
def get_credits_by_section_and_regid(section, regid):
"""
Returns a uw_sws.models.Registration object
for the section and regid passed in.
"""
deprecation("Use get_credits_by_reg_url")
# note trailing comma in URL, it's required for the optional dup_code param
url = "{}{},{},{},{},{},{},.json".format(
reg_credits_url_prefix,
section.term.year,
section.term.quarter,
re.sub(' ', '%20', section.curriculum_abbr),
section.course_number,
section.section_id,
regid
)
reg_data = get_resource(url)
try:
return Decimal(reg_data['Credits'].strip())
except InvalidOperation:
pass | Returns a uw_sws.models.Registration object
for the section and regid passed in. | Below is the instruction that describes the task:
### Input:
Returns a uw_sws.models.Registration object
for the section and regid passed in.
### Response:
def get_credits_by_section_and_regid(section, regid):
"""
Returns a uw_sws.models.Registration object
for the section and regid passed in.
"""
deprecation("Use get_credits_by_reg_url")
# note trailing comma in URL, it's required for the optional dup_code param
url = "{}{},{},{},{},{},{},.json".format(
reg_credits_url_prefix,
section.term.year,
section.term.quarter,
re.sub(' ', '%20', section.curriculum_abbr),
section.course_number,
section.section_id,
regid
)
reg_data = get_resource(url)
try:
return Decimal(reg_data['Credits'].strip())
except InvalidOperation:
pass |
def get_library_citation():
'''Return a descriptive string and reference data for what users of the library should cite'''
all_ref_data = api.get_reference_data()
lib_refs_data = {k: all_ref_data[k] for k in _lib_refs}
return (_lib_refs_desc, lib_refs_data) | Return a descriptive string and reference data for what users of the library should cite | Below is the instruction that describes the task:
### Input:
Return a descriptive string and reference data for what users of the library should cite
### Response:
def get_library_citation():
'''Return a descriptive string and reference data for what users of the library should cite'''
all_ref_data = api.get_reference_data()
lib_refs_data = {k: all_ref_data[k] for k in _lib_refs}
return (_lib_refs_desc, lib_refs_data) |
def load_segmented_data(filename):
"""
Helper function to load segmented gait time series data.
:param filename: The full path of the file that contains our data. This should be a comma-separated values (CSV) file.
:type filename: str
:return: The gait time series segmented data, with x, y, z, mag_acc_sum and segmented columns.
:rtype: pandas.DataFrame
"""
data = pd.read_csv(filename, index_col=0)
data.index = data.index.astype(np.datetime64)
return data | Helper function to load segmented gait time series data.
:param filename: The full path of the file that contains our data. This should be a comma-separated values (CSV) file.
:type filename: str
:return: The gait time series segmented data, with x, y, z, mag_acc_sum and segmented columns.
:rtype: pandas.DataFrame | Below is the instruction that describes the task:
### Input:
Helper function to load segmented gait time series data.
:param filename: The full path of the file that contains our data. This should be a comma-separated values (CSV) file.
:type filename: str
:return: The gait time series segmented data, with x, y, z, mag_acc_sum and segmented columns.
:rtype: pandas.DataFrame
### Response:
def load_segmented_data(filename):
"""
Helper function to load segmented gait time series data.
:param filename: The full path of the file that contains our data. This should be a comma-separated values (CSV) file.
:type filename: str
:return: The gait time series segmented data, with x, y, z, mag_acc_sum and segmented columns.
:rtype: pandas.DataFrame
"""
data = pd.read_csv(filename, index_col=0)
data.index = data.index.astype(np.datetime64)
return data |
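A small round-trip sketch of the CSV layout this helper expects -- a timestamp index plus x, y, z, mag_acc_sum and segmented columns. The file name and values are made up, and it assumes a pandas version where the astype(np.datetime64) call above is still supported.

```python
import numpy as np
import pandas as pd

idx = pd.date_range('2019-01-01', periods=3, freq='10ms')
df = pd.DataFrame({'x': [0.1, 0.2, 0.3],
                   'y': [0.0, 0.1, 0.0],
                   'z': [9.8, 9.7, 9.8],
                   'mag_acc_sum': [9.80, 9.71, 9.81],
                   'segmented': [0, 0, 1]}, index=idx)
df.to_csv('gait_segment.csv')          # first column holds the timestamps

data = load_segmented_data('gait_segment.csv')
print(data.index.dtype)                # datetime64[ns]
```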
def parse_compartments(self):
"""Parse compartment information from model.
Return tuple of: 1) iterator of
:class:`psamm.datasource.entry.CompartmentEntry`; 2) Set of pairs
defining the compartment boundaries of the model.
"""
compartments = OrderedDict()
boundaries = set()
if 'compartments' in self._model:
boundary_map = {}
for compartment_def in self._model['compartments']:
compartment_id = compartment_def.get('id')
_check_id(compartment_id, 'Compartment')
if compartment_id in compartments:
raise ParseError('Duplicate compartment ID: {}'.format(
compartment_id))
props = dict(compartment_def)
adjacent_to = props.pop('adjacent_to', None)
if adjacent_to is not None:
if not isinstance(adjacent_to, list):
adjacent_to = [adjacent_to]
for other in adjacent_to:
boundary_map.setdefault(other, set()).add(
compartment_id)
mark = FileMark(self._context, None, None)
compartment = CompartmentEntry(props, mark)
compartments[compartment_id] = compartment
# Check boundaries from boundary_map
for source, dest_set in iteritems(boundary_map):
if source not in compartments:
raise ParseError(
'Invalid compartment {} referenced'
' by compartment {}'.format(
source, ', '.join(dest_set)))
for dest in dest_set:
boundaries.add(tuple(sorted((source, dest))))
return itervalues(compartments), frozenset(boundaries) | Parse compartment information from model.
Return tuple of: 1) iterator of
:class:`psamm.datasource.entry.CompartmentEntry`; 2) Set of pairs
defining the compartment boundaries of the model. | Below is the instruction that describes the task:
### Input:
Parse compartment information from model.
Return tuple of: 1) iterator of
:class:`psamm.datasource.entry.CompartmentEntry`; 2) Set of pairs
defining the compartment boundaries of the model.
### Response:
def parse_compartments(self):
"""Parse compartment information from model.
Return tuple of: 1) iterator of
:class:`psamm.datasource.entry.CompartmentEntry`; 2) Set of pairs
defining the compartment boundaries of the model.
"""
compartments = OrderedDict()
boundaries = set()
if 'compartments' in self._model:
boundary_map = {}
for compartment_def in self._model['compartments']:
compartment_id = compartment_def.get('id')
_check_id(compartment_id, 'Compartment')
if compartment_id in compartments:
raise ParseError('Duplicate compartment ID: {}'.format(
compartment_id))
props = dict(compartment_def)
adjacent_to = props.pop('adjacent_to', None)
if adjacent_to is not None:
if not isinstance(adjacent_to, list):
adjacent_to = [adjacent_to]
for other in adjacent_to:
boundary_map.setdefault(other, set()).add(
compartment_id)
mark = FileMark(self._context, None, None)
compartment = CompartmentEntry(props, mark)
compartments[compartment_id] = compartment
# Check boundaries from boundary_map
for source, dest_set in iteritems(boundary_map):
if source not in compartments:
raise ParseError(
'Invalid compartment {} referenced'
' by compartment {}'.format(
source, ', '.join(dest_set)))
for dest in dest_set:
boundaries.add(tuple(sorted((source, dest))))
return itervalues(compartments), frozenset(boundaries) |
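A sketch of the compartment section this parser expects in self._model (IDs and names are illustrative): each entry carries an 'id' and, optionally, an 'adjacent_to' value that is a single compartment ID or a list of IDs.

```python
model_definition = {          # plays the role of self._model in the method above
    'compartments': [
        {'id': 'c', 'name': 'Cytosol', 'adjacent_to': 'e'},
        {'id': 'e', 'name': 'Extracellular'},
    ]
}
# With this input, parse_compartments() yields two CompartmentEntry objects and the
# boundary pair set frozenset({('c', 'e')}).
```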
def get(self, treeiter, *columns):
"""
:param treeiter: the :obj:`Gtk.TreeIter`
:type treeiter: :obj:`Gtk.TreeIter`
:param \\*columns: a list of column indices to fetch
:type columns: (:obj:`int`)
Returns a tuple of all values specified by their indices in `columns`
in the order the indices are contained in `columns`
Also see :obj:`Gtk.TreeStore.get_value`\\()
"""
n_columns = self.get_n_columns()
values = []
for col in columns:
if not isinstance(col, int):
raise TypeError("column numbers must be ints")
if col < 0 or col >= n_columns:
raise ValueError("column number is out of range")
values.append(self.get_value(treeiter, col))
return tuple(values) | :param treeiter: the :obj:`Gtk.TreeIter`
:type treeiter: :obj:`Gtk.TreeIter`
:param \\*columns: a list of column indices to fetch
:type columns: (:obj:`int`)
Returns a tuple of all values specified by their indices in `columns`
in the order the indices are contained in `columns`
Also see :obj:`Gtk.TreeStore.get_value`\\() | Below is the instruction that describes the task:
### Input:
:param treeiter: the :obj:`Gtk.TreeIter`
:type treeiter: :obj:`Gtk.TreeIter`
:param \\*columns: a list of column indices to fetch
:type columns: (:obj:`int`)
Returns a tuple of all values specified by their indices in `columns`
in the order the indices are contained in `columns`
Also see :obj:`Gtk.TreeStore.get_value`\\()
### Response:
def get(self, treeiter, *columns):
"""
:param treeiter: the :obj:`Gtk.TreeIter`
:type treeiter: :obj:`Gtk.TreeIter`
:param \\*columns: a list of column indices to fetch
:type columns: (:obj:`int`)
Returns a tuple of all values specified by their indices in `columns`
in the order the indices are contained in `columns`
Also see :obj:`Gtk.TreeStore.get_value`\\()
"""
n_columns = self.get_n_columns()
values = []
for col in columns:
if not isinstance(col, int):
raise TypeError("column numbers must be ints")
if col < 0 or col >= n_columns:
raise ValueError("column number is out of range")
values.append(self.get_value(treeiter, col))
return tuple(values) |
def libvlc_media_list_player_new(p_instance):
'''Create new media_list_player.
@param p_instance: libvlc instance.
@return: media list player instance or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_list_player_new', None) or \
_Cfunction('libvlc_media_list_player_new', ((1,),), class_result(MediaListPlayer),
ctypes.c_void_p, Instance)
return f(p_instance) | Create new media_list_player.
@param p_instance: libvlc instance.
@return: media list player instance or NULL on error. | Below is the instruction that describes the task:
### Input:
Create new media_list_player.
@param p_instance: libvlc instance.
@return: media list player instance or NULL on error.
### Response:
def libvlc_media_list_player_new(p_instance):
'''Create new media_list_player.
@param p_instance: libvlc instance.
@return: media list player instance or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_list_player_new', None) or \
_Cfunction('libvlc_media_list_player_new', ((1,),), class_result(MediaListPlayer),
ctypes.c_void_p, Instance)
return f(p_instance) |
def _handshake(self):
"""
Perform an initial TLS handshake
"""
session_context = None
ssl_policy_ref = None
crl_search_ref = None
crl_policy_ref = None
ocsp_search_ref = None
ocsp_policy_ref = None
policy_array_ref = None
try:
if osx_version_info < (10, 8):
session_context_pointer = new(Security, 'SSLContextRef *')
result = Security.SSLNewContext(False, session_context_pointer)
handle_sec_error(result)
session_context = unwrap(session_context_pointer)
else:
session_context = Security.SSLCreateContext(
null(),
SecurityConst.kSSLClientSide,
SecurityConst.kSSLStreamType
)
result = Security.SSLSetIOFuncs(
session_context,
_read_callback_pointer,
_write_callback_pointer
)
handle_sec_error(result)
self._connection_id = id(self) % 2147483647
_connection_refs[self._connection_id] = self
_socket_refs[self._connection_id] = self._socket
result = Security.SSLSetConnection(session_context, self._connection_id)
handle_sec_error(result)
utf8_domain = self._hostname.encode('utf-8')
result = Security.SSLSetPeerDomainName(
session_context,
utf8_domain,
len(utf8_domain)
)
handle_sec_error(result)
if osx_version_info >= (10, 10):
disable_auto_validation = self._session._manual_validation or self._session._extra_trust_roots
explicit_validation = (not self._session._manual_validation) and self._session._extra_trust_roots
else:
disable_auto_validation = True
explicit_validation = not self._session._manual_validation
# Ensure requested protocol support is set for the session
if osx_version_info < (10, 8):
for protocol in ['SSLv2', 'SSLv3', 'TLSv1']:
protocol_const = _PROTOCOL_STRING_CONST_MAP[protocol]
enabled = protocol in self._session._protocols
result = Security.SSLSetProtocolVersionEnabled(
session_context,
protocol_const,
enabled
)
handle_sec_error(result)
if disable_auto_validation:
result = Security.SSLSetEnableCertVerify(session_context, False)
handle_sec_error(result)
else:
protocol_consts = [_PROTOCOL_STRING_CONST_MAP[protocol] for protocol in self._session._protocols]
min_protocol = min(protocol_consts)
max_protocol = max(protocol_consts)
result = Security.SSLSetProtocolVersionMin(
session_context,
min_protocol
)
handle_sec_error(result)
result = Security.SSLSetProtocolVersionMax(
session_context,
max_protocol
)
handle_sec_error(result)
if disable_auto_validation:
result = Security.SSLSetSessionOption(
session_context,
SecurityConst.kSSLSessionOptionBreakOnServerAuth,
True
)
handle_sec_error(result)
# Disable all sorts of bad cipher suites
supported_ciphers_pointer = new(Security, 'size_t *')
result = Security.SSLGetNumberSupportedCiphers(session_context, supported_ciphers_pointer)
handle_sec_error(result)
supported_ciphers = deref(supported_ciphers_pointer)
cipher_buffer = buffer_from_bytes(supported_ciphers * 4)
supported_cipher_suites_pointer = cast(Security, 'uint32_t *', cipher_buffer)
result = Security.SSLGetSupportedCiphers(
session_context,
supported_cipher_suites_pointer,
supported_ciphers_pointer
)
handle_sec_error(result)
supported_ciphers = deref(supported_ciphers_pointer)
supported_cipher_suites = array_from_pointer(
Security,
'uint32_t',
supported_cipher_suites_pointer,
supported_ciphers
)
good_ciphers = []
for supported_cipher_suite in supported_cipher_suites:
cipher_suite = int_to_bytes(supported_cipher_suite, width=2)
cipher_suite_name = CIPHER_SUITE_MAP.get(cipher_suite, cipher_suite)
good_cipher = _cipher_blacklist_regex.search(cipher_suite_name) is None
if good_cipher:
good_ciphers.append(supported_cipher_suite)
num_good_ciphers = len(good_ciphers)
good_ciphers_array = new(Security, 'uint32_t[]', num_good_ciphers)
array_set(good_ciphers_array, good_ciphers)
good_ciphers_pointer = cast(Security, 'uint32_t *', good_ciphers_array)
result = Security.SSLSetEnabledCiphers(
session_context,
good_ciphers_pointer,
num_good_ciphers
)
handle_sec_error(result)
# Set a peer id from the session to allow for session reuse, the hostname
# is appended to prevent a bug on OS X 10.7 where it tries to reuse a
# connection even if the hostnames are different.
peer_id = self._session._peer_id + self._hostname.encode('utf-8')
result = Security.SSLSetPeerID(session_context, peer_id, len(peer_id))
handle_sec_error(result)
handshake_result = Security.SSLHandshake(session_context)
if self._exception is not None:
exception = self._exception
self._exception = None
raise exception
while handshake_result == SecurityConst.errSSLWouldBlock:
handshake_result = Security.SSLHandshake(session_context)
if self._exception is not None:
exception = self._exception
self._exception = None
raise exception
if osx_version_info < (10, 8) and osx_version_info >= (10, 7):
do_validation = explicit_validation and handshake_result == 0
else:
do_validation = explicit_validation and handshake_result == SecurityConst.errSSLServerAuthCompleted
if do_validation:
trust_ref_pointer = new(Security, 'SecTrustRef *')
result = Security.SSLCopyPeerTrust(
session_context,
trust_ref_pointer
)
handle_sec_error(result)
trust_ref = unwrap(trust_ref_pointer)
cf_string_hostname = CFHelpers.cf_string_from_unicode(self._hostname)
ssl_policy_ref = Security.SecPolicyCreateSSL(True, cf_string_hostname)
result = CoreFoundation.CFRelease(cf_string_hostname)
handle_cf_error(result)
# Create a new policy for OCSP checking to disable it
ocsp_oid_pointer = struct(Security, 'CSSM_OID')
ocsp_oid = unwrap(ocsp_oid_pointer)
ocsp_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_OCSP)
ocsp_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_OCSP)
ocsp_oid.Data = cast(Security, 'char *', ocsp_oid_buffer)
ocsp_search_ref_pointer = new(Security, 'SecPolicySearchRef *')
result = Security.SecPolicySearchCreate(
SecurityConst.CSSM_CERT_X_509v3,
ocsp_oid_pointer,
null(),
ocsp_search_ref_pointer
)
handle_sec_error(result)
ocsp_search_ref = unwrap(ocsp_search_ref_pointer)
ocsp_policy_ref_pointer = new(Security, 'SecPolicyRef *')
result = Security.SecPolicySearchCopyNext(ocsp_search_ref, ocsp_policy_ref_pointer)
handle_sec_error(result)
ocsp_policy_ref = unwrap(ocsp_policy_ref_pointer)
ocsp_struct_pointer = struct(Security, 'CSSM_APPLE_TP_OCSP_OPTIONS')
ocsp_struct = unwrap(ocsp_struct_pointer)
ocsp_struct.Version = SecurityConst.CSSM_APPLE_TP_OCSP_OPTS_VERSION
ocsp_struct.Flags = (
SecurityConst.CSSM_TP_ACTION_OCSP_DISABLE_NET |
SecurityConst.CSSM_TP_ACTION_OCSP_CACHE_READ_DISABLE
)
ocsp_struct_bytes = struct_bytes(ocsp_struct_pointer)
cssm_data_pointer = struct(Security, 'CSSM_DATA')
cssm_data = unwrap(cssm_data_pointer)
cssm_data.Length = len(ocsp_struct_bytes)
ocsp_struct_buffer = buffer_from_bytes(ocsp_struct_bytes)
cssm_data.Data = cast(Security, 'char *', ocsp_struct_buffer)
result = Security.SecPolicySetValue(ocsp_policy_ref, cssm_data_pointer)
handle_sec_error(result)
# Create a new policy for CRL checking to disable it
crl_oid_pointer = struct(Security, 'CSSM_OID')
crl_oid = unwrap(crl_oid_pointer)
crl_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_CRL)
crl_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_CRL)
crl_oid.Data = cast(Security, 'char *', crl_oid_buffer)
crl_search_ref_pointer = new(Security, 'SecPolicySearchRef *')
result = Security.SecPolicySearchCreate(
SecurityConst.CSSM_CERT_X_509v3,
crl_oid_pointer,
null(),
crl_search_ref_pointer
)
handle_sec_error(result)
crl_search_ref = unwrap(crl_search_ref_pointer)
crl_policy_ref_pointer = new(Security, 'SecPolicyRef *')
result = Security.SecPolicySearchCopyNext(crl_search_ref, crl_policy_ref_pointer)
handle_sec_error(result)
crl_policy_ref = unwrap(crl_policy_ref_pointer)
crl_struct_pointer = struct(Security, 'CSSM_APPLE_TP_CRL_OPTIONS')
crl_struct = unwrap(crl_struct_pointer)
crl_struct.Version = SecurityConst.CSSM_APPLE_TP_CRL_OPTS_VERSION
crl_struct.CrlFlags = 0
crl_struct_bytes = struct_bytes(crl_struct_pointer)
cssm_data_pointer = struct(Security, 'CSSM_DATA')
cssm_data = unwrap(cssm_data_pointer)
cssm_data.Length = len(crl_struct_bytes)
crl_struct_buffer = buffer_from_bytes(crl_struct_bytes)
cssm_data.Data = cast(Security, 'char *', crl_struct_buffer)
result = Security.SecPolicySetValue(crl_policy_ref, cssm_data_pointer)
handle_sec_error(result)
policy_array_ref = CFHelpers.cf_array_from_list([
ssl_policy_ref,
crl_policy_ref,
ocsp_policy_ref
])
result = Security.SecTrustSetPolicies(trust_ref, policy_array_ref)
handle_sec_error(result)
if self._session._extra_trust_roots:
ca_cert_refs = []
ca_certs = []
for cert in self._session._extra_trust_roots:
ca_cert = load_certificate(cert)
ca_certs.append(ca_cert)
ca_cert_refs.append(ca_cert.sec_certificate_ref)
result = Security.SecTrustSetAnchorCertificatesOnly(trust_ref, False)
handle_sec_error(result)
array_ref = CFHelpers.cf_array_from_list(ca_cert_refs)
result = Security.SecTrustSetAnchorCertificates(trust_ref, array_ref)
handle_sec_error(result)
result_pointer = new(Security, 'SecTrustResultType *')
result = Security.SecTrustEvaluate(trust_ref, result_pointer)
handle_sec_error(result)
trust_result_code = deref(result_pointer)
invalid_chain_error_codes = set([
SecurityConst.kSecTrustResultProceed,
SecurityConst.kSecTrustResultUnspecified
])
if trust_result_code not in invalid_chain_error_codes:
handshake_result = SecurityConst.errSSLXCertChainInvalid
else:
handshake_result = Security.SSLHandshake(session_context)
while handshake_result == SecurityConst.errSSLWouldBlock:
handshake_result = Security.SSLHandshake(session_context)
self._done_handshake = True
handshake_error_codes = set([
SecurityConst.errSSLXCertChainInvalid,
SecurityConst.errSSLCertExpired,
SecurityConst.errSSLCertNotYetValid,
SecurityConst.errSSLUnknownRootCert,
SecurityConst.errSSLNoRootCert,
SecurityConst.errSSLHostNameMismatch,
SecurityConst.errSSLInternal,
])
# In testing, only errSSLXCertChainInvalid was ever returned for
# all of these different situations, however we include the others
# for completeness. To get the real reason we have to use the
# certificate from the handshake and use the deprecated function
# SecTrustGetCssmResultCode().
if handshake_result in handshake_error_codes:
trust_ref_pointer = new(Security, 'SecTrustRef *')
result = Security.SSLCopyPeerTrust(
session_context,
trust_ref_pointer
)
handle_sec_error(result)
trust_ref = unwrap(trust_ref_pointer)
result_code_pointer = new(Security, 'OSStatus *')
result = Security.SecTrustGetCssmResultCode(trust_ref, result_code_pointer)
result_code = deref(result_code_pointer)
chain = extract_chain(self._server_hello)
self_signed = False
revoked = False
expired = False
not_yet_valid = False
no_issuer = False
cert = None
bad_hostname = False
if chain:
cert = chain[0]
oscrypto_cert = load_certificate(cert)
self_signed = oscrypto_cert.self_signed
revoked = result_code == SecurityConst.CSSMERR_TP_CERT_REVOKED
no_issuer = not self_signed and result_code == SecurityConst.CSSMERR_TP_NOT_TRUSTED
expired = result_code == SecurityConst.CSSMERR_TP_CERT_EXPIRED
not_yet_valid = result_code == SecurityConst.CSSMERR_TP_CERT_NOT_VALID_YET
bad_hostname = result_code == SecurityConst.CSSMERR_APPLETP_HOSTNAME_MISMATCH
# On macOS 10.12, some expired certificates return errSSLInternal
if osx_version_info >= (10, 12):
validity = cert['tbs_certificate']['validity']
not_before = validity['not_before'].chosen.native
not_after = validity['not_after'].chosen.native
utcnow = datetime.datetime.now(timezone.utc)
expired = not_after < utcnow
not_yet_valid = not_before > utcnow
if chain and chain[0].hash_algo in set(['md5', 'md2']):
raise_weak_signature(chain[0])
if revoked:
raise_revoked(cert)
if bad_hostname:
raise_hostname(cert, self._hostname)
elif expired or not_yet_valid:
raise_expired_not_yet_valid(cert)
elif no_issuer:
raise_no_issuer(cert)
elif self_signed:
raise_self_signed(cert)
if detect_client_auth_request(self._server_hello):
raise_client_auth()
raise_verification(cert)
if handshake_result == SecurityConst.errSSLPeerHandshakeFail:
if detect_client_auth_request(self._server_hello):
raise_client_auth()
raise_handshake()
if handshake_result == SecurityConst.errSSLWeakPeerEphemeralDHKey:
raise_dh_params()
if handshake_result == SecurityConst.errSSLPeerProtocolVersion:
raise_protocol_version()
if handshake_result in set([SecurityConst.errSSLRecordOverflow, SecurityConst.errSSLProtocol]):
self._server_hello += _read_remaining(self._socket)
raise_protocol_error(self._server_hello)
if handshake_result in set([SecurityConst.errSSLClosedNoNotify, SecurityConst.errSSLClosedAbort]):
if not self._done_handshake:
self._server_hello += _read_remaining(self._socket)
if detect_other_protocol(self._server_hello):
raise_protocol_error(self._server_hello)
raise_disconnection()
if osx_version_info < (10, 10):
dh_params_length = get_dh_params_length(self._server_hello)
if dh_params_length is not None and dh_params_length < 1024:
raise_dh_params()
would_block = handshake_result == SecurityConst.errSSLWouldBlock
server_auth_complete = handshake_result == SecurityConst.errSSLServerAuthCompleted
manual_validation = self._session._manual_validation and server_auth_complete
if not would_block and not manual_validation:
handle_sec_error(handshake_result, TLSError)
self._session_context = session_context
protocol_const_pointer = new(Security, 'SSLProtocol *')
result = Security.SSLGetNegotiatedProtocolVersion(
session_context,
protocol_const_pointer
)
handle_sec_error(result)
protocol_const = deref(protocol_const_pointer)
self._protocol = _PROTOCOL_CONST_STRING_MAP[protocol_const]
cipher_int_pointer = new(Security, 'SSLCipherSuite *')
result = Security.SSLGetNegotiatedCipher(
session_context,
cipher_int_pointer
)
handle_sec_error(result)
cipher_int = deref(cipher_int_pointer)
cipher_bytes = int_to_bytes(cipher_int, width=2)
self._cipher_suite = CIPHER_SUITE_MAP.get(cipher_bytes, cipher_bytes)
session_info = parse_session_info(
self._server_hello,
self._client_hello
)
self._compression = session_info['compression']
self._session_id = session_info['session_id']
self._session_ticket = session_info['session_ticket']
except (OSError, socket_.error):
if session_context:
if osx_version_info < (10, 8):
result = Security.SSLDisposeContext(session_context)
handle_sec_error(result)
else:
result = CoreFoundation.CFRelease(session_context)
handle_cf_error(result)
self._session_context = None
self.close()
raise
finally:
# Trying to release crl_search_ref or ocsp_search_ref results in
# a segmentation fault, so we do not do that
if ssl_policy_ref:
result = CoreFoundation.CFRelease(ssl_policy_ref)
handle_cf_error(result)
ssl_policy_ref = None
if crl_policy_ref:
result = CoreFoundation.CFRelease(crl_policy_ref)
handle_cf_error(result)
crl_policy_ref = None
if ocsp_policy_ref:
result = CoreFoundation.CFRelease(ocsp_policy_ref)
handle_cf_error(result)
ocsp_policy_ref = None
if policy_array_ref:
result = CoreFoundation.CFRelease(policy_array_ref)
handle_cf_error(result)
policy_array_ref = None | Perform an initial TLS handshake | Below is the instruction that describes the task:
### Input:
Perform an initial TLS handshake
### Response:
def _handshake(self):
"""
Perform an initial TLS handshake
"""
session_context = None
ssl_policy_ref = None
crl_search_ref = None
crl_policy_ref = None
ocsp_search_ref = None
ocsp_policy_ref = None
policy_array_ref = None
try:
if osx_version_info < (10, 8):
session_context_pointer = new(Security, 'SSLContextRef *')
result = Security.SSLNewContext(False, session_context_pointer)
handle_sec_error(result)
session_context = unwrap(session_context_pointer)
else:
session_context = Security.SSLCreateContext(
null(),
SecurityConst.kSSLClientSide,
SecurityConst.kSSLStreamType
)
result = Security.SSLSetIOFuncs(
session_context,
_read_callback_pointer,
_write_callback_pointer
)
handle_sec_error(result)
self._connection_id = id(self) % 2147483647
_connection_refs[self._connection_id] = self
_socket_refs[self._connection_id] = self._socket
result = Security.SSLSetConnection(session_context, self._connection_id)
handle_sec_error(result)
utf8_domain = self._hostname.encode('utf-8')
result = Security.SSLSetPeerDomainName(
session_context,
utf8_domain,
len(utf8_domain)
)
handle_sec_error(result)
if osx_version_info >= (10, 10):
disable_auto_validation = self._session._manual_validation or self._session._extra_trust_roots
explicit_validation = (not self._session._manual_validation) and self._session._extra_trust_roots
else:
disable_auto_validation = True
explicit_validation = not self._session._manual_validation
# Ensure requested protocol support is set for the session
if osx_version_info < (10, 8):
for protocol in ['SSLv2', 'SSLv3', 'TLSv1']:
protocol_const = _PROTOCOL_STRING_CONST_MAP[protocol]
enabled = protocol in self._session._protocols
result = Security.SSLSetProtocolVersionEnabled(
session_context,
protocol_const,
enabled
)
handle_sec_error(result)
if disable_auto_validation:
result = Security.SSLSetEnableCertVerify(session_context, False)
handle_sec_error(result)
else:
protocol_consts = [_PROTOCOL_STRING_CONST_MAP[protocol] for protocol in self._session._protocols]
min_protocol = min(protocol_consts)
max_protocol = max(protocol_consts)
result = Security.SSLSetProtocolVersionMin(
session_context,
min_protocol
)
handle_sec_error(result)
result = Security.SSLSetProtocolVersionMax(
session_context,
max_protocol
)
handle_sec_error(result)
if disable_auto_validation:
result = Security.SSLSetSessionOption(
session_context,
SecurityConst.kSSLSessionOptionBreakOnServerAuth,
True
)
handle_sec_error(result)
# Disable all sorts of bad cipher suites
supported_ciphers_pointer = new(Security, 'size_t *')
result = Security.SSLGetNumberSupportedCiphers(session_context, supported_ciphers_pointer)
handle_sec_error(result)
supported_ciphers = deref(supported_ciphers_pointer)
cipher_buffer = buffer_from_bytes(supported_ciphers * 4)
supported_cipher_suites_pointer = cast(Security, 'uint32_t *', cipher_buffer)
result = Security.SSLGetSupportedCiphers(
session_context,
supported_cipher_suites_pointer,
supported_ciphers_pointer
)
handle_sec_error(result)
supported_ciphers = deref(supported_ciphers_pointer)
supported_cipher_suites = array_from_pointer(
Security,
'uint32_t',
supported_cipher_suites_pointer,
supported_ciphers
)
good_ciphers = []
for supported_cipher_suite in supported_cipher_suites:
cipher_suite = int_to_bytes(supported_cipher_suite, width=2)
cipher_suite_name = CIPHER_SUITE_MAP.get(cipher_suite, cipher_suite)
good_cipher = _cipher_blacklist_regex.search(cipher_suite_name) is None
if good_cipher:
good_ciphers.append(supported_cipher_suite)
num_good_ciphers = len(good_ciphers)
good_ciphers_array = new(Security, 'uint32_t[]', num_good_ciphers)
array_set(good_ciphers_array, good_ciphers)
good_ciphers_pointer = cast(Security, 'uint32_t *', good_ciphers_array)
result = Security.SSLSetEnabledCiphers(
session_context,
good_ciphers_pointer,
num_good_ciphers
)
handle_sec_error(result)
# Set a peer id from the session to allow for session reuse, the hostname
# is appended to prevent a bug on OS X 10.7 where it tries to reuse a
# connection even if the hostnames are different.
peer_id = self._session._peer_id + self._hostname.encode('utf-8')
result = Security.SSLSetPeerID(session_context, peer_id, len(peer_id))
handle_sec_error(result)
handshake_result = Security.SSLHandshake(session_context)
if self._exception is not None:
exception = self._exception
self._exception = None
raise exception
while handshake_result == SecurityConst.errSSLWouldBlock:
handshake_result = Security.SSLHandshake(session_context)
if self._exception is not None:
exception = self._exception
self._exception = None
raise exception
if osx_version_info < (10, 8) and osx_version_info >= (10, 7):
do_validation = explicit_validation and handshake_result == 0
else:
do_validation = explicit_validation and handshake_result == SecurityConst.errSSLServerAuthCompleted
if do_validation:
trust_ref_pointer = new(Security, 'SecTrustRef *')
result = Security.SSLCopyPeerTrust(
session_context,
trust_ref_pointer
)
handle_sec_error(result)
trust_ref = unwrap(trust_ref_pointer)
cf_string_hostname = CFHelpers.cf_string_from_unicode(self._hostname)
ssl_policy_ref = Security.SecPolicyCreateSSL(True, cf_string_hostname)
result = CoreFoundation.CFRelease(cf_string_hostname)
handle_cf_error(result)
# Create a new policy for OCSP checking to disable it
ocsp_oid_pointer = struct(Security, 'CSSM_OID')
ocsp_oid = unwrap(ocsp_oid_pointer)
ocsp_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_OCSP)
ocsp_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_OCSP)
ocsp_oid.Data = cast(Security, 'char *', ocsp_oid_buffer)
ocsp_search_ref_pointer = new(Security, 'SecPolicySearchRef *')
result = Security.SecPolicySearchCreate(
SecurityConst.CSSM_CERT_X_509v3,
ocsp_oid_pointer,
null(),
ocsp_search_ref_pointer
)
handle_sec_error(result)
ocsp_search_ref = unwrap(ocsp_search_ref_pointer)
ocsp_policy_ref_pointer = new(Security, 'SecPolicyRef *')
result = Security.SecPolicySearchCopyNext(ocsp_search_ref, ocsp_policy_ref_pointer)
handle_sec_error(result)
ocsp_policy_ref = unwrap(ocsp_policy_ref_pointer)
ocsp_struct_pointer = struct(Security, 'CSSM_APPLE_TP_OCSP_OPTIONS')
ocsp_struct = unwrap(ocsp_struct_pointer)
ocsp_struct.Version = SecurityConst.CSSM_APPLE_TP_OCSP_OPTS_VERSION
ocsp_struct.Flags = (
SecurityConst.CSSM_TP_ACTION_OCSP_DISABLE_NET |
SecurityConst.CSSM_TP_ACTION_OCSP_CACHE_READ_DISABLE
)
ocsp_struct_bytes = struct_bytes(ocsp_struct_pointer)
cssm_data_pointer = struct(Security, 'CSSM_DATA')
cssm_data = unwrap(cssm_data_pointer)
cssm_data.Length = len(ocsp_struct_bytes)
ocsp_struct_buffer = buffer_from_bytes(ocsp_struct_bytes)
cssm_data.Data = cast(Security, 'char *', ocsp_struct_buffer)
result = Security.SecPolicySetValue(ocsp_policy_ref, cssm_data_pointer)
handle_sec_error(result)
# Create a new policy for CRL checking to disable it
crl_oid_pointer = struct(Security, 'CSSM_OID')
crl_oid = unwrap(crl_oid_pointer)
crl_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_CRL)
crl_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_CRL)
crl_oid.Data = cast(Security, 'char *', crl_oid_buffer)
crl_search_ref_pointer = new(Security, 'SecPolicySearchRef *')
result = Security.SecPolicySearchCreate(
SecurityConst.CSSM_CERT_X_509v3,
crl_oid_pointer,
null(),
crl_search_ref_pointer
)
handle_sec_error(result)
crl_search_ref = unwrap(crl_search_ref_pointer)
crl_policy_ref_pointer = new(Security, 'SecPolicyRef *')
result = Security.SecPolicySearchCopyNext(crl_search_ref, crl_policy_ref_pointer)
handle_sec_error(result)
crl_policy_ref = unwrap(crl_policy_ref_pointer)
crl_struct_pointer = struct(Security, 'CSSM_APPLE_TP_CRL_OPTIONS')
crl_struct = unwrap(crl_struct_pointer)
crl_struct.Version = SecurityConst.CSSM_APPLE_TP_CRL_OPTS_VERSION
crl_struct.CrlFlags = 0
crl_struct_bytes = struct_bytes(crl_struct_pointer)
cssm_data_pointer = struct(Security, 'CSSM_DATA')
cssm_data = unwrap(cssm_data_pointer)
cssm_data.Length = len(crl_struct_bytes)
crl_struct_buffer = buffer_from_bytes(crl_struct_bytes)
cssm_data.Data = cast(Security, 'char *', crl_struct_buffer)
result = Security.SecPolicySetValue(crl_policy_ref, cssm_data_pointer)
handle_sec_error(result)
policy_array_ref = CFHelpers.cf_array_from_list([
ssl_policy_ref,
crl_policy_ref,
ocsp_policy_ref
])
result = Security.SecTrustSetPolicies(trust_ref, policy_array_ref)
handle_sec_error(result)
if self._session._extra_trust_roots:
ca_cert_refs = []
ca_certs = []
for cert in self._session._extra_trust_roots:
ca_cert = load_certificate(cert)
ca_certs.append(ca_cert)
ca_cert_refs.append(ca_cert.sec_certificate_ref)
result = Security.SecTrustSetAnchorCertificatesOnly(trust_ref, False)
handle_sec_error(result)
array_ref = CFHelpers.cf_array_from_list(ca_cert_refs)
result = Security.SecTrustSetAnchorCertificates(trust_ref, array_ref)
handle_sec_error(result)
result_pointer = new(Security, 'SecTrustResultType *')
result = Security.SecTrustEvaluate(trust_ref, result_pointer)
handle_sec_error(result)
trust_result_code = deref(result_pointer)
invalid_chain_error_codes = set([
SecurityConst.kSecTrustResultProceed,
SecurityConst.kSecTrustResultUnspecified
])
if trust_result_code not in invalid_chain_error_codes:
handshake_result = SecurityConst.errSSLXCertChainInvalid
else:
handshake_result = Security.SSLHandshake(session_context)
while handshake_result == SecurityConst.errSSLWouldBlock:
handshake_result = Security.SSLHandshake(session_context)
self._done_handshake = True
handshake_error_codes = set([
SecurityConst.errSSLXCertChainInvalid,
SecurityConst.errSSLCertExpired,
SecurityConst.errSSLCertNotYetValid,
SecurityConst.errSSLUnknownRootCert,
SecurityConst.errSSLNoRootCert,
SecurityConst.errSSLHostNameMismatch,
SecurityConst.errSSLInternal,
])
# In testing, only errSSLXCertChainInvalid was ever returned for
# all of these different situations, however we include the others
# for completeness. To get the real reason we have to use the
# certificate from the handshake and use the deprecated function
# SecTrustGetCssmResultCode().
if handshake_result in handshake_error_codes:
trust_ref_pointer = new(Security, 'SecTrustRef *')
result = Security.SSLCopyPeerTrust(
session_context,
trust_ref_pointer
)
handle_sec_error(result)
trust_ref = unwrap(trust_ref_pointer)
result_code_pointer = new(Security, 'OSStatus *')
result = Security.SecTrustGetCssmResultCode(trust_ref, result_code_pointer)
result_code = deref(result_code_pointer)
chain = extract_chain(self._server_hello)
self_signed = False
revoked = False
expired = False
not_yet_valid = False
no_issuer = False
cert = None
bad_hostname = False
if chain:
cert = chain[0]
oscrypto_cert = load_certificate(cert)
self_signed = oscrypto_cert.self_signed
revoked = result_code == SecurityConst.CSSMERR_TP_CERT_REVOKED
no_issuer = not self_signed and result_code == SecurityConst.CSSMERR_TP_NOT_TRUSTED
expired = result_code == SecurityConst.CSSMERR_TP_CERT_EXPIRED
not_yet_valid = result_code == SecurityConst.CSSMERR_TP_CERT_NOT_VALID_YET
bad_hostname = result_code == SecurityConst.CSSMERR_APPLETP_HOSTNAME_MISMATCH
# On macOS 10.12, some expired certificates return errSSLInternal
if osx_version_info >= (10, 12):
validity = cert['tbs_certificate']['validity']
not_before = validity['not_before'].chosen.native
not_after = validity['not_after'].chosen.native
utcnow = datetime.datetime.now(timezone.utc)
expired = not_after < utcnow
not_yet_valid = not_before > utcnow
if chain and chain[0].hash_algo in set(['md5', 'md2']):
raise_weak_signature(chain[0])
if revoked:
raise_revoked(cert)
if bad_hostname:
raise_hostname(cert, self._hostname)
elif expired or not_yet_valid:
raise_expired_not_yet_valid(cert)
elif no_issuer:
raise_no_issuer(cert)
elif self_signed:
raise_self_signed(cert)
if detect_client_auth_request(self._server_hello):
raise_client_auth()
raise_verification(cert)
if handshake_result == SecurityConst.errSSLPeerHandshakeFail:
if detect_client_auth_request(self._server_hello):
raise_client_auth()
raise_handshake()
if handshake_result == SecurityConst.errSSLWeakPeerEphemeralDHKey:
raise_dh_params()
if handshake_result == SecurityConst.errSSLPeerProtocolVersion:
raise_protocol_version()
if handshake_result in set([SecurityConst.errSSLRecordOverflow, SecurityConst.errSSLProtocol]):
self._server_hello += _read_remaining(self._socket)
raise_protocol_error(self._server_hello)
if handshake_result in set([SecurityConst.errSSLClosedNoNotify, SecurityConst.errSSLClosedAbort]):
if not self._done_handshake:
self._server_hello += _read_remaining(self._socket)
if detect_other_protocol(self._server_hello):
raise_protocol_error(self._server_hello)
raise_disconnection()
if osx_version_info < (10, 10):
dh_params_length = get_dh_params_length(self._server_hello)
if dh_params_length is not None and dh_params_length < 1024:
raise_dh_params()
would_block = handshake_result == SecurityConst.errSSLWouldBlock
server_auth_complete = handshake_result == SecurityConst.errSSLServerAuthCompleted
manual_validation = self._session._manual_validation and server_auth_complete
if not would_block and not manual_validation:
handle_sec_error(handshake_result, TLSError)
self._session_context = session_context
protocol_const_pointer = new(Security, 'SSLProtocol *')
result = Security.SSLGetNegotiatedProtocolVersion(
session_context,
protocol_const_pointer
)
handle_sec_error(result)
protocol_const = deref(protocol_const_pointer)
self._protocol = _PROTOCOL_CONST_STRING_MAP[protocol_const]
cipher_int_pointer = new(Security, 'SSLCipherSuite *')
result = Security.SSLGetNegotiatedCipher(
session_context,
cipher_int_pointer
)
handle_sec_error(result)
cipher_int = deref(cipher_int_pointer)
cipher_bytes = int_to_bytes(cipher_int, width=2)
self._cipher_suite = CIPHER_SUITE_MAP.get(cipher_bytes, cipher_bytes)
session_info = parse_session_info(
self._server_hello,
self._client_hello
)
self._compression = session_info['compression']
self._session_id = session_info['session_id']
self._session_ticket = session_info['session_ticket']
except (OSError, socket_.error):
if session_context:
if osx_version_info < (10, 8):
result = Security.SSLDisposeContext(session_context)
handle_sec_error(result)
else:
result = CoreFoundation.CFRelease(session_context)
handle_cf_error(result)
self._session_context = None
self.close()
raise
finally:
# Trying to release crl_search_ref or ocsp_search_ref results in
# a segmentation fault, so we do not do that
if ssl_policy_ref:
result = CoreFoundation.CFRelease(ssl_policy_ref)
handle_cf_error(result)
ssl_policy_ref = None
if crl_policy_ref:
result = CoreFoundation.CFRelease(crl_policy_ref)
handle_cf_error(result)
crl_policy_ref = None
if ocsp_policy_ref:
result = CoreFoundation.CFRelease(ocsp_policy_ref)
handle_cf_error(result)
ocsp_policy_ref = None
if policy_array_ref:
result = CoreFoundation.CFRelease(policy_array_ref)
handle_cf_error(result)
policy_array_ref = None |
def addCity(self, fileName):
"""Add a JSON file and read the users.
:param fileName: path to the JSON file. This file has to have a list of
users, called users.
:type fileName: str.
"""
with open(fileName) as data_file:
data = load(data_file)
for u in data["users"]:
if not any(d["name"] == u["name"] for d in self.__users):
self.__users.append(u) | Add a JSON file and read the users.
:param fileName: path to the JSON file. This file has to have a list of
users, called users.
:type fileName: str. | Below is the instruction that describes the task:
### Input:
Add a JSON file and read the users.
:param fileName: path to the JSON file. This file has to have a list of
users, called users.
:type fileName: str.
### Response:
def addCity(self, fileName):
"""Add a JSON file and read the users.
:param fileName: path to the JSON file. This file has to have a list of
users, called users.
:type fileName: str.
"""
with open(fileName) as data_file:
data = load(data_file)
for u in data["users"]:
if not any(d["name"] == u["name"] for d in self.__users):
self.__users.append(u) |
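A minimal sketch of the JSON layout addCity expects, based only on what the docstring and duplicate check above imply; the file name and user entries are invented.
import json

# Hypothetical input file for addCity: a top-level "users" list whose items
# carry at least a "name" key (used above to skip duplicates).
with open("city.json", "w") as fh:
    json.dump({"users": [{"name": "alice"}, {"name": "bob"}]}, fh)
# An object exposing addCity (not shown here) would then ingest it with:
#     obj.addCity("city.json")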
def QA_data_min_resample(min_data, type_='5min'):
"""分钟线采样成大周期
分钟线采样成子级别的分钟线
time+ OHLC==> resample
Arguments:
min {[type]} -- [description]
raw_type {[type]} -- [description]
new_type {[type]} -- [description]
"""
try:
min_data = min_data.reset_index().set_index('datetime', drop=False)
except:
min_data = min_data.set_index('datetime', drop=False)
CONVERSION = {
'code': 'first',
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'vol': 'sum',
'amount': 'sum'
} if 'vol' in min_data.columns else {
'code': 'first',
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum',
'amount': 'sum'
}
resx = pd.DataFrame()
for item in set(min_data.index.date):
min_data_p = min_data.loc[str(item)]
n = min_data_p['{} 21:00:00'.format(item):].resample(
type_,
base=30,
closed='right',
loffset=type_
).apply(CONVERSION)
d = min_data_p[:'{} 11:30:00'.format(item)].resample(
type_,
base=30,
closed='right',
loffset=type_
).apply(CONVERSION)
f = min_data_p['{} 13:00:00'.format(item):].resample(
type_,
closed='right',
loffset=type_
).apply(CONVERSION)
resx = resx.append(d).append(f)
return resx.dropna().reset_index().set_index(['datetime', 'code']) | Resample minute bars into a larger period
Resample minute bars into sub-level minute bars
time+ OHLC==> resample
Arguments:
min {[type]} -- [description]
raw_type {[type]} -- [description]
new_type {[type]} -- [description] | Below is the instruction that describes the task:
### Input:
Resample minute bars into a larger period
Resample minute bars into sub-level minute bars
time+ OHLC==> resample
Arguments:
min {[type]} -- [description]
raw_type {[type]} -- [description]
new_type {[type]} -- [description]
### Response:
def QA_data_min_resample(min_data, type_='5min'):
"""分钟线采样成大周期
分钟线采样成子级别的分钟线
time+ OHLC==> resample
Arguments:
min {[type]} -- [description]
raw_type {[type]} -- [description]
new_type {[type]} -- [description]
"""
try:
min_data = min_data.reset_index().set_index('datetime', drop=False)
except:
min_data = min_data.set_index('datetime', drop=False)
CONVERSION = {
'code': 'first',
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'vol': 'sum',
'amount': 'sum'
} if 'vol' in min_data.columns else {
'code': 'first',
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum',
'amount': 'sum'
}
resx = pd.DataFrame()
for item in set(min_data.index.date):
min_data_p = min_data.loc[str(item)]
n = min_data_p['{} 21:00:00'.format(item):].resample(
type_,
base=30,
closed='right',
loffset=type_
).apply(CONVERSION)
d = min_data_p[:'{} 11:30:00'.format(item)].resample(
type_,
base=30,
closed='right',
loffset=type_
).apply(CONVERSION)
f = min_data_p['{} 13:00:00'.format(item):].resample(
type_,
closed='right',
loffset=type_
).apply(CONVERSION)
resx = resx.append(d).append(f)
return resx.dropna().reset_index().set_index(['datetime', 'code']) |
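For orientation, a hedged, self-contained sketch of the same OHLC aggregation using plain pandas resampling; the ticker, prices, and timestamps are synthetic, and the per-session splitting done above is deliberately omitted.
import numpy as np
import pandas as pd

idx = pd.date_range("2021-01-04 09:31", periods=10, freq="1min", name="datetime")
base = np.arange(10.0, 20.0)            # ten synthetic 1-minute prices
minute = pd.DataFrame({
    "code": "000001",
    "open": base,
    "high": base + 0.5,
    "low": base - 0.5,
    "close": base + 0.2,
    "vol": 100,
    "amount": 1000.0,
}, index=idx)

# Aggregate 1-minute bars into 5-minute bars with the same CONVERSION mapping.
five_min = minute.resample("5min", closed="right", label="right").agg({
    "code": "first", "open": "first", "high": "max",
    "low": "min", "close": "last", "vol": "sum", "amount": "sum",
})
print(five_min)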
def update(self, callback=None, errback=None, **kwargs):
"""
Update zone configuration. Pass a list of keywords and their values to
update. For the list of keywords available for zone configuration, see
:attr:`ns1.rest.zones.Zones.INT_FIELDS` and
:attr:`ns1.rest.zones.Zones.PASSTHRU_FIELDS`
"""
if not self.data:
raise ZoneException('zone not loaded')
def success(result, *args):
self.data = result
if callback:
return callback(self)
else:
return self
return self._rest.update(self.zone, callback=success, errback=errback,
**kwargs) | Update zone configuration. Pass a list of keywords and their values to
update. For the list of keywords available for zone configuration, see
:attr:`ns1.rest.zones.Zones.INT_FIELDS` and
:attr:`ns1.rest.zones.Zones.PASSTHRU_FIELDS` | Below is the instruction that describes the task:
### Input:
Update zone configuration. Pass a list of keywords and their values to
update. For the list of keywords available for zone configuration, see
:attr:`ns1.rest.zones.Zones.INT_FIELDS` and
:attr:`ns1.rest.zones.Zones.PASSTHRU_FIELDS`
### Response:
def update(self, callback=None, errback=None, **kwargs):
"""
Update zone configuration. Pass a list of keywords and their values to
update. For the list of keywords available for zone configuration, see
:attr:`ns1.rest.zones.Zones.INT_FIELDS` and
:attr:`ns1.rest.zones.Zones.PASSTHRU_FIELDS`
"""
if not self.data:
raise ZoneException('zone not loaded')
def success(result, *args):
self.data = result
if callback:
return callback(self)
else:
return self
return self._rest.update(self.zone, callback=success, errback=errback,
**kwargs) |
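A hedged usage sketch: it assumes a Zone object has already been loaded elsewhere, and the keyword names below (ttl, refresh) are only examples of the integer fields the docstring points to.
# zone is assumed to be an already-loaded Zone instance; otherwise
# ZoneException('zone not loaded') is raised above.
zone = zone.update(ttl=3600, refresh=43200)   # synchronous path returns self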
def _update_pathway_definitions(crosstalk_corrected_index_map,
gene_row_names,
pathway_column_names):
"""Helper function to convert the mapping of int
(pathway id -> list of gene ids) to the corresponding pathway
names and gene identifiers.
"""
corrected_pathway_definitions = {}
for pathway_index, gene_indices in crosstalk_corrected_index_map.items():
pathway = pathway_column_names[pathway_index]
genes = set([gene_row_names[index] for index in list(gene_indices)])
corrected_pathway_definitions[pathway] = genes
return corrected_pathway_definitions | Helper function to convert the mapping of int
(pathway id -> list of gene ids) to the corresponding pathway
names and gene identifiers. | Below is the instruction that describes the task:
### Input:
Helper function to convert the mapping of int
(pathway id -> list of gene ids) to the corresponding pathway
names and gene identifiers.
### Response:
def _update_pathway_definitions(crosstalk_corrected_index_map,
gene_row_names,
pathway_column_names):
"""Helper function to convert the mapping of int
(pathway id -> list of gene ids) to the corresponding pathway
names and gene identifiers.
"""
corrected_pathway_definitions = {}
for pathway_index, gene_indices in crosstalk_corrected_index_map.items():
pathway = pathway_column_names[pathway_index]
genes = set([gene_row_names[index] for index in list(gene_indices)])
corrected_pathway_definitions[pathway] = genes
return corrected_pathway_definitions |
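A toy illustration of the index-to-name conversion, assuming the function above is in scope; the gene and pathway names are invented.
gene_row_names = ["TP53", "BRCA1", "EGFR"]
pathway_column_names = ["pathway_A", "pathway_B"]
crosstalk_corrected_index_map = {0: [0, 2], 1: [1]}

result = _update_pathway_definitions(
    crosstalk_corrected_index_map, gene_row_names, pathway_column_names)
print(result)   # {'pathway_A': {'TP53', 'EGFR'}, 'pathway_B': {'BRCA1'}}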
def _from_signer_and_info(cls, signer, info, **kwargs):
"""Creates a Credentials instance from a signer and service account
info.
Args:
signer (google.auth.crypt.Signer): The signer used to sign JWTs.
info (Mapping[str, str]): The service account info.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.Credentials: The constructed credentials.
Raises:
ValueError: If the info is not in the expected format.
"""
return cls(
signer,
service_account_email=info['client_email'],
token_uri=info['token_uri'],
project_id=info.get('project_id'), **kwargs) | Creates a Credentials instance from a signer and service account
info.
Args:
signer (google.auth.crypt.Signer): The signer used to sign JWTs.
info (Mapping[str, str]): The service account info.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.Credentials: The constructed credentials.
Raises:
ValueError: If the info is not in the expected format. | Below is the instruction that describes the task:
### Input:
Creates a Credentials instance from a signer and service account
info.
Args:
signer (google.auth.crypt.Signer): The signer used to sign JWTs.
info (Mapping[str, str]): The service account info.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.Credentials: The constructed credentials.
Raises:
ValueError: If the info is not in the expected format.
### Response:
def _from_signer_and_info(cls, signer, info, **kwargs):
"""Creates a Credentials instance from a signer and service account
info.
Args:
signer (google.auth.crypt.Signer): The signer used to sign JWTs.
info (Mapping[str, str]): The service account info.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.Credentials: The constructed credentials.
Raises:
ValueError: If the info is not in the expected format.
"""
return cls(
signer,
service_account_email=info['client_email'],
token_uri=info['token_uri'],
project_id=info.get('project_id'), **kwargs) |
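For reference, the minimal shape of the info mapping this helper reads; every value below is a placeholder rather than a real credential.
info = {
    "client_email": "svc-account@example-project.iam.gserviceaccount.com",
    "token_uri": "https://oauth2.googleapis.com/token",
    "project_id": "example-project",   # optional: read with .get() above
}
# With a signer already constructed elsewhere:
#     credentials = Credentials._from_signer_and_info(signer, info)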
def recursion_error(self, repeated_parser: str):
"""Generate an error to indicate that infinite recursion was encountered.
A parser can supply a representation of itself to this method and the
reader will supply the context, including the location where the
parser stalled.
Args:
repeated_parser: A representation of the repeated parser
Returns:
A full error message
"""
if self.finished:
return super().recursion_error(repeated_parser)
else:
line_index, character_index, line, pointer = self.current_line()
return 'Infinite recursion detected in {}; empty string was matched and will be matched forever\n' \
'Line {}, character {}\n\n{}{}'.format(repeated_parser, line_index, character_index, line, pointer) | Generate an error to indicate that infinite recursion was encountered.
A parser can supply a representation of itself to this method and the
reader will supply the context, including the location where the
parser stalled.
Args:
repeated_parser: A representation of the repeated parser
Returns:
A full error message | Below is the instruction that describes the task:
### Input:
Generate an error to indicate that infinite recursion was encountered.
A parser can supply a representation of itself to this method and the
reader will supply the context, including the location where the
parser stalled.
Args:
repeated_parser: A representation of the repeated parser
Returns:
A full error message
### Response:
def recursion_error(self, repeated_parser: str):
"""Generate an error to indicate that infinite recursion was encountered.
A parser can supply a representation of itself to this method and the
reader will supply the context, including the location where the
parser stalled.
Args:
repeated_parser: A representation of the repeated parser
Returns:
A full error message
"""
if self.finished:
return super().recursion_error(repeated_parser)
else:
line_index, character_index, line, pointer = self.current_line()
return 'Infinite recursion detected in {}; empty string was matched and will be matched forever\n' \
'Line {}, character {}\n\n{}{}'.format(repeated_parser, line_index, character_index, line, pointer) |
def skip(self, n_batches, n_epochs=0):
"""
Skip N batches in the training.
"""
logging.info("skip %d epochs and %d batches" % (n_epochs, n_batches))
self._skip_batches = n_batches
self._skip_epochs = n_epochs | Skip N batches in the training. | Below is the instruction that describes the task:
### Input:
Skip N batches in the training.
### Response:
def skip(self, n_batches, n_epochs=0):
"""
Skip N batches in the training.
"""
logging.info("skip %d epochs and %d batches" % (n_epochs, n_batches))
self._skip_batches = n_batches
self._skip_epochs = n_epochs |
def find_unrelated(x, plim=0.1, axis=0):
"""Find indicies of insignificant un-/correlated variables
Example:
--------
i, j = find_unrelated(x, plim, rlim)
"""
# transpose if axis<>0
if axis != 0:
x = x.T
# read dimensions and allocate variables
_, c = x.shape
pairs = []
# compute each (i,j)-th correlation
for i in range(0, c):
for j in range(i + 1, c):
_, p = scipy.stats.pearsonr(x[:, i], x[:, j])
if p > plim:
pairs.append((i, j))
# done
return tuple(pairs) | Find indicies of insignificant un-/correlated variables
Example:
--------
i, j = find_unrelated(x, plim, rlim) | Below is the instruction that describes the task:
### Input:
Find indicies of insignificant un-/correlated variables
Example:
--------
i, j = find_unrelated(x, plim, rlim)
### Response:
def find_unrelated(x, plim=0.1, axis=0):
"""Find indicies of insignificant un-/correlated variables
Example:
--------
i, j = find_unrelated(x, plim, rlim)
"""
# transpose if axis<>0
if axis != 0:
x = x.T
# read dimensions and allocate variables
_, c = x.shape
pairs = []
# compute each (i,j)-th correlation
for i in range(0, c):
for j in range(i + 1, c):
_, p = scipy.stats.pearsonr(x[:, i], x[:, j])
if p > plim:
pairs.append((i, j))
# done
return tuple(pairs) |
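A hedged usage sketch with synthetic data, assuming find_unrelated above is in scope (it needs scipy); column 2 duplicates column 0, so the strongly correlated pair (0, 2) should not appear in the output.
import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(200, 3)
x[:, 2] = x[:, 0]                   # make columns 0 and 2 identical
pairs = find_unrelated(x, plim=0.1)
print(pairs)                         # typically ((0, 1), (1, 2)); (0, 2) is excluded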
def read_ipv4(self, length):
"""Read Internet Protocol version 4 (IPv4).
Structure of IPv4 header [RFC 791]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Version| IHL |Type of Service| Total Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Identification |Flags| Fragment Offset |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Time to Live | Protocol | Header Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Options | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 ip.version Version (4)
0 4 ip.hdr_len Internet Header Length (IHL)
1 8 ip.dsfield.dscp Differentiated Services Code Point (DSCP)
1 14 ip.dsfield.ecn Explicit Congestion Notification (ECN)
2 16 ip.len Total Length
4 32 ip.id Identification
6 48 - Reserved Bit (must be zero)
6 49 ip.flags.df Don't Fragment (DF)
6 50 ip.flags.mf More Fragments (MF)
6 51 ip.frag_offset Fragment Offset
8 64 ip.ttl Time To Live (TTL)
9 72 ip.proto Protocol (Transport Layer)
10 80 ip.checksum Header Checksum
12 96 ip.src Source IP Address
16 128 ip.dst Destination IP Address
20 160 ip.options IP Options (if IHL > 5)
"""
if length is None:
length = len(self)
_vihl = self._read_fileng(1).hex()
_dscp = self._read_binary(1)
_tlen = self._read_unpack(2)
_iden = self._read_unpack(2)
_frag = self._read_binary(2)
_ttol = self._read_unpack(1)
_prot = self._read_protos(1)
_csum = self._read_fileng(2)
_srca = self._read_ipv4_addr()
_dsta = self._read_ipv4_addr()
ipv4 = dict(
version=_vihl[0],
hdr_len=int(_vihl[1], base=16) * 4,
dsfield=dict(
dscp=(
TOS_PRE.get(int(_dscp[:3], base=2)),
TOS_DEL.get(int(_dscp[3], base=2)),
TOS_THR.get(int(_dscp[4], base=2)),
TOS_REL.get(int(_dscp[5], base=2)),
),
ecn=TOS_ECN.get(int(_dscp[-2:], base=2)),
),
len=_tlen,
id=_iden,
flags=dict(
df=True if int(_frag[1]) else False,
mf=True if int(_frag[2]) else False,
),
frag_offset=int(_frag[3:], base=2) * 8,
ttl=_ttol,
proto=_prot,
checksum=_csum,
src=_srca,
dst=_dsta,
)
_optl = ipv4['hdr_len'] - 20
if _optl:
options = self._read_ipv4_options(_optl)
ipv4['opt'] = options[0] # tuple of option acronyms
ipv4.update(options[1]) # merge option info to buffer
# ipv4['opt'] = self._read_fileng(_optl) or None
hdr_len = ipv4['hdr_len']
raw_len = ipv4['len'] - hdr_len
ipv4['packet'] = self._read_packet(header=hdr_len, payload=raw_len)
return self._decode_next_layer(ipv4, _prot, raw_len) | Read Internet Protocol version 4 (IPv4).
Structure of IPv4 header [RFC 791]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Version| IHL |Type of Service| Total Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Identification |Flags| Fragment Offset |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Time to Live | Protocol | Header Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Options | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 ip.version Version (4)
0 4 ip.hdr_len Internet Header Length (IHL)
1 8 ip.dsfield.dscp Differentiated Services Code Point (DSCP)
1 14 ip.dsfield.ecn Explicit Congestion Notification (ECN)
2 16 ip.len Total Length
4 32 ip.id Identification
6 48 - Reserved Bit (must be zero)
6 49 ip.flags.df Don't Fragment (DF)
6 50 ip.flags.mf More Fragments (MF)
6 51 ip.frag_offset Fragment Offset
8 64 ip.ttl Time To Live (TTL)
9 72 ip.proto Protocol (Transport Layer)
10 80 ip.checksum Header Checksum
12 96 ip.src Source IP Address
16 128 ip.dst Destination IP Address
20 160 ip.options IP Options (if IHL > 5) | Below is the instruction that describes the task:
### Input:
Read Internet Protocol version 4 (IPv4).
Structure of IPv4 header [RFC 791]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Version| IHL |Type of Service| Total Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Identification |Flags| Fragment Offset |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Time to Live | Protocol | Header Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Options | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 ip.version Version (4)
0 4 ip.hdr_len Internet Header Length (IHL)
1 8 ip.dsfield.dscp Differentiated Services Code Point (DSCP)
1 14 ip.dsfield.ecn Explicit Congestion Notification (ECN)
2 16 ip.len Total Length
4 32 ip.id Identification
6 48 - Reserved Bit (must be zero)
6 49 ip.flags.df Don't Fragment (DF)
6 50 ip.flags.mf More Fragments (MF)
6 51 ip.frag_offset Fragment Offset
8 64 ip.ttl Time To Live (TTL)
9 72 ip.proto Protocol (Transport Layer)
10 80 ip.checksum Header Checksum
12 96 ip.src Source IP Address
16 128 ip.dst Destination IP Address
20 160 ip.options IP Options (if IHL > 5)
### Response:
def read_ipv4(self, length):
"""Read Internet Protocol version 4 (IPv4).
Structure of IPv4 header [RFC 791]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Version| IHL |Type of Service| Total Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Identification |Flags| Fragment Offset |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Time to Live | Protocol | Header Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Options | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 ip.version Version (4)
0 4 ip.hdr_len Internet Header Length (IHL)
1 8 ip.dsfield.dscp Differentiated Services Code Point (DSCP)
1 14 ip.dsfield.ecn Explicit Congestion Notification (ECN)
2 16 ip.len Total Length
4 32 ip.id Identification
6 48 - Reserved Bit (must be zero)
6 49 ip.flags.df Don't Fragment (DF)
6 50 ip.flags.mf More Fragments (MF)
6 51 ip.frag_offset Fragment Offset
8 64 ip.ttl Time To Live (TTL)
9 72 ip.proto Protocol (Transport Layer)
10 80 ip.checksum Header Checksum
12 96 ip.src Source IP Address
16 128 ip.dst Destination IP Address
20 160 ip.options IP Options (if IHL > 5)
"""
if length is None:
length = len(self)
_vihl = self._read_fileng(1).hex()
_dscp = self._read_binary(1)
_tlen = self._read_unpack(2)
_iden = self._read_unpack(2)
_frag = self._read_binary(2)
_ttol = self._read_unpack(1)
_prot = self._read_protos(1)
_csum = self._read_fileng(2)
_srca = self._read_ipv4_addr()
_dsta = self._read_ipv4_addr()
ipv4 = dict(
version=_vihl[0],
hdr_len=int(_vihl[1], base=16) * 4,
dsfield=dict(
dscp=(
TOS_PRE.get(int(_dscp[:3], base=2)),
TOS_DEL.get(int(_dscp[3], base=2)),
TOS_THR.get(int(_dscp[4], base=2)),
TOS_REL.get(int(_dscp[5], base=2)),
),
ecn=TOS_ECN.get(int(_dscp[-2:], base=2)),
),
len=_tlen,
id=_iden,
flags=dict(
df=True if int(_frag[1]) else False,
mf=True if int(_frag[2]) else False,
),
frag_offset=int(_frag[3:], base=2) * 8,
ttl=_ttol,
proto=_prot,
checksum=_csum,
src=_srca,
dst=_dsta,
)
_optl = ipv4['hdr_len'] - 20
if _optl:
options = self._read_ipv4_options(_optl)
ipv4['opt'] = options[0] # tuple of option acronyms
ipv4.update(options[1]) # merge option info to buffer
# ipv4['opt'] = self._read_fileng(_optl) or None
hdr_len = ipv4['hdr_len']
raw_len = ipv4['len'] - hdr_len
ipv4['packet'] = self._read_packet(header=hdr_len, payload=raw_len)
return self._decode_next_layer(ipv4, _prot, raw_len) |
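A standalone sketch of the fixed 20-byte header layout documented above, using struct instead of the reader helpers; the sample bytes are a typical textbook IPv4/TCP header and are not taken from this codebase.
import struct

hdr = bytes.fromhex("4500003c1c4640004006b1e6c0a80001c0a800c7")
vihl, dscp_ecn, total_len, ident, flags_frag, ttl, proto, csum, src, dst = \
    struct.unpack("!BBHHHBBH4s4s", hdr)

version = vihl >> 4                    # 4
hdr_len = (vihl & 0x0F) * 4            # IHL in 32-bit words -> 20 bytes
df = bool(flags_frag & 0x4000)         # Don't Fragment
mf = bool(flags_frag & 0x2000)         # More Fragments
frag_offset = (flags_frag & 0x1FFF) * 8
print(version, hdr_len, total_len, ttl, proto, df, mf, frag_offset,
      ".".join(str(b) for b in src), ".".join(str(b) for b in dst))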
def hexdigest(self):
"""Return the digest value as a string of hexadecimal digits."""
if self._pre_computed_hash is None:
return libssdeep_wrapper.fuzzy_digest(self._state, 0)
else:
return self._pre_computed_hash | Return the digest value as a string of hexadecimal digits. | Below is the instruction that describes the task:
### Input:
Return the digest value as a string of hexadecimal digits.
### Response:
def hexdigest(self):
"""Return the digest value as a string of hexadecimal digits."""
if self._pre_computed_hash is None:
return libssdeep_wrapper.fuzzy_digest(self._state, 0)
else:
return self._pre_computed_hash |
def is_on(self):
"""
Get sensor state.
Assume offline or open (worst case).
"""
if self._type == 'Occupancy':
return self.status not in CONST.STATUS_ONLINE
return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE,
CONST.STATUS_CLOSED) | Get sensor state.
Assume offline or open (worst case). | Below is the instruction that describes the task:
### Input:
Get sensor state.
Assume offline or open (worst case).
### Response:
def is_on(self):
"""
Get sensor state.
Assume offline or open (worst case).
"""
if self._type == 'Occupancy':
return self.status not in CONST.STATUS_ONLINE
return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE,
CONST.STATUS_CLOSED) |
def _bgzip_from_cram(cram_file, dirs, data):
"""Create bgzipped fastq files from an input CRAM file in regions of interest.
Returns a list with a single file, for single end CRAM files, or two
files for paired end input.
"""
import pybedtools
region_file = (tz.get_in(["config", "algorithm", "variant_regions"], data)
if tz.get_in(["config", "algorithm", "coverage_interval"], data)
in ["regional", "exome", "amplicon"]
else None)
if region_file:
regions = ["%s:%s-%s" % tuple(r[:3]) for r in pybedtools.BedTool(region_file)]
else:
regions = [None]
work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep"))
out_s, out_p1, out_p2 = [os.path.join(work_dir, "%s-%s.fq.gz" %
(utils.splitext_plus(os.path.basename(cram_file))[0], fext))
for fext in ["s1", "p1", "p2"]]
if (not utils.file_exists(out_s) and
(not utils.file_exists(out_p1) or not utils.file_exists(out_p2))):
cram.index(cram_file, data["config"])
fastqs, part_dir = _cram_to_fastq_regions(regions, cram_file, dirs, data)
if len(fastqs[0]) == 1:
with file_transaction(data, out_s) as tx_out_file:
_merge_and_bgzip([xs[0] for xs in fastqs], tx_out_file, out_s)
else:
for i, out_file in enumerate([out_p1, out_p2]):
if not utils.file_exists(out_file):
ext = "/%s" % (i + 1)
with file_transaction(data, out_file) as tx_out_file:
_merge_and_bgzip([xs[i] for xs in fastqs], tx_out_file, out_file, ext)
shutil.rmtree(part_dir)
if utils.file_exists(out_p1):
return [out_p1, out_p2]
else:
assert utils.file_exists(out_s)
return [out_s] | Create bgzipped fastq files from an input CRAM file in regions of interest.
Returns a list with a single file, for single end CRAM files, or two
files for paired end input. | Below is the instruction that describes the task:
### Input:
Create bgzipped fastq files from an input CRAM file in regions of interest.
Returns a list with a single file, for single end CRAM files, or two
files for paired end input.
### Response:
def _bgzip_from_cram(cram_file, dirs, data):
"""Create bgzipped fastq files from an input CRAM file in regions of interest.
Returns a list with a single file, for single end CRAM files, or two
files for paired end input.
"""
import pybedtools
region_file = (tz.get_in(["config", "algorithm", "variant_regions"], data)
if tz.get_in(["config", "algorithm", "coverage_interval"], data)
in ["regional", "exome", "amplicon"]
else None)
if region_file:
regions = ["%s:%s-%s" % tuple(r[:3]) for r in pybedtools.BedTool(region_file)]
else:
regions = [None]
work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep"))
out_s, out_p1, out_p2 = [os.path.join(work_dir, "%s-%s.fq.gz" %
(utils.splitext_plus(os.path.basename(cram_file))[0], fext))
for fext in ["s1", "p1", "p2"]]
if (not utils.file_exists(out_s) and
(not utils.file_exists(out_p1) or not utils.file_exists(out_p2))):
cram.index(cram_file, data["config"])
fastqs, part_dir = _cram_to_fastq_regions(regions, cram_file, dirs, data)
if len(fastqs[0]) == 1:
with file_transaction(data, out_s) as tx_out_file:
_merge_and_bgzip([xs[0] for xs in fastqs], tx_out_file, out_s)
else:
for i, out_file in enumerate([out_p1, out_p2]):
if not utils.file_exists(out_file):
ext = "/%s" % (i + 1)
with file_transaction(data, out_file) as tx_out_file:
_merge_and_bgzip([xs[i] for xs in fastqs], tx_out_file, out_file, ext)
shutil.rmtree(part_dir)
if utils.file_exists(out_p1):
return [out_p1, out_p2]
else:
assert utils.file_exists(out_s)
return [out_s] |
def _format_value(self, operation, key, indent):
"""A value that exists in the operation but has value None is displayed.
A value that does not exist in the operation is left out entirely. The value
name in the operation must match the value name in the template, but the
location does not have to match.
"""
v = self._find_value(operation, key)
if v == "NOT_FOUND":
return []
if not isinstance(v, list):
v = [v]
if not len(v):
v = [None]
key = key + ":"
lines = []
for s in v:
# Access control rules are stored in tuples.
if isinstance(s, tuple):
s = "{}: {}".format(*s)
lines.append(
"{}{}{}{}".format(
" " * indent, key, " " * (TAB - indent - len(key) - 1), s
)
)
key = ""
return lines | A value that exists in the operation but has value None is displayed.
A value that does not exist in the operation is left out entirely. The value
name in the operation must match the value name in the template, but the
location does not have to match. | Below is the instruction that describes the task:
### Input:
A value that exists in the operation but has value None is displayed.
A value that does not exist in the operation is left out entirely. The value
name in the operation must match the value name in the template, but the
location does not have to match.
### Response:
def _format_value(self, operation, key, indent):
"""A value that exists in the operation but has value None is displayed.
A value that does not exist in the operation is left out entirely. The value
name in the operation must match the value name in the template, but the
location does not have to match.
"""
v = self._find_value(operation, key)
if v == "NOT_FOUND":
return []
if not isinstance(v, list):
v = [v]
if not len(v):
v = [None]
key = key + ":"
lines = []
for s in v:
# Access control rules are stored in tuples.
if isinstance(s, tuple):
s = "{}: {}".format(*s)
lines.append(
"{}{}{}{}".format(
" " * indent, key, " " * (TAB - indent - len(key) - 1), s
)
)
key = ""
return lines |
def sign(url, app_id, app_secret, hash_meth='sha1', **params):
'''
A signature method which generates the necessary Ofly parameters.
:param app_id: The oFlyAppId, i.e. "application ID".
:type app_id: str
:param app_secret: The oFlyAppSecret, i.e. "shared secret".
:type app_secret: str
:param hash_meth: The hash method to use for signing, defaults to
"sha1".
:type hash_meth: str
:param \*\*params: Additional parameters.
:type \*\*params: dict
'''
hash_meth_str = hash_meth
if hash_meth == 'sha1':
hash_meth = sha1
elif hash_meth == 'md5':
hash_meth = md5
else:
raise TypeError('hash_meth must be one of "sha1", "md5"')
now = datetime.utcnow()
milliseconds = now.microsecond // 1000
time_format = '%Y-%m-%dT%H:%M:%S.{0}Z'.format(milliseconds)
ofly_params = {'oflyAppId': app_id,
'oflyHashMeth': hash_meth_str.upper(),
'oflyTimestamp': now.strftime(time_format)}
url_path = urlsplit(url).path
signature_base_string = app_secret + url_path + '?'
if len(params):
signature_base_string += get_sorted_params(params) + '&'
signature_base_string += get_sorted_params(ofly_params)
if not isinstance(signature_base_string, bytes):
signature_base_string = signature_base_string.encode('utf-8')
ofly_params['oflyApiSig'] = \
hash_meth(signature_base_string).hexdigest()
all_params = dict(tuple(ofly_params.items()) + tuple(params.items()))
return get_sorted_params(all_params) | A signature method which generates the necessary Ofly parameters.
:param app_id: The oFlyAppId, i.e. "application ID".
:type app_id: str
:param app_secret: The oFlyAppSecret, i.e. "shared secret".
:type app_secret: str
:param hash_meth: The hash method to use for signing, defaults to
"sha1".
:type hash_meth: str
:param \*\*params: Additional parameters.
:type \*\*params: dict | Below is the instruction that describes the task:
### Input:
A signature method which generates the necessary Ofly parameters.
:param app_id: The oFlyAppId, i.e. "application ID".
:type app_id: str
:param app_secret: The oFlyAppSecret, i.e. "shared secret".
:type app_secret: str
:param hash_meth: The hash method to use for signing, defaults to
"sha1".
:type hash_meth: str
:param \*\*params: Additional parameters.
:type \*\*params: dict
### Response:
def sign(url, app_id, app_secret, hash_meth='sha1', **params):
'''
A signature method which generates the necessary Ofly parameters.
:param app_id: The oFlyAppId, i.e. "application ID".
:type app_id: str
:param app_secret: The oFlyAppSecret, i.e. "shared secret".
:type app_secret: str
:param hash_meth: The hash method to use for signing, defaults to
"sha1".
:type hash_meth: str
:param \*\*params: Additional parameters.
:type \*\*params: dict
'''
hash_meth_str = hash_meth
if hash_meth == 'sha1':
hash_meth = sha1
elif hash_meth == 'md5':
hash_meth = md5
else:
raise TypeError('hash_meth must be one of "sha1", "md5"')
now = datetime.utcnow()
milliseconds = now.microsecond // 1000
time_format = '%Y-%m-%dT%H:%M:%S.{0}Z'.format(milliseconds)
ofly_params = {'oflyAppId': app_id,
'oflyHashMeth': hash_meth_str.upper(),
'oflyTimestamp': now.strftime(time_format)}
url_path = urlsplit(url).path
signature_base_string = app_secret + url_path + '?'
if len(params):
signature_base_string += get_sorted_params(params) + '&'
signature_base_string += get_sorted_params(ofly_params)
if not isinstance(signature_base_string, bytes):
signature_base_string = signature_base_string.encode('utf-8')
ofly_params['oflyApiSig'] = \
hash_meth(signature_base_string).hexdigest()
all_params = dict(tuple(ofly_params.items()) + tuple(params.items()))
return get_sorted_params(all_params) |
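A hedged usage sketch: it assumes sign and its get_sorted_params helper are importable from the module above, and the application id, secret, URL, and extra parameter are placeholders.
signed_query = sign(
    "https://openapi.example.com/v1/users/me",
    app_id="my-app-id",
    app_secret="my-shared-secret",
    hash_meth="sha1",
    format="json",        # extra request parameter folded into the signature
)
print(signed_query)        # sorted query string including oflyApiSig, oflyAppId, ...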
def p_directive(self, p):
"""
directive : AT name arguments
| AT name
"""
arguments = p[3] if len(p) == 4 else None
p[0] = Directive(name=p[2], arguments=arguments) | directive : AT name arguments
| AT name | Below is the instruction that describes the task:
### Input:
directive : AT name arguments
| AT name
### Response:
def p_directive(self, p):
"""
directive : AT name arguments
| AT name
"""
arguments = p[3] if len(p) == 4 else None
p[0] = Directive(name=p[2], arguments=arguments) |
def pore_to_pore(target):
r"""
Calculates throat vector as straight path between connected pores.
Parameters
----------
geometry : OpenPNM Geometry object
The object containing the geometrical properties of the throats
Notes
-----
There is an important implicit assumption here: the positive direction is
taken as the direction from the pore with the lower index to the higher.
This corresponds to the pores in the 1st and 2nd columns of the
'throat.conns' array as stored on the network.
"""
network = target.project.network
throats = network.throats(target.name)
conns = network['throat.conns']
P1 = conns[:, 0]
P2 = conns[:, 1]
coords = network['pore.coords']
vec = coords[P2] - coords[P1]
unit_vec = tr.unit_vector(vec, axis=1)
return unit_vec[throats] | r"""
Calculates throat vector as straight path between connected pores.
Parameters
----------
geometry : OpenPNM Geometry object
The object containing the geometrical properties of the throats
Notes
-----
There is an important implicit assumption here: the positive direction is
taken as the direction from the pore with the lower index to the higher.
This corresponds to the pores in the 1st and 2nd columns of the
'throat.conns' array as stored on the network. | Below is the instruction that describes the task:
### Input:
r"""
Calculates throat vector as straight path between connected pores.
Parameters
----------
geometry : OpenPNM Geometry object
The object containing the geometrical properties of the throats
Notes
-----
There is an important implicit assumption here: the positive direction is
taken as the direction from the pore with the lower index to the higher.
This corresponds to the pores in the 1st and 2nd columns of the
'throat.conns' array as stored on the network.
### Response:
def pore_to_pore(target):
r"""
Calculates throat vector as straight path between connected pores.
Parameters
----------
geometry : OpenPNM Geometry object
The object containing the geometrical properties of the throats
Notes
-----
There is an important implicit assumption here: the positive direction is
taken as the direction from the pore with the lower index to the higher.
This corresponds to the pores in the 1st and 2nd columns of the
'throat.conns' array as stored on the network.
"""
network = target.project.network
throats = network.throats(target.name)
conns = network['throat.conns']
P1 = conns[:, 0]
P2 = conns[:, 1]
coords = network['pore.coords']
vec = coords[P2] - coords[P1]
unit_vec = tr.unit_vector(vec, axis=1)
return unit_vec[throats] |
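A minimal numpy sketch of the underlying computation with invented coordinates: each throat vector is the difference of its two pore coordinates, normalised per row.
import numpy as np

coords = np.array([[0.0, 0.0, 0.0],
                   [1.0, 1.0, 0.0],
                   [1.0, 0.0, 2.0]])
conns = np.array([[0, 1], [0, 2]])          # pore index pairs, one row per throat
vec = coords[conns[:, 1]] - coords[conns[:, 0]]
unit_vec = vec / np.linalg.norm(vec, axis=1, keepdims=True)
print(unit_vec)                              # each row has unit length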
def get_ops(self, key):
''' Returns ops from the key if found otherwise raises a KeyError.
'''
ops = self._store.get(key)
if ops is None:
raise KeyError(
'cannot get operations for {}'.format(key))
return ops | Returns ops from the key if found otherwise raises a KeyError. | Below is the instruction that describes the task:
### Input:
Returns ops from the key if found otherwise raises a KeyError.
### Response:
def get_ops(self, key):
''' Returns ops from the key if found otherwise raises a KeyError.
'''
ops = self._store.get(key)
if ops is None:
raise KeyError(
'cannot get operations for {}'.format(key))
return ops |
def _build_gecos(gecos_dict):
'''
Accepts a dictionary entry containing GECOS field names and their values,
and returns a full GECOS comment string, to be used with usermod.
'''
return '{0},{1},{2},{3}'.format(gecos_dict.get('fullname', ''),
gecos_dict.get('roomnumber', ''),
gecos_dict.get('workphone', ''),
gecos_dict.get('homephone', '')) | Accepts a dictionary entry containing GECOS field names and their values,
and returns a full GECOS comment string, to be used with usermod. | Below is the instruction that describes the task:
### Input:
Accepts a dictionary entry containing GECOS field names and their values,
and returns a full GECOS comment string, to be used with usermod.
### Response:
def _build_gecos(gecos_dict):
'''
Accepts a dictionary entry containing GECOS field names and their values,
and returns a full GECOS comment string, to be used with usermod.
'''
return '{0},{1},{2},{3}'.format(gecos_dict.get('fullname', ''),
gecos_dict.get('roomnumber', ''),
gecos_dict.get('workphone', ''),
gecos_dict.get('homephone', '')) |
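A toy illustration of the comment string this helper produces, assuming _build_gecos above is in scope; the field values are invented.
print(_build_gecos({"fullname": "Jane Doe", "roomnumber": "101"}))
# -> "Jane Doe,101,,"  (missing workphone/homephone fall back to empty strings)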
def format(self, full_info: bool = False):
"""
:param full_info: If True, adds more info about the chat. Please, note that this additional info requires
to make up to THREE synchronous api calls.
"""
chat = self.api_object
if full_info:
self.__format_full(chat)
else:
self.__format_simple(chat) | :param full_info: If True, adds more info about the chat. Please, note that this additional info requires
to make up to THREE synchronous api calls. | Below is the instruction that describes the task:
### Input:
:param full_info: If True, adds more info about the chat. Please, note that this additional info requires
to make up to THREE synchronous api calls.
### Response:
def format(self, full_info: bool = False):
"""
:param full_info: If True, adds more info about the chat. Please, note that this additional info requires
to make up to THREE synchronous api calls.
"""
chat = self.api_object
if full_info:
self.__format_full(chat)
else:
self.__format_simple(chat) |
def get_calendar(self, listing_id, starting_month=datetime.datetime.now().month, starting_year=datetime.datetime.now().year, calendar_months=12):
"""
Get availability calendar for a given listing
"""
params = {
'year': str(starting_year),
'listing_id': str(listing_id),
'_format': 'with_conditions',
'count': str(calendar_months),
'month': str(starting_month)
}
r = self._session.get(API_URL + "/calendar_months", params=params)
r.raise_for_status()
return r.json() | Get availability calendar for a given listing | Below is the instruction that describes the task:
### Input:
Get availability calendar for a given listing
### Response:
def get_calendar(self, listing_id, starting_month=datetime.datetime.now().month, starting_year=datetime.datetime.now().year, calendar_months=12):
"""
Get availability calendar for a given listing
"""
params = {
'year': str(starting_year),
'listing_id': str(listing_id),
'_format': 'with_conditions',
'count': str(calendar_months),
'month': str(starting_month)
}
r = self._session.get(API_URL + "/calendar_months", params=params)
r.raise_for_status()
return r.json() |
def get_user_bookmarks(self, id, **data):
"""
GET /users/:id/bookmarks/
Gets all the user's saved events.
In order to update the saved events list, the user must unsave or save each event.
A user is authorized to only see his/her saved events.
"""
return self.get("/users/{0}/bookmarks/".format(id), data=data) | GET /users/:id/bookmarks/
Gets all the user's saved events.
In order to update the saved events list, the user must unsave or save each event.
A user is authorized to only see his/her saved events. | Below is the instruction that describes the task:
### Input:
GET /users/:id/bookmarks/
Gets all the user's saved events.
In order to update the saved events list, the user must unsave or save each event.
A user is authorized to only see his/her saved events.
### Response:
def get_user_bookmarks(self, id, **data):
"""
GET /users/:id/bookmarks/
Gets all the user's saved events.
In order to update the saved events list, the user must unsave or save each event.
A user is authorized to only see his/her saved events.
"""
return self.get("/users/{0}/bookmarks/".format(id), data=data) |
def match_alphabet(self, pattern):
"""Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
"""
s = {}
for char in pattern:
s[char] = 0
for i in range(len(pattern)):
s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s | Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
      Hash of character locations. | Below is the instruction that describes the task:
### Input:
Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
### Response:
def match_alphabet(self, pattern):
"""Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
"""
s = {}
for char in pattern:
s[char] = 0
for i in range(len(pattern)):
s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s |
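An illustrative standalone sketch of the same Bitap bitmask construction (added for clarity, not part of the original record); the assertion shows the bit layout for a short pattern:
# Each character maps to a bitmask of the positions where it occurs in the pattern,
# with the first pattern position stored in the highest bit.
def bitap_alphabet(pattern):
    s = {}
    for i, char in enumerate(pattern):
        s[char] = s.get(char, 0) | (1 << (len(pattern) - i - 1))
    return s
assert bitap_alphabet('aba') == {'a': 0b101, 'b': 0b010}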
def start(name):
'''
Start the named container
CLI Example:
.. code-block:: bash
salt myminion nspawn.start <name>
'''
if _sd_version() >= 219:
ret = _machinectl('start {0}'.format(name))
else:
cmd = 'systemctl start systemd-nspawn@{0}'.format(name)
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode'] != 0:
__context__['retcode'] = salt.defaults.exitcodes.EX_UNAVAILABLE
return False
return True | Start the named container
CLI Example:
.. code-block:: bash
        salt myminion nspawn.start <name> | Below is the instruction that describes the task:
### Input:
Start the named container
CLI Example:
.. code-block:: bash
salt myminion nspawn.start <name>
### Response:
def start(name):
'''
Start the named container
CLI Example:
.. code-block:: bash
salt myminion nspawn.start <name>
'''
if _sd_version() >= 219:
ret = _machinectl('start {0}'.format(name))
else:
cmd = 'systemctl start systemd-nspawn@{0}'.format(name)
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode'] != 0:
__context__['retcode'] = salt.defaults.exitcodes.EX_UNAVAILABLE
return False
return True |
def read(filepath):
"""
Read a single InkML file
Parameters
----------
filepath : string
path to the (readable) InkML file
Returns
-------
HandwrittenData :
The parsed InkML file as a HandwrittenData object
"""
import xml.etree.ElementTree
root = xml.etree.ElementTree.parse(filepath).getroot()
# Get the raw data
recording = []
strokes = sorted(root.findall('{http://www.w3.org/2003/InkML}trace'),
key=lambda child: int(child.attrib['id']))
time = 0
for stroke in strokes:
stroke = stroke.text.strip().split(',')
stroke = [point.strip().split(' ') for point in stroke]
if len(stroke[0]) == 3:
stroke = [{'x': float(x), 'y': float(y), 'time': float(t)}
for x, y, t in stroke]
else:
stroke = [{'x': float(x), 'y': float(y)} for x, y in stroke]
new_stroke = []
for p in stroke:
new_stroke.append({'x': p['x'], 'y': p['y'], 'time': time})
time += 20
stroke = new_stroke
time += 200
recording.append(stroke)
# Get LaTeX
formula_in_latex = None
annotations = root.findall('{http://www.w3.org/2003/InkML}annotation')
for annotation in annotations:
if annotation.attrib['type'] == 'truth':
formula_in_latex = annotation.text
hw = handwritten_data.HandwrittenData(json.dumps(recording),
formula_in_latex=formula_in_latex)
for annotation in annotations:
if annotation.attrib['type'] == 'writer':
hw.writer = annotation.text
elif annotation.attrib['type'] == 'category':
hw.category = annotation.text
elif annotation.attrib['type'] == 'expression':
hw.expression = annotation.text
# Get segmentation
segmentation = []
trace_groups = root.findall('{http://www.w3.org/2003/InkML}traceGroup')
if len(trace_groups) != 1:
raise Exception('Malformed InkML',
('Exactly 1 top level traceGroup expected, found %i. '
'(%s) - probably no ground truth?') %
(len(trace_groups), filepath))
trace_group = trace_groups[0]
symbol_stream = [] # has to be consistent with segmentation
for tg in trace_group.findall('{http://www.w3.org/2003/InkML}traceGroup'):
annotations = tg.findall('{http://www.w3.org/2003/InkML}annotation')
if len(annotations) != 1:
raise ValueError("%i annotations found for '%s'." %
(len(annotations), filepath))
db_id = formula_to_dbid(normalize_symbol_name(annotations[0].text))
symbol_stream.append(db_id)
trace_views = tg.findall('{http://www.w3.org/2003/InkML}traceView')
symbol = []
for traceView in trace_views:
symbol.append(int(traceView.attrib['traceDataRef']))
segmentation.append(symbol)
hw.symbol_stream = symbol_stream
hw.segmentation = segmentation
_flat_seg = [stroke2 for symbol2 in segmentation for stroke2 in symbol2]
assert len(_flat_seg) == len(recording), \
("Segmentation had length %i, but recording has %i strokes (%s)" %
(len(_flat_seg), len(recording), filepath))
assert set(_flat_seg) == set(range(len(_flat_seg)))
hw.inkml = beautify_xml(filepath)
hw.filepath = filepath
return hw | Read a single InkML file
Parameters
----------
filepath : string
path to the (readable) InkML file
Returns
-------
HandwrittenData :
        The parsed InkML file as a HandwrittenData object | Below is the instruction that describes the task:
### Input:
Read a single InkML file
Parameters
----------
filepath : string
path to the (readable) InkML file
Returns
-------
HandwrittenData :
The parsed InkML file as a HandwrittenData object
### Response:
def read(filepath):
"""
Read a single InkML file
Parameters
----------
filepath : string
path to the (readable) InkML file
Returns
-------
HandwrittenData :
The parsed InkML file as a HandwrittenData object
"""
import xml.etree.ElementTree
root = xml.etree.ElementTree.parse(filepath).getroot()
# Get the raw data
recording = []
strokes = sorted(root.findall('{http://www.w3.org/2003/InkML}trace'),
key=lambda child: int(child.attrib['id']))
time = 0
for stroke in strokes:
stroke = stroke.text.strip().split(',')
stroke = [point.strip().split(' ') for point in stroke]
if len(stroke[0]) == 3:
stroke = [{'x': float(x), 'y': float(y), 'time': float(t)}
for x, y, t in stroke]
else:
stroke = [{'x': float(x), 'y': float(y)} for x, y in stroke]
new_stroke = []
for p in stroke:
new_stroke.append({'x': p['x'], 'y': p['y'], 'time': time})
time += 20
stroke = new_stroke
time += 200
recording.append(stroke)
# Get LaTeX
formula_in_latex = None
annotations = root.findall('{http://www.w3.org/2003/InkML}annotation')
for annotation in annotations:
if annotation.attrib['type'] == 'truth':
formula_in_latex = annotation.text
hw = handwritten_data.HandwrittenData(json.dumps(recording),
formula_in_latex=formula_in_latex)
for annotation in annotations:
if annotation.attrib['type'] == 'writer':
hw.writer = annotation.text
elif annotation.attrib['type'] == 'category':
hw.category = annotation.text
elif annotation.attrib['type'] == 'expression':
hw.expression = annotation.text
# Get segmentation
segmentation = []
trace_groups = root.findall('{http://www.w3.org/2003/InkML}traceGroup')
if len(trace_groups) != 1:
raise Exception('Malformed InkML',
('Exactly 1 top level traceGroup expected, found %i. '
'(%s) - probably no ground truth?') %
(len(trace_groups), filepath))
trace_group = trace_groups[0]
symbol_stream = [] # has to be consistent with segmentation
for tg in trace_group.findall('{http://www.w3.org/2003/InkML}traceGroup'):
annotations = tg.findall('{http://www.w3.org/2003/InkML}annotation')
if len(annotations) != 1:
raise ValueError("%i annotations found for '%s'." %
(len(annotations), filepath))
db_id = formula_to_dbid(normalize_symbol_name(annotations[0].text))
symbol_stream.append(db_id)
trace_views = tg.findall('{http://www.w3.org/2003/InkML}traceView')
symbol = []
for traceView in trace_views:
symbol.append(int(traceView.attrib['traceDataRef']))
segmentation.append(symbol)
hw.symbol_stream = symbol_stream
hw.segmentation = segmentation
_flat_seg = [stroke2 for symbol2 in segmentation for stroke2 in symbol2]
assert len(_flat_seg) == len(recording), \
("Segmentation had length %i, but recording has %i strokes (%s)" %
(len(_flat_seg), len(recording), filepath))
assert set(_flat_seg) == set(range(len(_flat_seg)))
hw.inkml = beautify_xml(filepath)
hw.filepath = filepath
return hw |
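A hypothetical call, added for illustration (the file path is made up; segmentation and filepath are attributes set by the code above):
hw = read('data/formula_001.inkml')
print(len(hw.segmentation), 'symbols parsed from', hw.filepath)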
def load_truetype_font(
path: str, tile_width: int, tile_height: int
) -> Tileset:
"""Return a new Tileset from a `.ttf` or `.otf` file.
Same as :any:`set_truetype_font`, but returns a :any:`Tileset` instead.
You can send this Tileset to :any:`set_default`.
This function is provisional. The API may change.
"""
if not os.path.exists(path):
raise RuntimeError("File not found:\n\t%s" % (os.path.realpath(path),))
return Tileset._claim(
lib.TCOD_load_truetype_font_(path.encode(), tile_width, tile_height)
) | Return a new Tileset from a `.ttf` or `.otf` file.
Same as :any:`set_truetype_font`, but returns a :any:`Tileset` instead.
You can send this Tileset to :any:`set_default`.
    This function is provisional. The API may change. | Below is the instruction that describes the task:
### Input:
Return a new Tileset from a `.ttf` or `.otf` file.
Same as :any:`set_truetype_font`, but returns a :any:`Tileset` instead.
You can send this Tileset to :any:`set_default`.
This function is provisional. The API may change.
### Response:
def load_truetype_font(
path: str, tile_width: int, tile_height: int
) -> Tileset:
"""Return a new Tileset from a `.ttf` or `.otf` file.
Same as :any:`set_truetype_font`, but returns a :any:`Tileset` instead.
You can send this Tileset to :any:`set_default`.
This function is provisional. The API may change.
"""
if not os.path.exists(path):
raise RuntimeError("File not found:\n\t%s" % (os.path.realpath(path),))
return Tileset._claim(
lib.TCOD_load_truetype_font_(path.encode(), tile_width, tile_height)
) |
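Possible usage, added for illustration and assuming this function lives in the tcod.tileset module alongside the set_default mentioned in the docstring (font path is made up):
import tcod.tileset
ts = tcod.tileset.load_truetype_font('DejaVuSans.ttf', 16, 16)
tcod.tileset.set_default(ts)  # hand-off suggested by the docstring above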
def isNXEnabled(self):
"""
Determines if the current L{PE} instance has the NXCOMPAT (Compatible with Data Execution Prevention) flag enabled.
@see: U{http://msdn.microsoft.com/en-us/library/ms235442.aspx}
@rtype: bool
@return: Returns C{True} if the current L{PE} instance has the NXCOMPAT flag enabled. Otherwise, returns C{False}.
"""
return self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_NX_COMPAT == consts.IMAGE_DLL_CHARACTERISTICS_NX_COMPAT | Determines if the current L{PE} instance has the NXCOMPAT (Compatible with Data Execution Prevention) flag enabled.
@see: U{http://msdn.microsoft.com/en-us/library/ms235442.aspx}
@rtype: bool
    @return: Returns C{True} if the current L{PE} instance has the NXCOMPAT flag enabled. Otherwise, returns C{False}. | Below is the instruction that describes the task:
### Input:
Determines if the current L{PE} instance has the NXCOMPAT (Compatible with Data Execution Prevention) flag enabled.
@see: U{http://msdn.microsoft.com/en-us/library/ms235442.aspx}
@rtype: bool
@return: Returns C{True} if the current L{PE} instance has the NXCOMPAT flag enabled. Otherwise, returns C{False}.
### Response:
def isNXEnabled(self):
"""
Determines if the current L{PE} instance has the NXCOMPAT (Compatible with Data Execution Prevention) flag enabled.
@see: U{http://msdn.microsoft.com/en-us/library/ms235442.aspx}
@rtype: bool
@return: Returns C{True} if the current L{PE} instance has the NXCOMPAT flag enabled. Otherwise, returns C{False}.
"""
return self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_NX_COMPAT == consts.IMAGE_DLL_CHARACTERISTICS_NX_COMPAT |
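The check is a plain flag test; a standalone sketch with the PE DllCharacteristics constant (value taken from the PE format documentation, added for illustration):
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100  # DEP/NX bit in the optional header
def nx_enabled(dll_characteristics):
    # True only when the NXCOMPAT bit is set.
    return dll_characteristics & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
assert nx_enabled(0x8140)       # 0x8000 | 0x0100 | 0x0040: a typical ASLR+NX value
assert not nx_enabled(0x8040)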
def parse_version(version):
"""
Return a comparable tuple from a version string. We try to force tuple to semver with version like 1.2.0
Replace pkg_resources.parse_version which now display a warning when use for comparing version with tuple
:returns: Version string as comparable tuple
"""
release_type_found = False
version_infos = re.split('(\.|[a-z]+)', version)
version = []
for info in version_infos:
if info == '.' or len(info) == 0:
continue
try:
info = int(info)
# We pad with zero to compare only on string
# This avoid issue when comparing version with different length
version.append("%06d" % (info,))
except ValueError:
# Force to a version with three number
if len(version) == 1:
version.append("00000")
if len(version) == 2:
version.append("000000")
# We want rc to be at lower level than dev version
if info == 'rc':
info = 'c'
version.append(info)
release_type_found = True
if release_type_found is False:
# Force to a version with three number
if len(version) == 1:
version.append("00000")
if len(version) == 2:
version.append("000000")
version.append("final")
return tuple(version) | Return a comparable tuple from a version string. We try to force tuple to semver with version like 1.2.0
Replace pkg_resources.parse_version which now display a warning when use for comparing version with tuple
    :returns: Version string as comparable tuple | Below is the instruction that describes the task:
### Input:
Return a comparable tuple from a version string. We try to force tuple to semver with version like 1.2.0
Replace pkg_resources.parse_version which now display a warning when use for comparing version with tuple
:returns: Version string as comparable tuple
### Response:
def parse_version(version):
"""
Return a comparable tuple from a version string. We try to force tuple to semver with version like 1.2.0
Replace pkg_resources.parse_version which now display a warning when use for comparing version with tuple
:returns: Version string as comparable tuple
"""
release_type_found = False
version_infos = re.split('(\.|[a-z]+)', version)
version = []
for info in version_infos:
if info == '.' or len(info) == 0:
continue
try:
info = int(info)
# We pad with zero to compare only on string
# This avoid issue when comparing version with different length
version.append("%06d" % (info,))
except ValueError:
# Force to a version with three number
if len(version) == 1:
version.append("00000")
if len(version) == 2:
version.append("000000")
# We want rc to be at lower level than dev version
if info == 'rc':
info = 'c'
version.append(info)
release_type_found = True
if release_type_found is False:
# Force to a version with three number
if len(version) == 1:
version.append("00000")
if len(version) == 2:
version.append("000000")
version.append("final")
return tuple(version) |
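Illustrative comparisons with the returned tuples (added for clarity, not part of the original record):
# Release candidates sort before the final release, and short versions are zero-padded.
assert parse_version('1.2.0rc1') < parse_version('1.2.0')
assert parse_version('2.0') == parse_version('2.0.0')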
def rebuild_tree(self, request):
'''
Rebuilds the tree and clears the cache.
'''
self.model.objects.rebuild()
self.message_user(request, _('Menu Tree Rebuilt.'))
        return self.clean_cache(request) | Rebuilds the tree and clears the cache. | Below is the instruction that describes the task:
### Input:
Rebuilds the tree and clears the cache.
### Response:
def rebuild_tree(self, request):
'''
Rebuilds the tree and clears the cache.
'''
self.model.objects.rebuild()
self.message_user(request, _('Menu Tree Rebuilt.'))
return self.clean_cache(request) |
def copy(self):
        ''' Returns a copy of the macaroon. Note that the new
macaroon's namespace still points to the same underlying Namespace -
copying the macaroon does not make a copy of the namespace.
:return a Macaroon
'''
m1 = Macaroon(None, None, version=self._version,
namespace=self._namespace)
m1._macaroon = self._macaroon.copy()
m1._caveat_data = self._caveat_data.copy()
        return m1 | Returns a copy of the macaroon. Note that the new
macaroon's namespace still points to the same underlying Namespace -
copying the macaroon does not make a copy of the namespace.
        :return a Macaroon | Below is the instruction that describes the task:
### Input:
Returns a copy of the macaroon. Note that the new
macaroon's namespace still points to the same underlying Namespace -
copying the macaroon does not make a copy of the namespace.
:return a Macaroon
### Response:
def copy(self):
        ''' Returns a copy of the macaroon. Note that the new
macaroon's namespace still points to the same underlying Namespace -
copying the macaroon does not make a copy of the namespace.
:return a Macaroon
'''
m1 = Macaroon(None, None, version=self._version,
namespace=self._namespace)
m1._macaroon = self._macaroon.copy()
m1._caveat_data = self._caveat_data.copy()
return m1 |
def save_url_as(url, save_as):
"""
Download the file `url` and save it to the local disk as
`save_as`.
"""
remote = requests.get(url, verify=False)
if not remote.status_code == Constants.PULP_GET_OK:
raise JuicerPulpError("A %s error occurred trying to get %s" %
(remote.status_code, url))
with open(save_as, 'wb') as data:
data.write(remote.content) | Download the file `url` and save it to the local disk as
    `save_as`. | Below is the instruction that describes the task:
### Input:
Download the file `url` and save it to the local disk as
`save_as`.
### Response:
def save_url_as(url, save_as):
"""
Download the file `url` and save it to the local disk as
`save_as`.
"""
remote = requests.get(url, verify=False)
if not remote.status_code == Constants.PULP_GET_OK:
raise JuicerPulpError("A %s error occurred trying to get %s" %
(remote.status_code, url))
with open(save_as, 'wb') as data:
data.write(remote.content) |
def edit(directory=None, revision='current'):
"""Edit current revision."""
if alembic_version >= (0, 8, 0):
config = current_app.extensions['migrate'].migrate.get_config(
directory)
command.edit(config, revision)
else:
        raise RuntimeError('Alembic 0.8.0 or greater is required') | Edit current revision. | Below is the instruction that describes the task:
### Input:
Edit current revision.
### Response:
def edit(directory=None, revision='current'):
"""Edit current revision."""
if alembic_version >= (0, 8, 0):
config = current_app.extensions['migrate'].migrate.get_config(
directory)
command.edit(config, revision)
else:
raise RuntimeError('Alembic 0.8.0 or greater is required') |
def _create(self, cache_file):
"""Create the tables needed to store the information."""
conn = sqlite3.connect(cache_file)
cur = conn.cursor()
cur.execute("PRAGMA foreign_keys = ON")
cur.execute('''
CREATE TABLE jobs(
hash TEXT NOT NULL UNIQUE PRIMARY KEY, description TEXT NOT NULL,
last_run REAL, next_run REAL, last_run_result INTEGER)''')
cur.execute('''
CREATE TABLE history(
hash TEXT, description TEXT, time REAL, result INTEGER,
FOREIGN KEY(hash) REFERENCES jobs(hash))''')
conn.commit()
        conn.close() | Create the tables needed to store the information. | Below is the instruction that describes the task:
### Input:
Create the tables needed to store the information.
### Response:
def _create(self, cache_file):
"""Create the tables needed to store the information."""
conn = sqlite3.connect(cache_file)
cur = conn.cursor()
cur.execute("PRAGMA foreign_keys = ON")
cur.execute('''
CREATE TABLE jobs(
hash TEXT NOT NULL UNIQUE PRIMARY KEY, description TEXT NOT NULL,
last_run REAL, next_run REAL, last_run_result INTEGER)''')
cur.execute('''
CREATE TABLE history(
hash TEXT, description TEXT, time REAL, result INTEGER,
FOREIGN KEY(hash) REFERENCES jobs(hash))''')
conn.commit()
conn.close() |
def prepend_model(self, value, model):
"""
Prepends model name if it is not already prepended.
For example model is "Offer":
key -> Offer.key
-key -> -Offer.key
Offer.key -> Offer.key
-Offer.key -> -Offer.key
"""
if '.' not in value:
direction = ''
if value.startswith('-'):
value = value[1:]
direction = '-'
value = '%s%s.%s' % (direction, model, value)
return value | Prepends model name if it is not already prepended.
For example model is "Offer":
key -> Offer.key
-key -> -Offer.key
Offer.key -> Offer.key
            -Offer.key -> -Offer.key | Below is the instruction that describes the task:
### Input:
Prepends model name if it is not already prepended.
For example model is "Offer":
key -> Offer.key
-key -> -Offer.key
Offer.key -> Offer.key
-Offer.key -> -Offer.key
### Response:
def prepend_model(self, value, model):
"""
Prepends model name if it is not already prepended.
For example model is "Offer":
key -> Offer.key
-key -> -Offer.key
Offer.key -> Offer.key
-Offer.key -> -Offer.key
"""
if '.' not in value:
direction = ''
if value.startswith('-'):
value = value[1:]
direction = '-'
value = '%s%s.%s' % (direction, model, value)
return value |
def separate_tour_and_o(row):
"""
The tour line typically contains contig list like:
tig00044568+ tig00045748- tig00071055- tig00015093- tig00030900-
This function separates the names from the orientations.
"""
tour = []
tour_o = []
for contig in row.split():
if contig[-1] in ('+', '-', '?'):
tour.append(contig[:-1])
tour_o.append(contig[-1])
else: # Unoriented
tour.append(contig)
tour_o.append('?')
return tour, tour_o | The tour line typically contains contig list like:
tig00044568+ tig00045748- tig00071055- tig00015093- tig00030900-
    This function separates the names from the orientations. | Below is the instruction that describes the task:
### Input:
The tour line typically contains contig list like:
tig00044568+ tig00045748- tig00071055- tig00015093- tig00030900-
This function separates the names from the orientations.
### Response:
def separate_tour_and_o(row):
"""
The tour line typically contains contig list like:
tig00044568+ tig00045748- tig00071055- tig00015093- tig00030900-
This function separates the names from the orientations.
"""
tour = []
tour_o = []
for contig in row.split():
if contig[-1] in ('+', '-', '?'):
tour.append(contig[:-1])
tour_o.append(contig[-1])
else: # Unoriented
tour.append(contig)
tour_o.append('?')
return tour, tour_o |
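A worked example on a shortened tour line (added for illustration):
tour, tour_o = separate_tour_and_o('tig00044568+ tig00045748- tig00030900')
assert tour == ['tig00044568', 'tig00045748', 'tig00030900']
assert tour_o == ['+', '-', '?']   # the unoriented contig gets '?'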
def getLogger(cls):
"""
Get the logger that logs real-time to the leader.
Note that if the returned logger is used on the leader, you will see the message twice,
since it still goes to the normal log handlers, too.
"""
# Only do the setup once, so we don't add a handler every time we log. Use a lock to do
# so safely even if we're being called in different threads. Use double-checked locking
# to reduce the overhead introduced by the lock.
if cls.logger is None:
with cls.lock:
if cls.logger is None:
cls.logger = logging.getLogger('toil-rt')
try:
level = os.environ[cls.envPrefix + 'LEVEL']
except KeyError:
# There is no server running on the leader, so suppress most log messages
# and skip the UDP stuff.
cls.logger.setLevel(logging.CRITICAL)
else:
# Adopt the logging level set on the leader.
toil.lib.bioio.setLogLevel(level, cls.logger)
try:
address = os.environ[cls.envPrefix + 'ADDRESS']
except KeyError:
pass
else:
# We know where to send messages to, so send them.
host, port = address.split(':')
cls.logger.addHandler(JSONDatagramHandler(host, int(port)))
return cls.logger | Get the logger that logs real-time to the leader.
Note that if the returned logger is used on the leader, you will see the message twice,
        since it still goes to the normal log handlers, too. | Below is the instruction that describes the task:
### Input:
Get the logger that logs real-time to the leader.
Note that if the returned logger is used on the leader, you will see the message twice,
since it still goes to the normal log handlers, too.
### Response:
def getLogger(cls):
"""
Get the logger that logs real-time to the leader.
Note that if the returned logger is used on the leader, you will see the message twice,
since it still goes to the normal log handlers, too.
"""
# Only do the setup once, so we don't add a handler every time we log. Use a lock to do
# so safely even if we're being called in different threads. Use double-checked locking
# to reduce the overhead introduced by the lock.
if cls.logger is None:
with cls.lock:
if cls.logger is None:
cls.logger = logging.getLogger('toil-rt')
try:
level = os.environ[cls.envPrefix + 'LEVEL']
except KeyError:
# There is no server running on the leader, so suppress most log messages
# and skip the UDP stuff.
cls.logger.setLevel(logging.CRITICAL)
else:
# Adopt the logging level set on the leader.
toil.lib.bioio.setLogLevel(level, cls.logger)
try:
address = os.environ[cls.envPrefix + 'ADDRESS']
except KeyError:
pass
else:
# We know where to send messages to, so send them.
host, port = address.split(':')
cls.logger.addHandler(JSONDatagramHandler(host, int(port)))
return cls.logger |
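Hypothetical use from a worker process, assuming the enclosing class is Toil's RealtimeLogger (messages only reach the leader when the envPrefix LEVEL/ADDRESS variables have been set by it):
log = RealtimeLogger.getLogger()
log.info('shard finished')  # forwarded over UDP only if the handler was attached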
def _read(self):
""" Read a USB HID feature report from the YubiKey. """
request_type = _USB_TYPE_CLASS | _USB_RECIP_INTERFACE | _USB_ENDPOINT_IN
value = _REPORT_TYPE_FEATURE << 8 # apparently required for YubiKey 1.3.2, but not 2.2.x
recv = self._usb_handle.controlMsg(request_type,
_HID_GET_REPORT,
_FEATURE_RPT_SIZE,
value = value,
timeout = _USB_TIMEOUT_MS)
if len(recv) != _FEATURE_RPT_SIZE:
self._debug("Failed reading %i bytes (got %i) from USB HID YubiKey.\n"
% (_FEATURE_RPT_SIZE, recv))
raise YubiKeyUSBHIDError('Failed reading from USB HID YubiKey')
data = b''.join(yubico_util.chr_byte(c) for c in recv)
self._debug("READ : %s" % (yubico_util.hexdump(data, colorize=True)))
        return data | Read a USB HID feature report from the YubiKey. | Below is the instruction that describes the task:
### Input:
Read a USB HID feature report from the YubiKey.
### Response:
def _read(self):
""" Read a USB HID feature report from the YubiKey. """
request_type = _USB_TYPE_CLASS | _USB_RECIP_INTERFACE | _USB_ENDPOINT_IN
value = _REPORT_TYPE_FEATURE << 8 # apparently required for YubiKey 1.3.2, but not 2.2.x
recv = self._usb_handle.controlMsg(request_type,
_HID_GET_REPORT,
_FEATURE_RPT_SIZE,
value = value,
timeout = _USB_TIMEOUT_MS)
if len(recv) != _FEATURE_RPT_SIZE:
self._debug("Failed reading %i bytes (got %i) from USB HID YubiKey.\n"
% (_FEATURE_RPT_SIZE, recv))
raise YubiKeyUSBHIDError('Failed reading from USB HID YubiKey')
data = b''.join(yubico_util.chr_byte(c) for c in recv)
self._debug("READ : %s" % (yubico_util.hexdump(data, colorize=True)))
return data |
def setRaster(self, rows, columns):
""" Sets the raster for the region, allowing sections to be indexed by row/column """
rows = int(rows)
columns = int(columns)
if rows <= 0 or columns <= 0:
return self
self._raster = (rows, columns)
        return self.getCell(0, 0) | Sets the raster for the region, allowing sections to be indexed by row/column | Below is the instruction that describes the task:
### Input:
Sets the raster for the region, allowing sections to be indexed by row/column
### Response:
def setRaster(self, rows, columns):
""" Sets the raster for the region, allowing sections to be indexed by row/column """
rows = int(rows)
columns = int(columns)
if rows <= 0 or columns <= 0:
return self
self._raster = (rows, columns)
return self.getCell(0, 0) |
def _GetNumberOfDaysInCentury(self, year):
"""Retrieves the number of days in a century.
Args:
year (int): year in the century e.g. 1970.
Returns:
int: number of (remaining) days in the century.
Raises:
ValueError: if the year value is out of bounds.
"""
if year < 0:
raise ValueError('Year value out of bounds.')
year, _ = divmod(year, 100)
if self._IsLeapYear(year):
return 36525
return 36524 | Retrieves the number of days in a century.
Args:
year (int): year in the century e.g. 1970.
Returns:
int: number of (remaining) days in the century.
Raises:
      ValueError: if the year value is out of bounds. | Below is the instruction that describes the task:
### Input:
Retrieves the number of days in a century.
Args:
year (int): year in the century e.g. 1970.
Returns:
int: number of (remaining) days in the century.
Raises:
ValueError: if the year value is out of bounds.
### Response:
def _GetNumberOfDaysInCentury(self, year):
"""Retrieves the number of days in a century.
Args:
year (int): year in the century e.g. 1970.
Returns:
int: number of (remaining) days in the century.
Raises:
ValueError: if the year value is out of bounds.
"""
if year < 0:
raise ValueError('Year value out of bounds.')
year, _ = divmod(year, 100)
if self._IsLeapYear(year):
return 36525
return 36524 |
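A standalone sketch of the same Gregorian rule, added for illustration and assuming _IsLeapYear is the usual divisible-by-4/100/400 test applied here to the century number:
def days_in_century(year):
    century = year // 100
    is_leap = (century % 4 == 0 and century % 100 != 0) or century % 400 == 0
    return 36525 if is_leap else 36524
assert days_in_century(1970) == 36524   # 19 is not divisible by 4
assert days_in_century(2000) == 36525   # 20 is, so that century gains a day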
def remove_photo(self, collection_id, photo_id):
"""
Remove a photo from one of the logged-in user’s collections.
Requires the 'write_collections' scope.
:param collection_id [string]: The collection’s ID. Required.
:param photo_id [string]: The photo’s ID. Required.
:return: [Tuple]: The Unsplash Collection and Photo
"""
url = "/collections/%s/remove" % collection_id
data = {
"collection_id": collection_id,
"photo_id": photo_id
}
result = self._delete(url, data=data) or {}
return CollectionModel.parse(result.get("collection")), PhotoModel.parse(result.get("photo")) | Remove a photo from one of the logged-in user’s collections.
Requires the 'write_collections' scope.
:param collection_id [string]: The collection’s ID. Required.
:param photo_id [string]: The photo’s ID. Required.
        :return: [Tuple]: The Unsplash Collection and Photo | Below is the instruction that describes the task:
### Input:
Remove a photo from one of the logged-in user’s collections.
Requires the 'write_collections' scope.
:param collection_id [string]: The collection’s ID. Required.
:param photo_id [string]: The photo’s ID. Required.
:return: [Tuple]: The Unsplash Collection and Photo
### Response:
def remove_photo(self, collection_id, photo_id):
"""
Remove a photo from one of the logged-in user’s collections.
Requires the 'write_collections' scope.
:param collection_id [string]: The collection’s ID. Required.
:param photo_id [string]: The photo’s ID. Required.
:return: [Tuple]: The Unsplash Collection and Photo
"""
url = "/collections/%s/remove" % collection_id
data = {
"collection_id": collection_id,
"photo_id": photo_id
}
result = self._delete(url, data=data) or {}
return CollectionModel.parse(result.get("collection")), PhotoModel.parse(result.get("photo")) |
def get_screen_settings(self, screen_id):
"""Returns the recording settings for a particular screen.
in screen_id of type int
Screen ID to retrieve recording screen settings for.
return record_screen_settings of type :class:`IRecordingScreenSettings`
Recording screen settings for the requested screen.
"""
if not isinstance(screen_id, baseinteger):
raise TypeError("screen_id can only be an instance of type baseinteger")
record_screen_settings = self._call("getScreenSettings",
in_p=[screen_id])
record_screen_settings = IRecordingScreenSettings(record_screen_settings)
return record_screen_settings | Returns the recording settings for a particular screen.
in screen_id of type int
Screen ID to retrieve recording screen settings for.
return record_screen_settings of type :class:`IRecordingScreenSettings`
            Recording screen settings for the requested screen. | Below is the instruction that describes the task:
### Input:
Returns the recording settings for a particular screen.
in screen_id of type int
Screen ID to retrieve recording screen settings for.
return record_screen_settings of type :class:`IRecordingScreenSettings`
Recording screen settings for the requested screen.
### Response:
def get_screen_settings(self, screen_id):
"""Returns the recording settings for a particular screen.
in screen_id of type int
Screen ID to retrieve recording screen settings for.
return record_screen_settings of type :class:`IRecordingScreenSettings`
Recording screen settings for the requested screen.
"""
if not isinstance(screen_id, baseinteger):
raise TypeError("screen_id can only be an instance of type baseinteger")
record_screen_settings = self._call("getScreenSettings",
in_p=[screen_id])
record_screen_settings = IRecordingScreenSettings(record_screen_settings)
return record_screen_settings |
def _make_sj_out_panel(sj_outD, total_jxn_cov_cutoff=20):
"""Filter junctions from many sj_out files and make panel.
Parameters
----------
sj_outD : dict
Dict whose keys are sample names and values are sj_out dataframes
total_jxn_cov_cutoff : int
If the unique read coverage of a junction summed over all samples is not
greater than or equal to this value, the junction will not be included
in the final output.
Returns
-------
sj_outP : pandas.Panel
Panel where each dataframe corresponds to an sj_out file filtered to
remove low coverage junctions. Each dataframe has COUNT_COLS =
('unique_junction_reads', 'multimap_junction_reads', 'max_overhang')
annotDF : pandas.DataFrame
Dataframe with values ANNOTATION_COLS = ('chrom', 'start',
'end', 'intron_motif', 'annotated') that are otherwise
duplicated in the panel.
"""
# num_jxns = dict()
# # set of all junctions
# jxnS = reduce(lambda x,y: set(x) | set(y),
# [ sj_outD[k].index for k in sj_outD.keys() ])
# jxn_keepS = set()
# jxn_setsD = dict()
# for k in sj_outD.keys():
# jxn_setsD[k] = frozenset(sj_outD[k].index)
# for j in jxnS:
# if sum([ sj_outD[k].ix[j,'unique_junction_reads'] for k in sj_outD.keys()
# if j in jxn_setsD[k] ]) >= total_jxn_cov_cutoff:
# jxn_keepS.add(j)
# for k in sj_outD.keys():
# sj_outD[k] = sj_outD[k].ix[jxn_keepS]
sj_outP = pd.Panel(sj_outD)
for col in ['unique_junction_reads', 'multimap_junction_reads',
'max_overhang']:
sj_outP.ix[:,:,col] = sj_outP.ix[:,:,col].fillna(0)
# Some dataframes will be missing information like intron_motif etc. for
# junctions that were not observed in that sample. The info is somewhere in
# the panel though so we can get it.
annotDF = reduce(pd.DataFrame.combine_first,
[ sj_outP.ix[item,:,ANNOTATION_COLS].dropna() for item in
sj_outP.items ])
annotDF['start'] = annotDF['start'].astype(int)
annotDF['end'] = annotDF['end'].astype(int)
annotDF['annotated'] = annotDF['annotated'].astype(bool)
# Sort annotation and panel
annotDF = annotDF.sort_values(by=['chrom', 'start', 'end'])
sj_outP = sj_outP.ix[:, annotDF.index, :]
sj_outP = sj_outP.ix[:,:,COUNT_COLS].astype(int)
return sj_outP, annotDF | Filter junctions from many sj_out files and make panel.
Parameters
----------
sj_outD : dict
Dict whose keys are sample names and values are sj_out dataframes
total_jxn_cov_cutoff : int
If the unique read coverage of a junction summed over all samples is not
greater than or equal to this value, the junction will not be included
in the final output.
Returns
-------
sj_outP : pandas.Panel
Panel where each dataframe corresponds to an sj_out file filtered to
remove low coverage junctions. Each dataframe has COUNT_COLS =
('unique_junction_reads', 'multimap_junction_reads', 'max_overhang')
annotDF : pandas.DataFrame
Dataframe with values ANNOTATION_COLS = ('chrom', 'start',
'end', 'intron_motif', 'annotated') that are otherwise
        duplicated in the panel. | Below is the instruction that describes the task:
### Input:
Filter junctions from many sj_out files and make panel.
Parameters
----------
sj_outD : dict
Dict whose keys are sample names and values are sj_out dataframes
total_jxn_cov_cutoff : int
If the unique read coverage of a junction summed over all samples is not
greater than or equal to this value, the junction will not be included
in the final output.
Returns
-------
sj_outP : pandas.Panel
Panel where each dataframe corresponds to an sj_out file filtered to
remove low coverage junctions. Each dataframe has COUNT_COLS =
('unique_junction_reads', 'multimap_junction_reads', 'max_overhang')
annotDF : pandas.DataFrame
Dataframe with values ANNOTATION_COLS = ('chrom', 'start',
'end', 'intron_motif', 'annotated') that are otherwise
duplicated in the panel.
### Response:
def _make_sj_out_panel(sj_outD, total_jxn_cov_cutoff=20):
"""Filter junctions from many sj_out files and make panel.
Parameters
----------
sj_outD : dict
Dict whose keys are sample names and values are sj_out dataframes
total_jxn_cov_cutoff : int
If the unique read coverage of a junction summed over all samples is not
greater than or equal to this value, the junction will not be included
in the final output.
Returns
-------
sj_outP : pandas.Panel
Panel where each dataframe corresponds to an sj_out file filtered to
remove low coverage junctions. Each dataframe has COUNT_COLS =
('unique_junction_reads', 'multimap_junction_reads', 'max_overhang')
annotDF : pandas.DataFrame
Dataframe with values ANNOTATION_COLS = ('chrom', 'start',
'end', 'intron_motif', 'annotated') that are otherwise
duplicated in the panel.
"""
# num_jxns = dict()
# # set of all junctions
# jxnS = reduce(lambda x,y: set(x) | set(y),
# [ sj_outD[k].index for k in sj_outD.keys() ])
# jxn_keepS = set()
# jxn_setsD = dict()
# for k in sj_outD.keys():
# jxn_setsD[k] = frozenset(sj_outD[k].index)
# for j in jxnS:
# if sum([ sj_outD[k].ix[j,'unique_junction_reads'] for k in sj_outD.keys()
# if j in jxn_setsD[k] ]) >= total_jxn_cov_cutoff:
# jxn_keepS.add(j)
# for k in sj_outD.keys():
# sj_outD[k] = sj_outD[k].ix[jxn_keepS]
sj_outP = pd.Panel(sj_outD)
for col in ['unique_junction_reads', 'multimap_junction_reads',
'max_overhang']:
sj_outP.ix[:,:,col] = sj_outP.ix[:,:,col].fillna(0)
# Some dataframes will be missing information like intron_motif etc. for
# junctions that were not observed in that sample. The info is somewhere in
# the panel though so we can get it.
annotDF = reduce(pd.DataFrame.combine_first,
[ sj_outP.ix[item,:,ANNOTATION_COLS].dropna() for item in
sj_outP.items ])
annotDF['start'] = annotDF['start'].astype(int)
annotDF['end'] = annotDF['end'].astype(int)
annotDF['annotated'] = annotDF['annotated'].astype(bool)
# Sort annotation and panel
annotDF = annotDF.sort_values(by=['chrom', 'start', 'end'])
sj_outP = sj_outP.ix[:, annotDF.index, :]
sj_outP = sj_outP.ix[:,:,COUNT_COLS].astype(int)
return sj_outP, annotDF |
def list_workers(config, *, filter_by_queues=None):
""" Return a list of all available workers.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
filter_by_queues (list): Restrict the returned workers to workers that listen to
at least one of the queue names in this list.
Returns:
list: A list of WorkerStats objects.
"""
celery_app = create_app(config)
worker_stats = celery_app.control.inspect().stats()
queue_stats = celery_app.control.inspect().active_queues()
if worker_stats is None:
return []
workers = []
for name, w_stat in worker_stats.items():
queues = [QueueStats.from_celery(q_stat) for q_stat in queue_stats[name]]
add_worker = filter_by_queues is None
if not add_worker:
for queue in queues:
if queue.name in filter_by_queues:
add_worker = True
break
if add_worker:
workers.append(WorkerStats.from_celery(name, w_stat, queues))
return workers | Return a list of all available workers.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
filter_by_queues (list): Restrict the returned workers to workers that listen to
at least one of the queue names in this list.
Returns:
        list: A list of WorkerStats objects. | Below is the instruction that describes the task:
### Input:
Return a list of all available workers.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
filter_by_queues (list): Restrict the returned workers to workers that listen to
at least one of the queue names in this list.
Returns:
list: A list of WorkerStats objects.
### Response:
def list_workers(config, *, filter_by_queues=None):
""" Return a list of all available workers.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
filter_by_queues (list): Restrict the returned workers to workers that listen to
at least one of the queue names in this list.
Returns:
list: A list of WorkerStats objects.
"""
celery_app = create_app(config)
worker_stats = celery_app.control.inspect().stats()
queue_stats = celery_app.control.inspect().active_queues()
if worker_stats is None:
return []
workers = []
for name, w_stat in worker_stats.items():
queues = [QueueStats.from_celery(q_stat) for q_stat in queue_stats[name]]
add_worker = filter_by_queues is None
if not add_worker:
for queue in queues:
if queue.name in filter_by_queues:
add_worker = True
break
if add_worker:
workers.append(WorkerStats.from_celery(name, w_stat, queues))
return workers |
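A hypothetical call site, added for illustration (the WorkerStats/QueueStats attribute names used in the loop are assumptions, not confirmed by the record):
workers = list_workers(config, filter_by_queues=['gpu'])
for w in workers:
    print(w.name, [q.name for q in w.queues])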
def get_autoregressive_bias(max_length: int, dtype: str = C.DTYPE_FP32) -> mx.sym.Symbol:
"""
Returns bias/mask to ensure position i can only attend to positions <i.
:param max_length: Sequence length.
:param dtype: dtype of bias
:return: Bias symbol of shape (1, max_length, max_length).
"""
length_array = mx.sym.arange(max_length, dtype=dtype)
# matrix with lower triangle and main diagonal set to 0, upper triangle set to 1
bias = mx.sym.broadcast_greater(mx.sym.reshape(length_array, shape=(1, -1)),
mx.sym.reshape(length_array, shape=(-1, 1)))
bias = bias * -C.LARGE_VALUES[dtype]
bias = mx.sym.reshape(bias, shape=(1, max_length, max_length))
return mx.sym.BlockGrad(bias) | Returns bias/mask to ensure position i can only attend to positions <i.
:param max_length: Sequence length.
:param dtype: dtype of bias
    :return: Bias symbol of shape (1, max_length, max_length). | Below is the instruction that describes the task:
### Input:
Returns bias/mask to ensure position i can only attend to positions <i.
:param max_length: Sequence length.
:param dtype: dtype of bias
:return: Bias symbol of shape (1, max_length, max_length).
### Response:
def get_autoregressive_bias(max_length: int, dtype: str = C.DTYPE_FP32) -> mx.sym.Symbol:
"""
Returns bias/mask to ensure position i can only attend to positions <i.
:param max_length: Sequence length.
:param dtype: dtype of bias
:return: Bias symbol of shape (1, max_length, max_length).
"""
length_array = mx.sym.arange(max_length, dtype=dtype)
# matrix with lower triangle and main diagonal set to 0, upper triangle set to 1
bias = mx.sym.broadcast_greater(mx.sym.reshape(length_array, shape=(1, -1)),
mx.sym.reshape(length_array, shape=(-1, 1)))
bias = bias * -C.LARGE_VALUES[dtype]
bias = mx.sym.reshape(bias, shape=(1, max_length, max_length))
return mx.sym.BlockGrad(bias) |
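An illustrative NumPy rendering of the same mask for max_length=3, added for clarity (-1e8 stands in for C.LARGE_VALUES[dtype]):
import numpy as np
length = np.arange(3)
bias = (length[None, :] > length[:, None]) * -1e8   # block attention to future positions
# bias == [[   0., -1e8, -1e8],
#          [   0.,    0., -1e8],
#          [   0.,    0.,    0.]]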
def graph_from_polygon(polygon, network_type='all_private', simplify=True,
retain_all=False, truncate_by_edge=False, name='unnamed',
timeout=180, memory=None,
max_query_area_size=50*1000*50*1000,
clean_periphery=True, infrastructure='way["highway"]',
custom_filter=None):
"""
Create a networkx graph from OSM data within the spatial boundaries of the
passed-in shapely polygon.
Parameters
----------
polygon : shapely Polygon or MultiPolygon
the shape to get network data within. coordinates should be in units of
latitude-longitude degrees.
network_type : string
what type of street network to get
simplify : bool
if true, simplify the graph topology
retain_all : bool
if True, return the entire graph even if it is not connected
truncate_by_edge : bool
if True retain node if it's outside bbox but at least one of node's
neighbors are within bbox
name : string
the name of the graph
timeout : int
the timeout interval for requests and to pass to API
memory : int
server memory allocation size for the query, in bytes. If none, server
will use its default allocation size
max_query_area_size : float
max size for any part of the geometry, in square degrees: any polygon
bigger will get divided up for multiple queries to API
clean_periphery : bool
if True (and simplify=True), buffer 0.5km to get a graph larger than
requested, then simplify, then truncate it to requested spatial extent
infrastructure : string
download infrastructure of given type (default is streets
(ie, 'way["highway"]') but other infrastructures may be selected
like power grids (ie, 'way["power"~"line"]'))
custom_filter : string
a custom network filter to be used instead of the network_type presets
Returns
-------
networkx multidigraph
"""
# verify that the geometry is valid and is a shapely Polygon/MultiPolygon
# before proceeding
if not polygon.is_valid:
raise TypeError('Shape does not have a valid geometry')
if not isinstance(polygon, (Polygon, MultiPolygon)):
raise TypeError('Geometry must be a shapely Polygon or MultiPolygon. If you requested '
'graph from place name or address, make sure your query resolves to a '
'Polygon or MultiPolygon, and not some other geometry, like a Point. '
'See OSMnx documentation for details.')
if clean_periphery and simplify:
# create a new buffered polygon 0.5km around the desired one
buffer_dist = 500
polygon_utm, crs_utm = project_geometry(geometry=polygon)
polygon_proj_buff = polygon_utm.buffer(buffer_dist)
polygon_buffered, _ = project_geometry(geometry=polygon_proj_buff, crs=crs_utm, to_latlong=True)
# get the network data from OSM, create the buffered graph, then
# truncate it to the buffered polygon
response_jsons = osm_net_download(polygon=polygon_buffered, network_type=network_type,
timeout=timeout, memory=memory,
max_query_area_size=max_query_area_size,
infrastructure=infrastructure, custom_filter=custom_filter)
G_buffered = create_graph(response_jsons, name=name, retain_all=True,
bidirectional=network_type in settings.bidirectional_network_types)
G_buffered = truncate_graph_polygon(G_buffered, polygon_buffered, retain_all=True, truncate_by_edge=truncate_by_edge)
# simplify the graph topology
G_buffered = simplify_graph(G_buffered)
# truncate graph by polygon to return the graph within the polygon that
# caller wants. don't simplify again - this allows us to retain
# intersections along the street that may now only connect 2 street
# segments in the network, but in reality also connect to an
# intersection just outside the polygon
G = truncate_graph_polygon(G_buffered, polygon, retain_all=retain_all, truncate_by_edge=truncate_by_edge)
# count how many street segments in buffered graph emanate from each
# intersection in un-buffered graph, to retain true counts for each
# intersection, even if some of its neighbors are outside the polygon
G.graph['streets_per_node'] = count_streets_per_node(G_buffered, nodes=G.nodes())
else:
# download a list of API responses for the polygon/multipolygon
response_jsons = osm_net_download(polygon=polygon, network_type=network_type,
timeout=timeout, memory=memory,
max_query_area_size=max_query_area_size,
infrastructure=infrastructure, custom_filter=custom_filter)
# create the graph from the downloaded data
G = create_graph(response_jsons, name=name, retain_all=True,
bidirectional=network_type in settings.bidirectional_network_types)
# truncate the graph to the extent of the polygon
G = truncate_graph_polygon(G, polygon, retain_all=retain_all, truncate_by_edge=truncate_by_edge)
# simplify the graph topology as the last step. don't truncate after
# simplifying or you may have simplified out to an endpoint beyond the
# truncation distance, in which case you will then strip out your entire
# edge
if simplify:
G = simplify_graph(G)
log('graph_from_polygon() returning graph with {:,} nodes and {:,} edges'.format(len(list(G.nodes())), len(list(G.edges()))))
return G | Create a networkx graph from OSM data within the spatial boundaries of the
passed-in shapely polygon.
Parameters
----------
polygon : shapely Polygon or MultiPolygon
the shape to get network data within. coordinates should be in units of
latitude-longitude degrees.
network_type : string
what type of street network to get
simplify : bool
if true, simplify the graph topology
retain_all : bool
if True, return the entire graph even if it is not connected
truncate_by_edge : bool
if True retain node if it's outside bbox but at least one of node's
neighbors are within bbox
name : string
the name of the graph
timeout : int
the timeout interval for requests and to pass to API
memory : int
server memory allocation size for the query, in bytes. If none, server
will use its default allocation size
max_query_area_size : float
max size for any part of the geometry, in square degrees: any polygon
bigger will get divided up for multiple queries to API
clean_periphery : bool
if True (and simplify=True), buffer 0.5km to get a graph larger than
requested, then simplify, then truncate it to requested spatial extent
infrastructure : string
download infrastructure of given type (default is streets
(ie, 'way["highway"]') but other infrastructures may be selected
like power grids (ie, 'way["power"~"line"]'))
custom_filter : string
a custom network filter to be used instead of the network_type presets
Returns
-------
    networkx multidigraph | Below is the instruction that describes the task:
### Input:
Create a networkx graph from OSM data within the spatial boundaries of the
passed-in shapely polygon.
Parameters
----------
polygon : shapely Polygon or MultiPolygon
the shape to get network data within. coordinates should be in units of
latitude-longitude degrees.
network_type : string
what type of street network to get
simplify : bool
if true, simplify the graph topology
retain_all : bool
if True, return the entire graph even if it is not connected
truncate_by_edge : bool
if True retain node if it's outside bbox but at least one of node's
neighbors are within bbox
name : string
the name of the graph
timeout : int
the timeout interval for requests and to pass to API
memory : int
server memory allocation size for the query, in bytes. If none, server
will use its default allocation size
max_query_area_size : float
max size for any part of the geometry, in square degrees: any polygon
bigger will get divided up for multiple queries to API
clean_periphery : bool
if True (and simplify=True), buffer 0.5km to get a graph larger than
requested, then simplify, then truncate it to requested spatial extent
infrastructure : string
download infrastructure of given type (default is streets
(ie, 'way["highway"]') but other infrastructures may be selected
like power grids (ie, 'way["power"~"line"]'))
custom_filter : string
a custom network filter to be used instead of the network_type presets
Returns
-------
networkx multidigraph
### Response:
def graph_from_polygon(polygon, network_type='all_private', simplify=True,
retain_all=False, truncate_by_edge=False, name='unnamed',
timeout=180, memory=None,
max_query_area_size=50*1000*50*1000,
clean_periphery=True, infrastructure='way["highway"]',
custom_filter=None):
"""
Create a networkx graph from OSM data within the spatial boundaries of the
passed-in shapely polygon.
Parameters
----------
polygon : shapely Polygon or MultiPolygon
the shape to get network data within. coordinates should be in units of
latitude-longitude degrees.
network_type : string
what type of street network to get
simplify : bool
if true, simplify the graph topology
retain_all : bool
if True, return the entire graph even if it is not connected
truncate_by_edge : bool
if True retain node if it's outside bbox but at least one of node's
neighbors are within bbox
name : string
the name of the graph
timeout : int
the timeout interval for requests and to pass to API
memory : int
server memory allocation size for the query, in bytes. If none, server
will use its default allocation size
max_query_area_size : float
max size for any part of the geometry, in square degrees: any polygon
bigger will get divided up for multiple queries to API
clean_periphery : bool
if True (and simplify=True), buffer 0.5km to get a graph larger than
requested, then simplify, then truncate it to requested spatial extent
infrastructure : string
download infrastructure of given type (default is streets
(ie, 'way["highway"]') but other infrastructures may be selected
like power grids (ie, 'way["power"~"line"]'))
custom_filter : string
a custom network filter to be used instead of the network_type presets
Returns
-------
networkx multidigraph
"""
# verify that the geometry is valid and is a shapely Polygon/MultiPolygon
# before proceeding
if not polygon.is_valid:
raise TypeError('Shape does not have a valid geometry')
if not isinstance(polygon, (Polygon, MultiPolygon)):
raise TypeError('Geometry must be a shapely Polygon or MultiPolygon. If you requested '
'graph from place name or address, make sure your query resolves to a '
'Polygon or MultiPolygon, and not some other geometry, like a Point. '
'See OSMnx documentation for details.')
if clean_periphery and simplify:
# create a new buffered polygon 0.5km around the desired one
buffer_dist = 500
polygon_utm, crs_utm = project_geometry(geometry=polygon)
polygon_proj_buff = polygon_utm.buffer(buffer_dist)
polygon_buffered, _ = project_geometry(geometry=polygon_proj_buff, crs=crs_utm, to_latlong=True)
# get the network data from OSM, create the buffered graph, then
# truncate it to the buffered polygon
response_jsons = osm_net_download(polygon=polygon_buffered, network_type=network_type,
timeout=timeout, memory=memory,
max_query_area_size=max_query_area_size,
infrastructure=infrastructure, custom_filter=custom_filter)
G_buffered = create_graph(response_jsons, name=name, retain_all=True,
bidirectional=network_type in settings.bidirectional_network_types)
G_buffered = truncate_graph_polygon(G_buffered, polygon_buffered, retain_all=True, truncate_by_edge=truncate_by_edge)
# simplify the graph topology
G_buffered = simplify_graph(G_buffered)
# truncate graph by polygon to return the graph within the polygon that
# caller wants. don't simplify again - this allows us to retain
# intersections along the street that may now only connect 2 street
# segments in the network, but in reality also connect to an
# intersection just outside the polygon
G = truncate_graph_polygon(G_buffered, polygon, retain_all=retain_all, truncate_by_edge=truncate_by_edge)
# count how many street segments in buffered graph emanate from each
# intersection in un-buffered graph, to retain true counts for each
# intersection, even if some of its neighbors are outside the polygon
G.graph['streets_per_node'] = count_streets_per_node(G_buffered, nodes=G.nodes())
else:
# download a list of API responses for the polygon/multipolygon
response_jsons = osm_net_download(polygon=polygon, network_type=network_type,
timeout=timeout, memory=memory,
max_query_area_size=max_query_area_size,
infrastructure=infrastructure, custom_filter=custom_filter)
# create the graph from the downloaded data
G = create_graph(response_jsons, name=name, retain_all=True,
bidirectional=network_type in settings.bidirectional_network_types)
# truncate the graph to the extent of the polygon
G = truncate_graph_polygon(G, polygon, retain_all=retain_all, truncate_by_edge=truncate_by_edge)
# simplify the graph topology as the last step. don't truncate after
# simplifying or you may have simplified out to an endpoint beyond the
# truncation distance, in which case you will then strip out your entire
# edge
if simplify:
G = simplify_graph(G)
log('graph_from_polygon() returning graph with {:,} nodes and {:,} edges'.format(len(list(G.nodes())), len(list(G.edges()))))
return G |
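Hypothetical OSMnx usage, added for illustration (the polygon coordinates are made-up lon/lat pairs):
import osmnx as ox
from shapely.geometry import Polygon
poly = Polygon([(-122.43, 37.78), (-122.41, 37.78), (-122.41, 37.76), (-122.43, 37.76)])
G = ox.graph_from_polygon(poly, network_type='drive', simplify=True)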
def __interpret_slices(self, slices):
"""
Convert python slice objects into a more useful and computable form:
- requested_bbox: A bounding box representing the volume requested
- steps: the requested stride over x,y,z
- channel_slice: A python slice object over the channel dimension
Returned as a tuple: (requested_bbox, steps, channel_slice)
"""
maxsize = list(self.bounds.maxpt) + [ self.num_channels ]
minsize = list(self.bounds.minpt) + [ 0 ]
slices = generate_slices(slices, minsize, maxsize, bounded=self.bounded)
channel_slice = slices.pop()
minpt = Vec(*[ slc.start for slc in slices ])
maxpt = Vec(*[ slc.stop for slc in slices ])
steps = Vec(*[ slc.step for slc in slices ])
return Bbox(minpt, maxpt), steps, channel_slice | Convert python slice objects into a more useful and computable form:
- requested_bbox: A bounding box representing the volume requested
- steps: the requested stride over x,y,z
- channel_slice: A python slice object over the channel dimension
    Returned as a tuple: (requested_bbox, steps, channel_slice) | Below is the instruction that describes the task:
### Input:
Convert python slice objects into a more useful and computable form:
- requested_bbox: A bounding box representing the volume requested
- steps: the requested stride over x,y,z
- channel_slice: A python slice object over the channel dimension
Returned as a tuple: (requested_bbox, steps, channel_slice)
### Response:
def __interpret_slices(self, slices):
"""
Convert python slice objects into a more useful and computable form:
- requested_bbox: A bounding box representing the volume requested
- steps: the requested stride over x,y,z
- channel_slice: A python slice object over the channel dimension
Returned as a tuple: (requested_bbox, steps, channel_slice)
"""
maxsize = list(self.bounds.maxpt) + [ self.num_channels ]
minsize = list(self.bounds.minpt) + [ 0 ]
slices = generate_slices(slices, minsize, maxsize, bounded=self.bounded)
channel_slice = slices.pop()
minpt = Vec(*[ slc.start for slc in slices ])
maxpt = Vec(*[ slc.stop for slc in slices ])
steps = Vec(*[ slc.step for slc in slices ])
return Bbox(minpt, maxpt), steps, channel_slice |
def to_wire_dict (self):
"""Return a simplified transport object for logging and caching.
The transport object must contain these attributes:
- url_data.valid: bool
Indicates if URL is valid
- url_data.result: unicode
Result string
- url_data.warnings: list of tuples (tag, warning message)
List of tagged warnings for this URL.
- url_data.name: unicode string or None
name of URL (eg. filename or link name)
- url_data.parent_url: unicode or None
Parent URL
- url_data.base_ref: unicode
HTML base reference URL of parent
- url_data.url: unicode
Fully qualified URL.
- url_data.domain: unicode
URL domain part.
- url_data.checktime: int
Number of seconds needed to check this link, default: zero.
- url_data.dltime: int
Number of seconds needed to download URL content, default: -1
- url_data.size: int
Size of downloaded URL content, default: -1
- url_data.info: list of unicode
Additional information about this URL.
- url_data.line: int
Line number of this URL at parent document, or -1
- url_data.column: int
Column number of this URL at parent document, or -1
- url_data.page: int
Page number of this URL at parent document, or -1
- url_data.cache_url: unicode
Cache url for this URL.
- url_data.content_type: unicode
MIME content type for URL content.
- url_data.level: int
Recursion level until reaching this URL from start URL
- url_data.last_modified: datetime
Last modification date of retrieved page (or None).
"""
return dict(valid=self.valid,
extern=self.extern[0],
result=self.result,
warnings=self.warnings[:],
name=self.name or u"",
title=self.get_title(),
parent_url=self.parent_url or u"",
base_ref=self.base_ref or u"",
base_url=self.base_url or u"",
url=self.url or u"",
domain=(self.urlparts[1] if self.urlparts else u""),
checktime=self.checktime,
dltime=self.dltime,
size=self.size,
info=self.info,
line=self.line,
column=self.column,
page=self.page,
cache_url=self.cache_url,
content_type=self.content_type,
level=self.recursion_level,
modified=self.modified,
) | Return a simplified transport object for logging and caching.
The transport object must contain these attributes:
- url_data.valid: bool
Indicates if URL is valid
- url_data.result: unicode
Result string
- url_data.warnings: list of tuples (tag, warning message)
List of tagged warnings for this URL.
- url_data.name: unicode string or None
name of URL (eg. filename or link name)
- url_data.parent_url: unicode or None
Parent URL
- url_data.base_ref: unicode
HTML base reference URL of parent
- url_data.url: unicode
Fully qualified URL.
- url_data.domain: unicode
URL domain part.
- url_data.checktime: int
Number of seconds needed to check this link, default: zero.
- url_data.dltime: int
Number of seconds needed to download URL content, default: -1
- url_data.size: int
Size of downloaded URL content, default: -1
- url_data.info: list of unicode
Additional information about this URL.
- url_data.line: int
Line number of this URL at parent document, or -1
- url_data.column: int
Column number of this URL at parent document, or -1
- url_data.page: int
Page number of this URL at parent document, or -1
- url_data.cache_url: unicode
Cache url for this URL.
- url_data.content_type: unicode
MIME content type for URL content.
- url_data.level: int
Recursion level until reaching this URL from start URL
- url_data.last_modified: datetime
Last modification date of retrieved page (or None). | Below is the instruction that describes the task:
### Input:
Return a simplified transport object for logging and caching.
The transport object must contain these attributes:
- url_data.valid: bool
Indicates if URL is valid
- url_data.result: unicode
Result string
- url_data.warnings: list of tuples (tag, warning message)
List of tagged warnings for this URL.
- url_data.name: unicode string or None
name of URL (eg. filename or link name)
- url_data.parent_url: unicode or None
Parent URL
- url_data.base_ref: unicode
HTML base reference URL of parent
- url_data.url: unicode
Fully qualified URL.
- url_data.domain: unicode
URL domain part.
- url_data.checktime: int
Number of seconds needed to check this link, default: zero.
- url_data.dltime: int
Number of seconds needed to download URL content, default: -1
- url_data.size: int
Size of downloaded URL content, default: -1
- url_data.info: list of unicode
Additional information about this URL.
- url_data.line: int
Line number of this URL at parent document, or -1
- url_data.column: int
Column number of this URL at parent document, or -1
- url_data.page: int
Page number of this URL at parent document, or -1
- url_data.cache_url: unicode
Cache url for this URL.
- url_data.content_type: unicode
MIME content type for URL content.
- url_data.level: int
Recursion level until reaching this URL from start URL
- url_data.last_modified: datetime
Last modification date of retrieved page (or None).
### Response:
def to_wire_dict (self):
"""Return a simplified transport object for logging and caching.
The transport object must contain these attributes:
- url_data.valid: bool
Indicates if URL is valid
- url_data.result: unicode
Result string
- url_data.warnings: list of tuples (tag, warning message)
List of tagged warnings for this URL.
- url_data.name: unicode string or None
name of URL (eg. filename or link name)
- url_data.parent_url: unicode or None
Parent URL
- url_data.base_ref: unicode
HTML base reference URL of parent
- url_data.url: unicode
Fully qualified URL.
- url_data.domain: unicode
URL domain part.
- url_data.checktime: int
Number of seconds needed to check this link, default: zero.
- url_data.dltime: int
Number of seconds needed to download URL content, default: -1
- url_data.size: int
Size of downloaded URL content, default: -1
- url_data.info: list of unicode
Additional information about this URL.
- url_data.line: int
Line number of this URL at parent document, or -1
- url_data.column: int
Column number of this URL at parent document, or -1
- url_data.page: int
Page number of this URL at parent document, or -1
- url_data.cache_url: unicode
Cache url for this URL.
- url_data.content_type: unicode
MIME content type for URL content.
- url_data.level: int
Recursion level until reaching this URL from start URL
- url_data.last_modified: datetime
Last modification date of retrieved page (or None).
"""
return dict(valid=self.valid,
extern=self.extern[0],
result=self.result,
warnings=self.warnings[:],
name=self.name or u"",
title=self.get_title(),
parent_url=self.parent_url or u"",
base_ref=self.base_ref or u"",
base_url=self.base_url or u"",
url=self.url or u"",
domain=(self.urlparts[1] if self.urlparts else u""),
checktime=self.checktime,
dltime=self.dltime,
size=self.size,
info=self.info,
line=self.line,
column=self.column,
page=self.page,
cache_url=self.cache_url,
content_type=self.content_type,
level=self.recursion_level,
modified=self.modified,
) |
def generate_context(name='', argspec='', note='', math=False, collapse=False,
img_path='', css_path=CSS_PATH):
"""
Generate the html_context dictionary for our Sphinx conf file.
This is a set of variables to be passed to the Jinja template engine and
that are used to control how the webpage is rendered in connection with
Sphinx
Parameters
----------
name : str
Object's name.
note : str
A note describing the type of the function or method being
introspected
argspec : str
Argspec of the function or method being introspected
math : bool
Turn on/off Latex rendering on the OI. If False, Latex will be shown in
plain text.
collapse : bool
Collapse sections
img_path : str
Path for images relative to the file containing the docstring
Returns
-------
A dict of strings to be used by Jinja to generate the webpage
"""
if img_path and os.name == 'nt':
img_path = img_path.replace('\\', '/')
context = \
{
# Arg dependent variables
'math_on': 'true' if math else '',
'name': name,
'argspec': argspec,
'note': note,
'collapse': collapse,
'img_path': img_path,
# Static variables
'css_path': css_path,
'js_path': JS_PATH,
'jquery_path': JQUERY_PATH,
'mathjax_path': MATHJAX_PATH,
'right_sphinx_version': '' if sphinx.__version__ < "1.1" else 'true',
'platform': sys.platform
}
return context | Generate the html_context dictionary for our Sphinx conf file.
This is a set of variables to be passed to the Jinja template engine and
that are used to control how the webpage is rendered in connection with
Sphinx
Parameters
----------
name : str
Object's name.
note : str
A note describing the type of the function or method being
introspected
argspec : str
Argspec of the function or method being introspected
math : bool
Turn on/off Latex rendering on the OI. If False, Latex will be shown in
plain text.
collapse : bool
Collapse sections
img_path : str
Path for images relative to the file containing the docstring
Returns
-------
A dict of strings to be used by Jinja to generate the webpage | Below is the instruction that describes the task:
### Input:
Generate the html_context dictionary for our Sphinx conf file.
This is a set of variables to be passed to the Jinja template engine and
that are used to control how the webpage is rendered in connection with
Sphinx
Parameters
----------
name : str
Object's name.
note : str
A note describing the type of the function or method being
introspected
argspec : str
Argspec of the function or method being introspected
math : bool
Turn on/off Latex rendering on the OI. If False, Latex will be shown in
plain text.
collapse : bool
Collapse sections
img_path : str
Path for images relative to the file containing the docstring
Returns
-------
A dict of strings to be used by Jinja to generate the webpage
### Response:
def generate_context(name='', argspec='', note='', math=False, collapse=False,
img_path='', css_path=CSS_PATH):
"""
Generate the html_context dictionary for our Sphinx conf file.
This is a set of variables to be passed to the Jinja template engine and
that are used to control how the webpage is rendered in connection with
Sphinx
Parameters
----------
name : str
Object's name.
note : str
A note describing the type of the function or method being
introspected
argspec : str
Argspec of the function or method being introspected
math : bool
Turn on/off Latex rendering on the OI. If False, Latex will be shown in
plain text.
collapse : bool
Collapse sections
img_path : str
Path for images relative to the file containing the docstring
Returns
-------
A dict of strings to be used by Jinja to generate the webpage
"""
if img_path and os.name == 'nt':
img_path = img_path.replace('\\', '/')
context = \
{
# Arg dependent variables
'math_on': 'true' if math else '',
'name': name,
'argspec': argspec,
'note': note,
'collapse': collapse,
'img_path': img_path,
# Static variables
'css_path': css_path,
'js_path': JS_PATH,
'jquery_path': JQUERY_PATH,
'mathjax_path': MATHJAX_PATH,
'right_sphinx_version': '' if sphinx.__version__ < "1.1" else 'true',
'platform': sys.platform
}
return context |
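A hedged usage sketch for the function above; the argument values are illustrative, and CSS_PATH, JS_PATH and the other path constants are assumed to be defined at module level as in the original file:
context = generate_context(name='my_function',
                           argspec='(x, y=1)',
                           note='Function of module example',
                           math=True,
                           collapse=False,
                           img_path='')
# 'context' is the dict handed to the Jinja template; inspect the keys it carries:
print(sorted(context.keys()))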
def asVersion(self):
"""
Convert the version data in this item to a
L{twisted.python.versions.Version}.
"""
return versions.Version(self.package, self.major, self.minor, self.micro) | Convert the version data in this item to a
L{twisted.python.versions.Version}. | Below is the instruction that describes the task:
### Input:
Convert the version data in this item to a
L{twisted.python.versions.Version}.
### Response:
def asVersion(self):
"""
Convert the version data in this item to a
L{twisted.python.versions.Version}.
"""
return versions.Version(self.package, self.major, self.minor, self.micro) |
def _send_request(self, path, data, method):
"""
Uses the HTTP transport to query the Route53 API. Runs the response
through lxml's parser, before we hand it off for further picking
apart by our call-specific parsers.
:param str path: The RESTful path to tack on to the :py:attr:`endpoint`.
:param data: The params to send along with the request.
:type data: Either a dict or bytes, depending on the request type.
:param str method: One of 'GET', 'POST', or 'DELETE'.
:rtype: lxml.etree._Element
:returns: An lxml Element root.
"""
response_body = self._transport.send_request(path, data, method)
root = etree.fromstring(response_body)
#print(prettyprint_xml(root))
return root | Uses the HTTP transport to query the Route53 API. Runs the response
through lxml's parser, before we hand it off for further picking
apart by our call-specific parsers.
:param str path: The RESTful path to tack on to the :py:attr:`endpoint`.
:param data: The params to send along with the request.
:type data: Either a dict or bytes, depending on the request type.
:param str method: One of 'GET', 'POST', or 'DELETE'.
:rtype: lxml.etree._Element
:returns: An lxml Element root. | Below is the instruction that describes the task:
### Input:
Uses the HTTP transport to query the Route53 API. Runs the response
through lxml's parser, before we hand it off for further picking
apart by our call-specific parsers.
:param str path: The RESTful path to tack on to the :py:attr:`endpoint`.
:param data: The params to send along with the request.
:type data: Either a dict or bytes, depending on the request type.
:param str method: One of 'GET', 'POST', or 'DELETE'.
:rtype: lxml.etree._Element
:returns: An lxml Element root.
### Response:
def _send_request(self, path, data, method):
"""
Uses the HTTP transport to query the Route53 API. Runs the response
through lxml's parser, before we hand it off for further picking
apart by our call-specific parsers.
:param str path: The RESTful path to tack on to the :py:attr:`endpoint`.
:param data: The params to send along with the request.
:type data: Either a dict or bytes, depending on the request type.
:param str method: One of 'GET', 'POST', or 'DELETE'.
:rtype: lxml.etree._Element
:returns: An lxml Element root.
"""
response_body = self._transport.send_request(path, data, method)
root = etree.fromstring(response_body)
#print(prettyprint_xml(root))
return root |
def search_and_extract_orfs_matching_protein_database(self,
unpack,
search_method,
maximum_range,
threads,
evalue,
min_orf_length,
restrict_read_length,
diamond_database,
output_search_file,
hit_reads_fasta,
hit_reads_orfs_fasta):
'''As per aa_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the proteins that hit together
with their containing nucleotide sequences.
Parameters
----------
output_search_file: str
path to hmmsearch output table or diamond basename
hit_reads_fasta: str
path to nucleotide sequences containing hit proteins
hit_reads_orfs_fasta: str
path to hit proteins, unaligned
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information
'''
# Define method of opening sequence files to stdout
if unpack.is_zcattable():
clazz = ZcatOrfM
else:
clazz = OrfM
orfm = clazz(min_orf_length=min_orf_length,
restrict_read_length=restrict_read_length)
extracting_orfm = OrfM(min_orf_length=min_orf_length,
restrict_read_length=restrict_read_length)
if search_method == 'hmmsearch':
# run hmmsearch
search_result = self.hmmsearch(
output_search_file,
unpack.read_file,
unpack,
unpack.sequence_type(),
threads,
evalue,
orfm
)
elif search_method == 'diamond':
# run diamond
search_result = Diamond(
database=diamond_database,
threads=threads,
evalue=evalue,
).run(
unpack.read_file,
unpack.sequence_type(),
daa_file_basename=output_search_file
)
search_result = [search_result]
else: # if the search_method isn't recognised
raise Exception("Programming error: unexpected search_method %s" % search_method)
orfm_regex = OrfM.regular_expression()
hits = self._get_sequence_directions(search_result)
orf_hit_readnames = hits.keys() # Orf read hit names
if unpack.sequence_type() == 'nucleotide':
hits={(orfm_regex.match(key).groups(0)[0] if orfm_regex.match(key) else key): item for key, item in hits.iteritems()}
hit_readnames = hits.keys() # Store raw read hit names
else:
hit_readnames=orf_hit_readnames
hit_reads_fasta, direction_information = self._extract_from_raw_reads(
hit_reads_fasta,
hit_readnames,
unpack.read_file,
unpack.format(),
hits
)
if not hit_readnames:
hit_read_counts = [0, len(hit_readnames)]
result = DBSearchResult(None,
search_result,
hit_read_counts,
None)
return result, direction_information
if unpack.sequence_type() == 'nucleotide':
# Extract the orfs of these reads that hit the original search
self._extract_orfs(
hit_reads_fasta,
extracting_orfm,
orf_hit_readnames,
hit_reads_orfs_fasta,
search_method,
list(search_result[0].each([SequenceSearchResult.QUERY_ID_FIELD,
SequenceSearchResult.ALIGNMENT_DIRECTION,
SequenceSearchResult.QUERY_FROM_FIELD,
SequenceSearchResult.QUERY_TO_FIELD])
)
)
hit_reads_fasta = hit_reads_orfs_fasta
slash_endings=self._check_for_slash_endings(hit_readnames)
result = DBSearchResult(hit_reads_fasta,
search_result,
[0, len([itertools.chain(*hits.values())])], # array of hits [euk hits, true hits]. Euk hits always 0 unless searching from 16S
slash_endings) # Any reads that end in /1 or /2
if maximum_range:
n_hits = sum([len(x["strand"]) for x in hits.values()])
else:
n_hits = len(hits.keys())
logging.info("%s read(s) detected" % n_hits)
return result, direction_information | As per aa_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the proteins that hit together
with their containing nucleotide sequences.
Parameters
----------
output_search_file: str
path to hmmsearch output table or diamond basename
hit_reads_fasta: str
path to nucleotide sequences containing hit proteins
hit_reads_orfs_fasta: str
path to hit proteins, unaligned
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information | Below is the instruction that describes the task:
### Input:
As per aa_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the proteins that hit together
with their containing nucleotide sequences.
Parameters
----------
output_search_file: str
path to hmmsearch output table or diamond basename
hit_reads_fasta: str
path to nucleotide sequences containing hit proteins
hit_reads_orfs_fasta: str
path to hit proteins, unaligned
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information
### Response:
def search_and_extract_orfs_matching_protein_database(self,
unpack,
search_method,
maximum_range,
threads,
evalue,
min_orf_length,
restrict_read_length,
diamond_database,
output_search_file,
hit_reads_fasta,
hit_reads_orfs_fasta):
'''As per aa_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the proteins that hit together
with their containing nucleotide sequences.
Parameters
----------
output_search_file: str
path to hmmsearch output table or diamond basename
hit_reads_fasta: str
path to nucleotide sequences containing hit proteins
hit_reads_orfs_fasta: str
path to hit proteins, unaligned
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information
'''
# Define method of opening sequence files to stdout
if unpack.is_zcattable():
clazz = ZcatOrfM
else:
clazz = OrfM
orfm = clazz(min_orf_length=min_orf_length,
restrict_read_length=restrict_read_length)
extracting_orfm = OrfM(min_orf_length=min_orf_length,
restrict_read_length=restrict_read_length)
if search_method == 'hmmsearch':
# run hmmsearch
search_result = self.hmmsearch(
output_search_file,
unpack.read_file,
unpack,
unpack.sequence_type(),
threads,
evalue,
orfm
)
elif search_method == 'diamond':
# run diamond
search_result = Diamond(
database=diamond_database,
threads=threads,
evalue=evalue,
).run(
unpack.read_file,
unpack.sequence_type(),
daa_file_basename=output_search_file
)
search_result = [search_result]
else: # if the search_method isn't recognised
raise Exception("Programming error: unexpected search_method %s" % search_method)
orfm_regex = OrfM.regular_expression()
hits = self._get_sequence_directions(search_result)
orf_hit_readnames = hits.keys() # Orf read hit names
if unpack.sequence_type() == 'nucleotide':
hits={(orfm_regex.match(key).groups(0)[0] if orfm_regex.match(key) else key): item for key, item in hits.iteritems()}
hit_readnames = hits.keys() # Store raw read hit names
else:
hit_readnames=orf_hit_readnames
hit_reads_fasta, direction_information = self._extract_from_raw_reads(
hit_reads_fasta,
hit_readnames,
unpack.read_file,
unpack.format(),
hits
)
if not hit_readnames:
hit_read_counts = [0, len(hit_readnames)]
result = DBSearchResult(None,
search_result,
hit_read_counts,
None)
return result, direction_information
if unpack.sequence_type() == 'nucleotide':
# Extract the orfs of these reads that hit the original search
self._extract_orfs(
hit_reads_fasta,
extracting_orfm,
orf_hit_readnames,
hit_reads_orfs_fasta,
search_method,
list(search_result[0].each([SequenceSearchResult.QUERY_ID_FIELD,
SequenceSearchResult.ALIGNMENT_DIRECTION,
SequenceSearchResult.QUERY_FROM_FIELD,
SequenceSearchResult.QUERY_TO_FIELD])
)
)
hit_reads_fasta = hit_reads_orfs_fasta
slash_endings=self._check_for_slash_endings(hit_readnames)
result = DBSearchResult(hit_reads_fasta,
search_result,
[0, len([itertools.chain(*hits.values())])], # array of hits [euk hits, true hits]. Euk hits always 0 unless searching from 16S
slash_endings) # Any reads that end in /1 or /2
if maximum_range:
n_hits = sum([len(x["strand"]) for x in hits.values()])
else:
n_hits = len(hits.keys())
logging.info("%s read(s) detected" % n_hits)
return result, direction_information |
def add_stream_logger(level=logging.DEBUG, name=None):
"""
Add a stream logger. This can be used for printing all SDK calls to stdout
while working in an interactive session. Note this is a logger for the
entire module, which will apply to all environments started in the same
session. If you need a specific logger pass a ``logfile`` to
:func:`~sdk.init`
Args:
level(int): :mod:`logging` log level
name(str): logger name, will default to the root logger.
Returns:
None
"""
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setFormatter(get_default_log_formatter())
handler.setLevel(level)
logger.addHandler(handler) | Add a stream logger. This can be used for printing all SDK calls to stdout
while working in an interactive session. Note this is a logger for the
entire module, which will apply to all environments started in the same
session. If you need a specific logger pass a ``logfile`` to
:func:`~sdk.init`
Args:
level(int): :mod:`logging` log level
name(str): logger name, will default to the root logger.
Returns:
None | Below is the instruction that describes the task:
### Input:
Add a stream logger. This can be used for printing all SDK calls to stdout
while working in an interactive session. Note this is a logger for the
entire module, which will apply to all environments started in the same
session. If you need a specific logger pass a ``logfile`` to
:func:`~sdk.init`
Args:
level(int): :mod:`logging` log level
name(str): logger name, will default to the root logger.
Returns:
None
### Response:
def add_stream_logger(level=logging.DEBUG, name=None):
"""
Add a stream logger. This can be used for printing all SDK calls to stdout
while working in an interactive session. Note this is a logger for the
entire module, which will apply to all environments started in the same
session. If you need a specific logger pass a ``logfile`` to
:func:`~sdk.init`
Args:
level(int): :mod:`logging` log level
name(str): logger name, will default to the root logger.
Returns:
None
"""
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setFormatter(get_default_log_formatter())
handler.setLevel(level)
logger.addHandler(handler) |
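A minimal usage sketch, assuming the function above (and its get_default_log_formatter helper) is importable from the SDK module:
import logging
add_stream_logger(level=logging.INFO)      # attach a stream handler to the root logger
logging.getLogger().info('SDK calls will now be echoed to the console')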
def authorize(self):
""" Use the magic of a unicorn and summon the set-top box to listen
to us.
/
,.. /
,' ';
,,.__ _,' /'; .
:',' ~~~~ '. '~
:' ( ) )::,
'. '. .=----=..-~ .;'
' ;' :: ':. '"
(: ': ;)
\\ '" ./
'" '"
Seriously, I've no idea what I'm doing here.
"""
# Read the version of the set-top box and write it back. Why? I've no
# idea.
version = self.con.makefile().readline()
self.con.send(version.encode())
# The set-top box returns with 2 bytes. I've no idea what they mean.
self.con.recv(2)
# The following reads and writes are used to authenticate. But I don't
# fully understand what is going on.
self.con.send(struct.pack('>B', 1))
msg = self.con.recv(4)
response = struct.unpack(">I", msg)
if response[0] != 0:
log.debug("Failed to authorize with set-top at %s:%s.",
self.ip, self.port)
raise AuthenticationError()
# Dunno where this is good for. But otherwise the client doesn't work.
self.con.send(b'0')
log.debug('Authorized successfully with set-top box at %s:%s.',
self.ip, self.port) | Use the magic of a unicorn and summon the set-top box to listen
to us.
/
,.. /
,' ';
,,.__ _,' /'; .
:',' ~~~~ '. '~
:' ( ) )::,
'. '. .=----=..-~ .;'
' ;' :: ':. '"
(: ': ;)
\\ '" ./
'" '"
Seriously, I've no idea what I'm doing here. | Below is the instruction that describes the task:
### Input:
Use the magic of a unicorn and summon the set-top box to listen
to us.
/
,.. /
,' ';
,,.__ _,' /'; .
:',' ~~~~ '. '~
:' ( ) )::,
'. '. .=----=..-~ .;'
' ;' :: ':. '"
(: ': ;)
\\ '" ./
'" '"
Seriously, I've no idea what I'm doing here.
### Response:
def authorize(self):
""" Use the magic of a unicorn and summon the set-top box to listen
to us.
/
,.. /
,' ';
,,.__ _,' /'; .
:',' ~~~~ '. '~
:' ( ) )::,
'. '. .=----=..-~ .;'
' ;' :: ':. '"
(: ': ;)
\\ '" ./
'" '"
Seriously, I've no idea what I'm doing here.
"""
# Read the version of the set-top box and write it back. Why? I've no
# idea.
version = self.con.makefile().readline()
self.con.send(version.encode())
# The set-top box returns with 2 bytes. I've no idea what they mean.
self.con.recv(2)
# The following reads and writes are used to authenticate. But I don't
# fully understand what is going on.
self.con.send(struct.pack('>B', 1))
msg = self.con.recv(4)
response = struct.unpack(">I", msg)
if response[0] != 0:
log.debug("Failed to authorize with set-top at %s:%s.",
self.ip, self.port)
raise AuthenticationError()
# Dunno where this is good for. But otherwise the client doesn't work.
self.con.send(b'0')
log.debug('Authorized successfully with set-top box at %s:%s.',
self.ip, self.port) |
def intersperse_hs_in_std_res(slice_, hs_dims, res):
"""Perform the insertions of place-holding rows and cols for insertions."""
for dim, inds in enumerate(slice_.inserted_hs_indices()):
if dim not in hs_dims:
continue
for i in inds:
res = np.insert(res, i, np.nan, axis=(dim - slice_.ndim))
return res | Perform the insertions of place-holding rows and cols for insertions. | Below is the instruction that describes the task:
### Input:
Perform the insertions of place-holding rows and cols for insertions.
### Response:
def intersperse_hs_in_std_res(slice_, hs_dims, res):
"""Perform the insertions of place-holding rows and cols for insertions."""
for dim, inds in enumerate(slice_.inserted_hs_indices()):
if dim not in hs_dims:
continue
for i in inds:
res = np.insert(res, i, np.nan, axis=(dim - slice_.ndim))
return res |
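The heart of the function is np.insert along a negative axis; a small standalone illustration of that call (it avoids the slice_ object, which would need a full cube fixture):
import numpy as np
res = np.arange(6.0).reshape(2, 3)
# insert a placeholder column of NaNs at index 1 along the last axis,
# which is what the loop above does for every inserted-subtotal position
print(np.insert(res, 1, np.nan, axis=-1))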
def init(**config):
""" Initialize the crypto backend.
The backend can be one of two plugins:
- 'x509' - Uses x509 certificates.
- 'gpg' - Uses GnuPG keys.
"""
global _implementation
global _validate_implementations
if config.get('crypto_backend') == 'gpg':
_implementation = gpg
else:
_implementation = x509
_validate_implementations = []
for mod in config.get('crypto_validate_backends', []):
if mod == 'gpg':
_validate_implementations.append(gpg)
elif mod == 'x509':
_validate_implementations.append(x509)
else:
raise ValueError("%r is not a valid crypto backend" % mod)
if not _validate_implementations:
_validate_implementations.append(_implementation) | Initialize the crypto backend.
The backend can be one of two plugins:
- 'x509' - Uses x509 certificates.
- 'gpg' - Uses GnuPG keys. | Below is the instruction that describes the task:
### Input:
Initialize the crypto backend.
The backend can be one of two plugins:
- 'x509' - Uses x509 certificates.
- 'gpg' - Uses GnuPG keys.
### Response:
def init(**config):
""" Initialize the crypto backend.
The backend can be one of two plugins:
- 'x509' - Uses x509 certificates.
- 'gpg' - Uses GnuPG keys.
"""
global _implementation
global _validate_implementations
if config.get('crypto_backend') == 'gpg':
_implementation = gpg
else:
_implementation = x509
_validate_implementations = []
for mod in config.get('crypto_validate_backends', []):
if mod == 'gpg':
_validate_implementations.append(gpg)
elif mod == 'x509':
_validate_implementations.append(x509)
else:
raise ValueError("%r is not a valid crypto backend" % mod)
if not _validate_implementations:
_validate_implementations.append(_implementation) |
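A usage sketch for the initializer above; the gpg and x509 submodules it assigns are assumed to be importable in the surrounding module, and the config keys are the ones the function actually reads:
# sign with GnuPG but accept only x509-signed messages when validating
init(crypto_backend='gpg', crypto_validate_backends=['x509'])
# with no arguments the x509 backend is used for both signing and validation
init()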
def _config_win32_nameservers(self, nameservers):
"""Configure a NameServer registry entry."""
# we call str() on nameservers to convert it from unicode to ascii
nameservers = str(nameservers)
split_char = self._determine_split_char(nameservers)
ns_list = nameservers.split(split_char)
for ns in ns_list:
if not ns in self.nameservers:
self.nameservers.append(ns) | Configure a NameServer registry entry. | Below is the instruction that describes the task:
### Input:
Configure a NameServer registry entry.
### Response:
def _config_win32_nameservers(self, nameservers):
"""Configure a NameServer registry entry."""
# we call str() on nameservers to convert it from unicode to ascii
nameservers = str(nameservers)
split_char = self._determine_split_char(nameservers)
ns_list = nameservers.split(split_char)
for ns in ns_list:
if not ns in self.nameservers:
self.nameservers.append(ns) |
def properties(obj, type=None, set=None):
'''
List properties for given btrfs object. The object can be path of BTRFS device,
mount point, or any directories/files inside the BTRFS filesystem.
General options:
* **type**: Possible types are s[ubvol], f[ilesystem], i[node] and d[evice].
* **force**: Force overwrite existing filesystem on the disk
* **set**: <key=value,key1=value1...> Options for a filesystem properties.
CLI Example:
.. code-block:: bash
salt '*' btrfs.properties /mountpoint
salt '*' btrfs.properties /dev/sda1 type=subvol set='ro=false,label="My Storage"'
'''
if type and type not in ['s', 'subvol', 'f', 'filesystem', 'i', 'inode', 'd', 'device']:
raise CommandExecutionError("Unknown property type: \"{0}\" specified".format(type))
cmd = ['btrfs']
cmd.append('property')
cmd.append(set and 'set' or 'list')
if type:
cmd.append('-t{0}'.format(type))
cmd.append(obj)
if set:
try:
for key, value in [[item.strip() for item in keyset.split("=")]
for keyset in set.split(",")]:
cmd.append(key)
cmd.append(value)
except Exception as ex:
raise CommandExecutionError(ex)
out = __salt__['cmd.run_all'](' '.join(cmd))
salt.utils.fsutils._verify_run(out)
if not set:
ret = {}
for prop, descr in six.iteritems(_parse_proplist(out['stdout'])):
ret[prop] = {'description': descr}
value = __salt__['cmd.run_all'](
"btrfs property get {0} {1}".format(obj, prop))['stdout']
ret[prop]['value'] = value and value.split("=")[-1] or "N/A"
return ret | List properties for given btrfs object. The object can be path of BTRFS device,
mount point, or any directories/files inside the BTRFS filesystem.
General options:
* **type**: Possible types are s[ubvol], f[ilesystem], i[node] and d[evice].
* **force**: Force overwrite existing filesystem on the disk
* **set**: <key=value,key1=value1...> Options for a filesystem properties.
CLI Example:
.. code-block:: bash
salt '*' btrfs.properties /mountpoint
salt '*' btrfs.properties /dev/sda1 type=subvol set='ro=false,label="My Storage"' | Below is the instruction that describes the task:
### Input:
List properties for given btrfs object. The object can be path of BTRFS device,
mount point, or any directories/files inside the BTRFS filesystem.
General options:
* **type**: Possible types are s[ubvol], f[ilesystem], i[node] and d[evice].
* **force**: Force overwrite existing filesystem on the disk
* **set**: <key=value,key1=value1...> Options for a filesystem properties.
CLI Example:
.. code-block:: bash
salt '*' btrfs.properties /mountpoint
salt '*' btrfs.properties /dev/sda1 type=subvol set='ro=false,label="My Storage"'
### Response:
def properties(obj, type=None, set=None):
'''
List properties for given btrfs object. The object can be path of BTRFS device,
mount point, or any directories/files inside the BTRFS filesystem.
General options:
* **type**: Possible types are s[ubvol], f[ilesystem], i[node] and d[evice].
* **force**: Force overwrite existing filesystem on the disk
* **set**: <key=value,key1=value1...> Options for a filesystem properties.
CLI Example:
.. code-block:: bash
salt '*' btrfs.properties /mountpoint
salt '*' btrfs.properties /dev/sda1 type=subvol set='ro=false,label="My Storage"'
'''
if type and type not in ['s', 'subvol', 'f', 'filesystem', 'i', 'inode', 'd', 'device']:
raise CommandExecutionError("Unknown property type: \"{0}\" specified".format(type))
cmd = ['btrfs']
cmd.append('property')
cmd.append(set and 'set' or 'list')
if type:
cmd.append('-t{0}'.format(type))
cmd.append(obj)
if set:
try:
for key, value in [[item.strip() for item in keyset.split("=")]
for keyset in set.split(",")]:
cmd.append(key)
cmd.append(value)
except Exception as ex:
raise CommandExecutionError(ex)
out = __salt__['cmd.run_all'](' '.join(cmd))
salt.utils.fsutils._verify_run(out)
if not set:
ret = {}
for prop, descr in six.iteritems(_parse_proplist(out['stdout'])):
ret[prop] = {'description': descr}
value = __salt__['cmd.run_all'](
"btrfs property get {0} {1}".format(obj, prop))['stdout']
ret[prop]['value'] = value and value.split("=")[-1] or "N/A"
return ret |
def _get_agent_grounding(agent):
"""Convert an agent to the corresponding PyBEL DSL object (to be filled with variants later)."""
def _get_id(_agent, key):
_id = _agent.db_refs.get(key)
if isinstance(_id, list):
_id = _id[0]
return _id
hgnc_id = _get_id(agent, 'HGNC')
if hgnc_id:
hgnc_name = hgnc_client.get_hgnc_name(hgnc_id)
if not hgnc_name:
logger.warning('Agent %s with HGNC ID %s has no HGNC name.',
agent, hgnc_id)
return
return protein('HGNC', hgnc_name)
uniprot_id = _get_id(agent, 'UP')
if uniprot_id:
return protein('UP', uniprot_id)
fplx_id = _get_id(agent, 'FPLX')
if fplx_id:
return protein('FPLX', fplx_id)
pfam_id = _get_id(agent, 'PF')
if pfam_id:
return protein('PFAM', pfam_id)
ip_id = _get_id(agent, 'IP')
if ip_id:
return protein('IP', ip_id)
fa_id = _get_id(agent, 'FA')
if fa_id:
return protein('NXPFA', fa_id)
chebi_id = _get_id(agent, 'CHEBI')
if chebi_id:
if chebi_id.startswith('CHEBI:'):
chebi_id = chebi_id[len('CHEBI:'):]
return abundance('CHEBI', chebi_id)
pubchem_id = _get_id(agent, 'PUBCHEM')
if pubchem_id:
return abundance('PUBCHEM', pubchem_id)
go_id = _get_id(agent, 'GO')
if go_id:
return bioprocess('GO', go_id)
mesh_id = _get_id(agent, 'MESH')
if mesh_id:
return bioprocess('MESH', mesh_id)
return | Convert an agent to the corresponding PyBEL DSL object (to be filled with variants later). | Below is the instruction that describes the task:
### Input:
Convert an agent to the corresponding PyBEL DSL object (to be filled with variants later).
### Response:
def _get_agent_grounding(agent):
"""Convert an agent to the corresponding PyBEL DSL object (to be filled with variants later)."""
def _get_id(_agent, key):
_id = _agent.db_refs.get(key)
if isinstance(_id, list):
_id = _id[0]
return _id
hgnc_id = _get_id(agent, 'HGNC')
if hgnc_id:
hgnc_name = hgnc_client.get_hgnc_name(hgnc_id)
if not hgnc_name:
logger.warning('Agent %s with HGNC ID %s has no HGNC name.',
agent, hgnc_id)
return
return protein('HGNC', hgnc_name)
uniprot_id = _get_id(agent, 'UP')
if uniprot_id:
return protein('UP', uniprot_id)
fplx_id = _get_id(agent, 'FPLX')
if fplx_id:
return protein('FPLX', fplx_id)
pfam_id = _get_id(agent, 'PF')
if pfam_id:
return protein('PFAM', pfam_id)
ip_id = _get_id(agent, 'IP')
if ip_id:
return protein('IP', ip_id)
fa_id = _get_id(agent, 'FA')
if fa_id:
return protein('NXPFA', fa_id)
chebi_id = _get_id(agent, 'CHEBI')
if chebi_id:
if chebi_id.startswith('CHEBI:'):
chebi_id = chebi_id[len('CHEBI:'):]
return abundance('CHEBI', chebi_id)
pubchem_id = _get_id(agent, 'PUBCHEM')
if pubchem_id:
return abundance('PUBCHEM', pubchem_id)
go_id = _get_id(agent, 'GO')
if go_id:
return bioprocess('GO', go_id)
mesh_id = _get_id(agent, 'MESH')
if mesh_id:
return bioprocess('MESH', mesh_id)
return |
def _Pcn_crp(x, dsz, Nv, dimN=2, dimC=1):
"""
Projection onto dictionary update constraint set: support
projection and normalisation. The result is cropped to the
support of the largest filter in the dictionary.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the
`dsz` parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
Returns
-------
y : ndarray
Projection of input onto constraint set
"""
return normalise(bcrop(x, dsz, dimN), dimN + dimC) | Projection onto dictionary update constraint set: support
projection and normalisation. The result is cropped to the
support of the largest filter in the dictionary.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the
`dsz` parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
Returns
-------
y : ndarray
Projection of input onto constraint set | Below is the instruction that describes the task:
### Input:
Projection onto dictionary update constraint set: support
projection and normalisation. The result is cropped to the
support of the largest filter in the dictionary.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the
`dsz` parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
Returns
-------
y : ndarray
Projection of input onto constraint set
### Response:
def _Pcn_crp(x, dsz, Nv, dimN=2, dimC=1):
"""
Projection onto dictionary update constraint set: support
projection and normalisation. The result is cropped to the
support of the largest filter in the dictionary.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the
`dsz` parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
Returns
-------
y : ndarray
Projection of input onto constraint set
"""
return normalise(bcrop(x, dsz, dimN), dimN + dimC) |
def record(until='escape', suppress=False, trigger_on_release=False):
"""
Records all keyboard events from all keyboards until the user presses the
given hotkey. Then returns the list of events recorded, of type
`keyboard.KeyboardEvent`. Pairs well with
`play(events)`.
Note: this is a blocking function.
Note: for more details on the keyboard hook and events see `hook`.
"""
start_recording()
wait(until, suppress=suppress, trigger_on_release=trigger_on_release)
return stop_recording() | Records all keyboard events from all keyboards until the user presses the
given hotkey. Then returns the list of events recorded, of type
`keyboard.KeyboardEvent`. Pairs well with
`play(events)`.
Note: this is a blocking function.
Note: for more details on the keyboard hook and events see `hook`. | Below is the instruction that describes the task:
### Input:
Records all keyboard events from all keyboards until the user presses the
given hotkey. Then returns the list of events recorded, of type
`keyboard.KeyboardEvent`. Pairs well with
`play(events)`.
Note: this is a blocking function.
Note: for more details on the keyboard hook and events see `hook`.
### Response:
def record(until='escape', suppress=False, trigger_on_release=False):
"""
Records all keyboard events from all keyboards until the user presses the
given hotkey. Then returns the list of events recorded, of type
`keyboard.KeyboardEvent`. Pairs well with
`play(events)`.
Note: this is a blocking function.
Note: for more details on the keyboard hook and events see `hook`.
"""
start_recording()
wait(until, suppress=suppress, trigger_on_release=trigger_on_release)
return stop_recording() |
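A usage sketch pairing record() with the play() function the docstring mentions; hooking the keyboard usually requires elevated privileges, and the hotkey name is illustrative:
events = record(until='esc')   # blocks until Esc is pressed
play(events)                   # replay the captured KeyboardEvent list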
def days(start, end=None):
"""Iterate over the days between the given datetime_tzs.
Args:
start: datetime_tz to start from.
end: (Optional) Date to end at, if not given the iterator will never
terminate.
Returns:
An iterator which generates datetime_tz objects a day apart.
"""
return iterate.between(start, datetime.timedelta(days=1), end) | Iterate over the days between the given datetime_tzs.
Args:
start: datetime_tz to start from.
end: (Optional) Date to end at, if not given the iterator will never
terminate.
Returns:
An iterator which generates datetime_tz objects a day apart. | Below is the instruction that describes the task:
### Input:
Iterate over the days between the given datetime_tzs.
Args:
start: datetime_tz to start from.
end: (Optional) Date to end at, if not given the iterator will never
terminate.
Returns:
An iterator which generates datetime_tz objects a day apart.
### Response:
def days(start, end=None):
"""Iterate over the days between the given datetime_tzs.
Args:
start: datetime_tz to start from.
end: (Optional) Date to end at, if not given the iterator will never
terminate.
Returns:
An iterator which generates datetime_tz objects a day apart.
"""
return iterate.between(start, datetime.timedelta(days=1), end) |
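A usage sketch; datetime_tz.datetime_tz.now() is assumed to be the timezone-aware constructor this module provides, and islice keeps the otherwise unbounded iterator finite:
import itertools
start = datetime_tz.datetime_tz.now()            # assumed tz-aware "now" constructor
for day in itertools.islice(days(start), 3):     # first three days only
    print(day)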
def deactivate(self):
""" deactivate the environment """
try:
self.phase = PHASE.DEACTIVATE
self.logger.info("Deactivating environment %s..." % self.namespace)
self.directory.rewrite_config = False
self.instantiate_features()
self._specialize()
for feature in self.features.run_order:
self.logger.info("Deactivating %s..." % feature[0])
self.run_action(feature, 'deactivate')
self.clear_all()
self._finalize()
except Exception:
self.logger.debug("", exc_info=sys.exc_info())
et, ei, tb = sys.exc_info()
reraise(et, ei, tb) | deactivate the environment | Below is the instruction that describes the task:
### Input:
deactivate the environment
### Response:
def deactivate(self):
""" deactivate the environment """
try:
self.phase = PHASE.DEACTIVATE
self.logger.info("Deactivating environment %s..." % self.namespace)
self.directory.rewrite_config = False
self.instantiate_features()
self._specialize()
for feature in self.features.run_order:
self.logger.info("Deactivating %s..." % feature[0])
self.run_action(feature, 'deactivate')
self.clear_all()
self._finalize()
except Exception:
self.logger.debug("", exc_info=sys.exc_info())
et, ei, tb = sys.exc_info()
reraise(et, ei, tb) |
def getsdm(*args, **kwargs):
""" Wrap sdmpy.SDM to get around schema change error """
try:
sdm = sdmpy.SDM(*args, **kwargs)
except XMLSyntaxError:
kwargs['use_xsd'] = False
sdm = sdmpy.SDM(*args, **kwargs)
return sdm | Wrap sdmpy.SDM to get around schema change error | Below is the instruction that describes the task:
### Input:
Wrap sdmpy.SDM to get around schema change error
### Response:
def getsdm(*args, **kwargs):
""" Wrap sdmpy.SDM to get around schema change error """
try:
sdm = sdmpy.SDM(*args, **kwargs)
except XMLSyntaxError:
kwargs['use_xsd'] = False
sdm = sdmpy.SDM(*args, **kwargs)
return sdm |
def reload(self):
"""
Reload the configuration from the file. This is in its own function
so that it can be called at any time by another class.
"""
self._conf = configparser.ConfigParser()
# Preserve the case of sections and keys.
self._conf.optionxform = str
self._conf.read(self.config_file_path)
if 'general' not in self._conf.keys():
raise IncompleteConfigurationFile('Missing the general section')
general = self._replace_data_types(dict(self._conf.items('general')))
self._conf.remove_section('general')
plugin = []
for section in self._conf.sections():
plugin.append(dict(self._conf.items(section)))
plugin[-1].update({'name': section})
plugin[-1] = self._replace_data_types(plugin[-1])
return (plugin, general) | Reload the configuration from the file. This is in its own function
so that it can be called at any time by another class. | Below is the instruction that describes the task:
### Input:
Reload the configuration from the file. This is in its own function
so that it can be called at any time by another class.
### Response:
def reload(self):
"""
Reload the configuration from the file. This is in its own function
so that it can be called at any time by another class.
"""
self._conf = configparser.ConfigParser()
# Preserve the case of sections and keys.
self._conf.optionxform = str
self._conf.read(self.config_file_path)
if 'general' not in self._conf.keys():
raise IncompleteConfigurationFile('Missing the general section')
general = self._replace_data_types(dict(self._conf.items('general')))
self._conf.remove_section('general')
plugin = []
for section in self._conf.sections():
plugin.append(dict(self._conf.items(section)))
plugin[-1].update({'name': section})
plugin[-1] = self._replace_data_types(plugin[-1])
return (plugin, general) |
def param_fetch_one(self, name):
'''initiate fetch of one parameter'''
try:
idx = int(name)
self.mav.param_request_read_send(self.target_system, self.target_component, "", idx)
except Exception:
self.mav.param_request_read_send(self.target_system, self.target_component, name, -1) | initiate fetch of one parameter | Below is the instruction that describes the task:
### Input:
initiate fetch of one parameter
### Response:
def param_fetch_one(self, name):
'''initiate fetch of one parameter'''
try:
idx = int(name)
self.mav.param_request_read_send(self.target_system, self.target_component, "", idx)
except Exception:
self.mav.param_request_read_send(self.target_system, self.target_component, name, -1) |
def get_file_contents(source_path: str) -> str:
"""
Loads the contents of the source into a string for execution using multiple
loading methods to handle cross-platform encoding edge cases. If none of
the load methods work, a string is returned that contains an error function
response that will be displayed when the step is run to alert the user to the
error.
:param source_path:
Path of the step file to load.
"""
open_funcs = [
functools.partial(codecs.open, source_path, encoding='utf-8'),
functools.partial(open, source_path, 'r')
]
for open_func in open_funcs:
try:
with open_func() as f:
return f.read()
except Exception:
pass
return (
'raise IOError("Unable to load step file at: {}")'
.format(source_path)
) | Loads the contents of the source into a string for execution using multiple
loading methods to handle cross-platform encoding edge cases. If none of
the load methods work, a string is returned that contains an error function
response that will be displayed when the step is run to alert the user to the
error.
:param source_path:
Path of the step file to load. | Below is the instruction that describes the task:
### Input:
Loads the contents of the source into a string for execution using multiple
loading methods to handle cross-platform encoding edge cases. If none of
the load methods work, a string is returned that contains an error function
response that will be displayed when the step is run to alert the user to the
error.
:param source_path:
Path of the step file to load.
### Response:
def get_file_contents(source_path: str) -> str:
"""
Loads the contents of the source into a string for execution using multiple
loading methods to handle cross-platform encoding edge cases. If none of
the load methods work, a string is returned that contains an error function
response that will be displayed when the step is run to alert the user to the
error.
:param source_path:
Path of the step file to load.
"""
open_funcs = [
functools.partial(codecs.open, source_path, encoding='utf-8'),
functools.partial(open, source_path, 'r')
]
for open_func in open_funcs:
try:
with open_func() as f:
return f.read()
except Exception:
pass
return (
'raise IOError("Unable to load step file at: {}")'
.format(source_path)
) |
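A usage sketch; the step-file path is hypothetical, and the exec call only mirrors how a loaded step source might eventually be run:
source = get_file_contents('steps/S01-load-data.py')   # hypothetical step file
exec(compile(source, 'S01-load-data.py', 'exec'))      # run it, or surface the IOError fallback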
def namedb_get_account_tokens(cur, address):
"""
Get an account's tokens
Returns the list of tokens on success
Returns None if not found
"""
sql = 'SELECT DISTINCT type FROM accounts WHERE address = ?;'
args = (address,)
rows = namedb_query_execute(cur, sql, args)
ret = []
for row in rows:
ret.append(row['type'])
return ret | Get an account's tokens
Returns the list of tokens on success
Returns None if not found | Below is the instruction that describes the task:
### Input:
Get an account's tokens
Returns the list of tokens on success
Returns None if not found
### Response:
def namedb_get_account_tokens(cur, address):
"""
Get an account's tokens
Returns the list of tokens on success
Returns None if not found
"""
sql = 'SELECT DISTINCT type FROM accounts WHERE address = ?;'
args = (address,)
rows = namedb_query_execute(cur, sql, args)
ret = []
for row in rows:
ret.append(row['type'])
return ret |
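The query itself can be exercised directly with sqlite3; a minimal sketch under the assumption that namedb_query_execute behaves like cursor.execute (which is how it is used above):
import sqlite3
con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE accounts (address TEXT, type TEXT)')
con.executemany('INSERT INTO accounts VALUES (?, ?)',
                [('1Addr', 'STACKS'), ('1Addr', 'STACKS'), ('1Addr', 'GAIA')])
cur = con.execute('SELECT DISTINCT type FROM accounts WHERE address = ?;', ('1Addr',))
print([row[0] for row in cur])   # e.g. ['STACKS', 'GAIA']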
def _backtrace(elt, dom):
'''Return a "backtrace" from the given element to the DOM root,
in XPath syntax.
'''
s = ''
while elt != dom:
name, parent = elt.nodeName, elt.parentNode
if parent is None: break
matches = [ c for c in _child_elements(parent)
if c.nodeName == name ]
if len(matches) == 1:
s = '/' + name + s
else:
i = matches.index(elt) + 1
s = ('/%s[%d]' % (name, i)) + s
elt = parent
return s | Return a "backtrace" from the given element to the DOM root,
in XPath syntax. | Below is the instruction that describes the task:
### Input:
Return a "backtrace" from the given element to the DOM root,
in XPath syntax.
### Response:
def _backtrace(elt, dom):
'''Return a "backtrace" from the given element to the DOM root,
in XPath syntax.
'''
s = ''
while elt != dom:
name, parent = elt.nodeName, elt.parentNode
if parent is None: break
matches = [ c for c in _child_elements(parent)
if c.nodeName == name ]
if len(matches) == 1:
s = '/' + name + s
else:
i = matches.index(elt) + 1
s = ('/%s[%d]' % (name, i)) + s
elt = parent
return s |
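A small self-contained sketch; the _child_elements helper is not shown above, so a plausible stand-in (element-node children only) is defined here as an assumption:
from xml.dom.minidom import parseString
def _child_elements(node):
    # assumed helper: keep only the element-node children
    return [c for c in node.childNodes if c.nodeType == c.ELEMENT_NODE]
dom = parseString('<root><a/><b/><b><c/></b></root>').documentElement
target = _child_elements(_child_elements(dom)[2])[0]   # the <c/> element
print(_backtrace(target, dom))                          # -> /b[2]/c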
def write_k_record(self, *args):
"""
Write a K record::
writer.write_k_record_extensions([
('FXA', 3), ('SIU', 2), ('ENL', 3),
])
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
# -> J030810FXA1112SIU1315ENL
# -> K02030402313002
:param time: UTC time of the k record (default:
:meth:`~datetime.datetime.utcnow`)
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_k_record_extensions`
"""
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
extensions = args[0]
time = None
else:
time, extensions = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
if not (isinstance(extensions, list) and
isinstance(self.k_record_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.k_record_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.k_record_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('K', record) | Write a K record::
writer.write_k_record_extensions([
('FXA', 3), ('SIU', 2), ('ENL', 3),
])
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
# -> J030810FXA1112SIU1315ENL
# -> K02030402313002
:param time: UTC time of the k record (default:
:meth:`~datetime.datetime.utcnow`)
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_k_record_extensions` | Below is the instruction that describes the task:
### Input:
Write a K record::
writer.write_k_record_extensions([
('FXA', 3), ('SIU', 2), ('ENL', 3),
])
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
# -> J030810FXA1112SIU1315ENL
# -> K02030402313002
:param time: UTC time of the k record (default:
:meth:`~datetime.datetime.utcnow`)
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_k_record_extensions`
### Response:
def write_k_record(self, *args):
"""
Write a K record::
writer.write_k_record_extensions([
('FXA', 3), ('SIU', 2), ('ENL', 3),
])
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
# -> J030810FXA1112SIU1315ENL
# -> K02030402313002
:param time: UTC time of the k record (default:
:meth:`~datetime.datetime.utcnow`)
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_k_record_extensions`
"""
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
extensions = args[0]
time = None
else:
time, extensions = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
if not (isinstance(extensions, list) and
isinstance(self.k_record_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.k_record_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.k_record_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('K', record) |
def mapper(module, entry_point,
modpath='pkg_resources', globber='root', modname='es6',
fext=JS_EXT, registry=_utils):
"""
General mapper
Loads components from the micro registry.
"""
modname_f = modname if callable(modname) else _utils['modname'][modname]
return {
modname_f(modname_fragments): join(base, subpath)
for modname_fragments, base, subpath in modgen(
module, entry_point=entry_point,
modpath=modpath, globber=globber,
fext=fext, registry=_utils)
} | General mapper
Loads components from the micro registry. | Below is the instruction that describes the task:
### Input:
General mapper
Loads components from the micro registry.
### Response:
def mapper(module, entry_point,
modpath='pkg_resources', globber='root', modname='es6',
fext=JS_EXT, registry=_utils):
"""
General mapper
Loads components from the micro registry.
"""
modname_f = modname if callable(modname) else _utils['modname'][modname]
return {
modname_f(modname_fragments): join(base, subpath)
for modname_fragments, base, subpath in modgen(
module, entry_point=entry_point,
modpath=modpath, globber=globber,
fext=fext, registry=_utils)
} |
def set_secret_key(token):
"""
Initializes an Authentication and sets it as the new default global authentication.
It also performs some checks before saving the authentication.
:Example
>>> # Expected format for secret key:
>>> import payplug
>>> payplug.set_secret_key('sk_test_somerandomcharacters')
:param token: your secret token (live or sandbox)
:type token: string
"""
if not isinstance(token, string_types):
raise exceptions.ConfigurationError('Expected string value for token.')
config.secret_key = token | Initializes an Authentication and sets it as the new default global authentication.
It also performs some checks before saving the authentication.
:Example
>>> # Expected format for secret key:
>>> import payplug
>>> payplug.set_secret_key('sk_test_somerandomcharacters')
:param token: your secret token (live or sandbox)
:type token: string | Below is the instruction that describes the task:
### Input:
Initializes an Authentication and sets it as the new default global authentication.
It also performs some checks before saving the authentication.
:Example
>>> # Expected format for secret key:
>>> import payplug
>>> payplug.set_secret_key('sk_test_somerandomcharacters')
:param token: your secret token (live or sandbox)
:type token: string
### Response:
def set_secret_key(token):
"""
Initializes an Authentication and sets it as the new default global authentication.
It also performs some checks before saving the authentication.
:Example
>>> # Expected format for secret key:
>>> import payplug
>>> payplug.set_secret_key('sk_test_somerandomcharacters')
:param token: your secret token (live or sandbox)
:type token: string
"""
if not isinstance(token, string_types):
raise exceptions.ConfigurationError('Expected string value for token.')
config.secret_key = token |
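A minimal usage sketch for the helper above; the key string is a placeholder, and the exception import path follows the code's own reference to payplug.exceptions.
import payplug
from payplug import exceptions
payplug.set_secret_key('sk_test_somerandomcharacters')  # stored on payplug.config.secret_key
try:
    payplug.set_secret_key(42)  # non-string tokens are rejected
except exceptions.ConfigurationError as err:
    print('rejected:', err)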
def valuecount(table, field, value, missing=None):
"""
Count the number of occurrences of `value` under the given field. Returns
the absolute count and relative frequency as a pair. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'],
... ['a', 1],
... ['b', 2],
... ['b', 7]]
>>> etl.valuecount(table, 'foo', 'b')
(2, 0.6666666666666666)
The `field` argument can be a single field name or index (starting from
zero) or a tuple of field names and/or indexes.
"""
total = 0
vs = 0
for v in values(table, field, missing=missing):
total += 1
if v == value:
vs += 1
return vs, float(vs)/total | Count the number of occurrences of `value` under the given field. Returns
the absolute count and relative frequency as a pair. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'],
... ['a', 1],
... ['b', 2],
... ['b', 7]]
>>> etl.valuecount(table, 'foo', 'b')
(2, 0.6666666666666666)
The `field` argument can be a single field name or index (starting from
zero) or a tuple of field names and/or indexes. | Below is the instruction that describes the task:
### Input:
Count the number of occurrences of `value` under the given field. Returns
the absolute count and relative frequency as a pair. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'],
... ['a', 1],
... ['b', 2],
... ['b', 7]]
>>> etl.valuecount(table, 'foo', 'b')
(2, 0.6666666666666666)
The `field` argument can be a single field name or index (starting from
zero) or a tuple of field names and/or indexes.
### Response:
def valuecount(table, field, value, missing=None):
"""
Count the number of occurrences of `value` under the given field. Returns
the absolute count and relative frequency as a pair. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'],
... ['a', 1],
... ['b', 2],
... ['b', 7]]
>>> etl.valuecount(table, 'foo', 'b')
(2, 0.6666666666666666)
The `field` argument can be a single field name or index (starting from
zero) or a tuple of field names and/or indexes.
"""
total = 0
vs = 0
for v in values(table, field, missing=missing):
total += 1
if v == value:
vs += 1
return vs, float(vs)/total |
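A short sketch of the composite-field form mentioned at the end of the docstring, assuming petl is installed; the table is made up.
import petl as etl
table = [['foo', 'bar'],
         ['a', 1],
         ['b', 2],
         ['b', 2],
         ['b', 7]]
count, frequency = etl.valuecount(table, ('foo', 'bar'), ('b', 2))  # composite key ('b', 2)
print(count, frequency)  # 2 0.5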
def _calc_mask(self):
"""Computes a boolean mask from the user defined constraints."""
mask = []
for row in self._constraints:
mask.append(tuple(x is None for x in row))
        return tuple(mask) | Computes a boolean mask from the user defined constraints. | Below is the instruction that describes the task:
### Input:
Computes a boolean mask from the user defined constraints.
### Response:
def _calc_mask(self):
"""Computes a boolean mask from the user defined constraints."""
mask = []
for row in self._constraints:
mask.append(tuple(x is None for x in row))
return tuple(mask) |
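Since the method only reads self._constraints, the same logic can be sketched standalone; the constraint rows below are invented.
constraints = [(1, None, 3),
               (None, None, 0)]
mask = tuple(tuple(x is None for x in row) for row in constraints)
print(mask)  # ((False, True, False), (True, True, False))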
def _background_gradient(s, cmap='PuBu', low=0, high=0,
text_color_threshold=0.408):
"""
Color background in a range according to the data.
"""
if (not isinstance(text_color_threshold, (float, int)) or
not 0 <= text_color_threshold <= 1):
msg = "`text_color_threshold` must be a value from 0 to 1."
raise ValueError(msg)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = s.values.min()
smax = s.values.max()
rng = smax - smin
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
# matplotlib colors.Normalize modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
def relative_luminance(rgba):
"""
Calculate relative luminance of a color.
The calculation adheres to the W3C standards
(https://www.w3.org/WAI/GL/wiki/Relative_luminance)
Parameters
----------
color : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
"""
r, g, b = (
x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055 ** 2.4)
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def css(rgba):
dark = relative_luminance(rgba) < text_color_threshold
text_color = '#f1f1f1' if dark else '#000000'
return 'background-color: {b};color: {c};'.format(
b=colors.rgb2hex(rgba), c=text_color
)
if s.ndim == 1:
return [css(rgba) for rgba in rgbas]
else:
return pd.DataFrame(
[[css(rgba) for rgba in row] for row in rgbas],
index=s.index, columns=s.columns
            ) | Color background in a range according to the data. | Below is the instruction that describes the task:
### Input:
Color background in a range according to the data.
### Response:
def _background_gradient(s, cmap='PuBu', low=0, high=0,
text_color_threshold=0.408):
"""
Color background in a range according to the data.
"""
if (not isinstance(text_color_threshold, (float, int)) or
not 0 <= text_color_threshold <= 1):
msg = "`text_color_threshold` must be a value from 0 to 1."
raise ValueError(msg)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = s.values.min()
smax = s.values.max()
rng = smax - smin
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
# matplotlib colors.Normalize modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
def relative_luminance(rgba):
"""
Calculate relative luminance of a color.
The calculation adheres to the W3C standards
(https://www.w3.org/WAI/GL/wiki/Relative_luminance)
Parameters
----------
color : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
"""
r, g, b = (
x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055 ** 2.4)
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def css(rgba):
dark = relative_luminance(rgba) < text_color_threshold
text_color = '#f1f1f1' if dark else '#000000'
return 'background-color: {b};color: {c};'.format(
b=colors.rgb2hex(rgba), c=text_color
)
if s.ndim == 1:
return [css(rgba) for rgba in rgbas]
else:
return pd.DataFrame(
[[css(rgba) for rgba in row] for row in rgbas],
index=s.index, columns=s.columns
) |
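This private helper backs the public Styler.background_gradient method; a hedged sketch of typical use on the pandas version this code comes from (matplotlib is required for the colormap).
import numpy as np
import pandas as pd
df = pd.DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
styler = df.style.background_gradient(cmap='PuBu', text_color_threshold=0.408)
html = styler.render()  # HTML with per-cell 'background-color: ...;color: ...;' rules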
def js_query(self, query: str) -> Awaitable:
"""Send query to related DOM on browser.
:param str query: single string which indicates query type.
"""
if self.connected:
self.js_exec(query, self.__reqid)
fut = Future() # type: Future[str]
self.__tasks[self.__reqid] = fut
self.__reqid += 1
return fut
f = Future() # type: Future[None]
f.set_result(None)
return f | Send query to related DOM on browser.
:param str query: single string which indicates query type. | Below is the instruction that describes the task:
### Input:
Send query to related DOM on browser.
:param str query: single string which indicates query type.
### Response:
def js_query(self, query: str) -> Awaitable:
"""Send query to related DOM on browser.
:param str query: single string which indicates query type.
"""
if self.connected:
self.js_exec(query, self.__reqid)
fut = Future() # type: Future[str]
self.__tasks[self.__reqid] = fut
self.__reqid += 1
return fut
f = Future() # type: Future[None]
f.set_result(None)
return f |
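A hedged sketch of how a caller awaits the returned future on a mounted wdom node; the 'scrollTop' query keyword is an assumption for illustration, not a documented value.
async def read_scroll(node):
    # resolves with the browser's reply, or with None when the node is not connected
    return await node.js_query('scrollTop')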
def _ini_format(stream, options):
"""format options using the INI format"""
for optname, optdict, value in options:
value = _format_option_value(optdict, value)
help_opt = optdict.get("help")
if help_opt:
help_opt = normalize_text(help_opt, line_len=79, indent="# ")
print(file=stream)
print(help_opt, file=stream)
else:
print(file=stream)
if value is None:
print("#%s=" % optname, file=stream)
else:
value = str(value).strip()
if re.match(r"^([\w-]+,)+[\w-]+$", str(value)):
separator = "\n " + " " * len(optname)
value = separator.join(x + "," for x in str(value).split(","))
# remove trailing ',' from last element of the list
value = value[:-1]
            print("%s=%s" % (optname, value), file=stream) | format options using the INI format | Below is the instruction that describes the task:
### Input:
format options using the INI format
### Response:
def _ini_format(stream, options):
"""format options using the INI format"""
for optname, optdict, value in options:
value = _format_option_value(optdict, value)
help_opt = optdict.get("help")
if help_opt:
help_opt = normalize_text(help_opt, line_len=79, indent="# ")
print(file=stream)
print(help_opt, file=stream)
else:
print(file=stream)
if value is None:
print("#%s=" % optname, file=stream)
else:
value = str(value).strip()
if re.match(r"^([\w-]+,)+[\w-]+$", str(value)):
separator = "\n " + " " * len(optname)
value = separator.join(x + "," for x in str(value).split(","))
# remove trailing ',' from last element of the list
value = value[:-1]
print("%s=%s" % (optname, value), file=stream) |
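A sketch of the expected (optname, optdict, value) input shape, assuming the surrounding module's _format_option_value and normalize_text helpers are importable; the option names and help strings are made up.
import sys
options = [
    ('max-line-length', {'help': 'Maximum number of characters on a single line.'}, 100),
    ('ignore', {'help': 'Comma separated list of names to skip.'}, 'CVS,git'),
]
_ini_format(sys.stdout, options)
# prints something like:
# # Maximum number of characters on a single line.
# max-line-length=100
# # Comma separated list of names to skip.
# ignore=CVS,
#        git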
def sanitize(vpc_config):
"""
Checks that an instance of VpcConfig has the expected keys and values, removes unexpected keys,
and raises ValueErrors if any expectations are violated
Args:
vpc_config (dict): a VpcConfig dict containing 'Subnets' and 'SecurityGroupIds'
Returns:
A valid VpcConfig dict containing only 'Subnets' and 'SecurityGroupIds' from the vpc_config parameter
If vpc_config parameter is None, returns None
Raises:
ValueError if any expectations are violated:
* vpc_config must be a non-empty dict
* vpc_config must have key `Subnets` and the value must be a non-empty list
* vpc_config must have key `SecurityGroupIds` and the value must be a non-empty list
"""
if vpc_config is None:
return vpc_config
elif type(vpc_config) is not dict:
raise ValueError('vpc_config is not a dict: {}'.format(vpc_config))
elif not vpc_config:
raise ValueError('vpc_config is empty')
subnets = vpc_config.get(SUBNETS_KEY)
if subnets is None:
raise ValueError('vpc_config is missing key: {}'.format(SUBNETS_KEY))
if type(subnets) is not list:
raise ValueError('vpc_config value for {} is not a list: {}'.format(SUBNETS_KEY, subnets))
elif not subnets:
raise ValueError('vpc_config value for {} is empty'.format(SUBNETS_KEY))
security_group_ids = vpc_config.get(SECURITY_GROUP_IDS_KEY)
if security_group_ids is None:
raise ValueError('vpc_config is missing key: {}'.format(SECURITY_GROUP_IDS_KEY))
if type(security_group_ids) is not list:
raise ValueError('vpc_config value for {} is not a list: {}'.format(SECURITY_GROUP_IDS_KEY, security_group_ids))
elif not security_group_ids:
raise ValueError('vpc_config value for {} is empty'.format(SECURITY_GROUP_IDS_KEY))
return to_dict(subnets, security_group_ids) | Checks that an instance of VpcConfig has the expected keys and values, removes unexpected keys,
and raises ValueErrors if any expectations are violated
Args:
vpc_config (dict): a VpcConfig dict containing 'Subnets' and 'SecurityGroupIds'
Returns:
A valid VpcConfig dict containing only 'Subnets' and 'SecurityGroupIds' from the vpc_config parameter
If vpc_config parameter is None, returns None
Raises:
ValueError if any expectations are violated:
* vpc_config must be a non-empty dict
* vpc_config must have key `Subnets` and the value must be a non-empty list
* vpc_config must have key `SecurityGroupIds` and the value must be a non-empty list | Below is the instruction that describes the task:
### Input:
Checks that an instance of VpcConfig has the expected keys and values, removes unexpected keys,
and raises ValueErrors if any expectations are violated
Args:
vpc_config (dict): a VpcConfig dict containing 'Subnets' and 'SecurityGroupIds'
Returns:
A valid VpcConfig dict containing only 'Subnets' and 'SecurityGroupIds' from the vpc_config parameter
If vpc_config parameter is None, returns None
Raises:
ValueError if any expectations are violated:
* vpc_config must be a non-empty dict
* vpc_config must have key `Subnets` and the value must be a non-empty list
* vpc_config must have key `SecurityGroupIds` and the value must be a non-empty list
### Response:
def sanitize(vpc_config):
"""
Checks that an instance of VpcConfig has the expected keys and values, removes unexpected keys,
and raises ValueErrors if any expectations are violated
Args:
vpc_config (dict): a VpcConfig dict containing 'Subnets' and 'SecurityGroupIds'
Returns:
A valid VpcConfig dict containing only 'Subnets' and 'SecurityGroupIds' from the vpc_config parameter
If vpc_config parameter is None, returns None
Raises:
ValueError if any expectations are violated:
* vpc_config must be a non-empty dict
* vpc_config must have key `Subnets` and the value must be a non-empty list
* vpc_config must have key `SecurityGroupIds` and the value must be a non-empty list
"""
if vpc_config is None:
return vpc_config
elif type(vpc_config) is not dict:
raise ValueError('vpc_config is not a dict: {}'.format(vpc_config))
elif not vpc_config:
raise ValueError('vpc_config is empty')
subnets = vpc_config.get(SUBNETS_KEY)
if subnets is None:
raise ValueError('vpc_config is missing key: {}'.format(SUBNETS_KEY))
if type(subnets) is not list:
raise ValueError('vpc_config value for {} is not a list: {}'.format(SUBNETS_KEY, subnets))
elif not subnets:
raise ValueError('vpc_config value for {} is empty'.format(SUBNETS_KEY))
security_group_ids = vpc_config.get(SECURITY_GROUP_IDS_KEY)
if security_group_ids is None:
raise ValueError('vpc_config is missing key: {}'.format(SECURITY_GROUP_IDS_KEY))
if type(security_group_ids) is not list:
raise ValueError('vpc_config value for {} is not a list: {}'.format(SECURITY_GROUP_IDS_KEY, security_group_ids))
elif not security_group_ids:
raise ValueError('vpc_config value for {} is empty'.format(SECURITY_GROUP_IDS_KEY))
return to_dict(subnets, security_group_ids) |
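A hedged sketch of the expected behaviour; the subnet and security group ids are placeholders, and the stated return value assumes to_dict simply rebuilds the two-key dict.
vpc_config = {
    'Subnets': ['subnet-0a1b2c3d'],
    'SecurityGroupIds': ['sg-0e9f8d7c'],
    'SomeOtherKey': 'dropped',
}
clean = sanitize(vpc_config)
# clean == {'Subnets': ['subnet-0a1b2c3d'], 'SecurityGroupIds': ['sg-0e9f8d7c']}
# sanitize(None) returns None; sanitize({'Subnets': []}) raises ValueError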
def encode(
self, word, max_length=4, var='American', reverse=False, zero_pad=True
):
"""Return the Soundex code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
var : str
The variant of the algorithm to employ (defaults to ``American``):
- ``American`` follows the American Soundex algorithm, as
described at :cite:`US:2007` and in :cite:`Knuth:1998`; this
is also called Miracode
- ``special`` follows the rules from the 1880-1910 US Census
retrospective re-analysis, in which h & w are not treated as
blocking consonants but as vowels. Cf. :cite:`Repici:2013`.
- ``Census`` follows the rules laid out in GIL 55
:cite:`US:1997` by the US Census, including coding prefixed
and unprefixed versions of some names
reverse : bool
Reverse the word before computing the selected Soundex (defaults to
False); This results in "Reverse Soundex", which is useful for
blocking in cases where the initial elements may be in error.
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The Soundex value
Examples
--------
>>> pe = Soundex()
>>> pe.encode("Christopher")
'C623'
>>> pe.encode("Niall")
'N400'
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Schmidt')
'S530'
>>> pe.encode('Christopher', max_length=-1)
'C623160000000000000000000000000000000000000000000000000000000000'
>>> pe.encode('Christopher', max_length=-1, zero_pad=False)
'C62316'
>>> pe.encode('Christopher', reverse=True)
'R132'
>>> pe.encode('Ashcroft')
'A261'
>>> pe.encode('Asicroft')
'A226'
>>> pe.encode('Ashcroft', var='special')
'A226'
>>> pe.encode('Asicroft', var='special')
'A226'
"""
# Require a max_length of at least 4 and not more than 64
if max_length != -1:
max_length = min(max(4, max_length), 64)
else:
max_length = 64
# uppercase, normalize, decompose, and filter non-A-Z out
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
if var == 'Census':
if word[:3] in {'VAN', 'CON'} and len(word) > 4:
return (
soundex(word, max_length, 'American', reverse, zero_pad),
soundex(
word[3:], max_length, 'American', reverse, zero_pad
),
)
if word[:2] in {'DE', 'DI', 'LA', 'LE'} and len(word) > 3:
return (
soundex(word, max_length, 'American', reverse, zero_pad),
soundex(
word[2:], max_length, 'American', reverse, zero_pad
),
)
# Otherwise, proceed as usual (var='American' mode, ostensibly)
word = ''.join(c for c in word if c in self._uc_set)
# Nothing to convert, return base case
if not word:
if zero_pad:
return '0' * max_length
return '0'
# Reverse word if computing Reverse Soundex
if reverse:
word = word[::-1]
# apply the Soundex algorithm
sdx = word.translate(self._trans)
if var == 'special':
sdx = sdx.replace('9', '0') # special rule for 1880-1910 census
else:
sdx = sdx.replace('9', '') # rule 1
sdx = self._delete_consecutive_repeats(sdx) # rule 3
if word[0] in 'HW':
sdx = word[0] + sdx
else:
sdx = word[0] + sdx[1:]
sdx = sdx.replace('0', '') # rule 1
if zero_pad:
sdx += '0' * max_length # rule 4
return sdx[:max_length] | Return the Soundex code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
var : str
The variant of the algorithm to employ (defaults to ``American``):
- ``American`` follows the American Soundex algorithm, as
described at :cite:`US:2007` and in :cite:`Knuth:1998`; this
is also called Miracode
- ``special`` follows the rules from the 1880-1910 US Census
retrospective re-analysis, in which h & w are not treated as
blocking consonants but as vowels. Cf. :cite:`Repici:2013`.
- ``Census`` follows the rules laid out in GIL 55
:cite:`US:1997` by the US Census, including coding prefixed
and unprefixed versions of some names
reverse : bool
Reverse the word before computing the selected Soundex (defaults to
False); This results in "Reverse Soundex", which is useful for
blocking in cases where the initial elements may be in error.
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The Soundex value
Examples
--------
>>> pe = Soundex()
>>> pe.encode("Christopher")
'C623'
>>> pe.encode("Niall")
'N400'
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Schmidt')
'S530'
>>> pe.encode('Christopher', max_length=-1)
'C623160000000000000000000000000000000000000000000000000000000000'
>>> pe.encode('Christopher', max_length=-1, zero_pad=False)
'C62316'
>>> pe.encode('Christopher', reverse=True)
'R132'
>>> pe.encode('Ashcroft')
'A261'
>>> pe.encode('Asicroft')
'A226'
>>> pe.encode('Ashcroft', var='special')
'A226'
>>> pe.encode('Asicroft', var='special')
'A226' | Below is the instruction that describes the task:
### Input:
Return the Soundex code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
var : str
The variant of the algorithm to employ (defaults to ``American``):
- ``American`` follows the American Soundex algorithm, as
described at :cite:`US:2007` and in :cite:`Knuth:1998`; this
is also called Miracode
- ``special`` follows the rules from the 1880-1910 US Census
retrospective re-analysis, in which h & w are not treated as
blocking consonants but as vowels. Cf. :cite:`Repici:2013`.
- ``Census`` follows the rules laid out in GIL 55
:cite:`US:1997` by the US Census, including coding prefixed
and unprefixed versions of some names
reverse : bool
Reverse the word before computing the selected Soundex (defaults to
False); This results in "Reverse Soundex", which is useful for
blocking in cases where the initial elements may be in error.
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The Soundex value
Examples
--------
>>> pe = Soundex()
>>> pe.encode("Christopher")
'C623'
>>> pe.encode("Niall")
'N400'
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Schmidt')
'S530'
>>> pe.encode('Christopher', max_length=-1)
'C623160000000000000000000000000000000000000000000000000000000000'
>>> pe.encode('Christopher', max_length=-1, zero_pad=False)
'C62316'
>>> pe.encode('Christopher', reverse=True)
'R132'
>>> pe.encode('Ashcroft')
'A261'
>>> pe.encode('Asicroft')
'A226'
>>> pe.encode('Ashcroft', var='special')
'A226'
>>> pe.encode('Asicroft', var='special')
'A226'
### Response:
def encode(
self, word, max_length=4, var='American', reverse=False, zero_pad=True
):
"""Return the Soundex code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
var : str
The variant of the algorithm to employ (defaults to ``American``):
- ``American`` follows the American Soundex algorithm, as
described at :cite:`US:2007` and in :cite:`Knuth:1998`; this
is also called Miracode
- ``special`` follows the rules from the 1880-1910 US Census
retrospective re-analysis, in which h & w are not treated as
blocking consonants but as vowels. Cf. :cite:`Repici:2013`.
- ``Census`` follows the rules laid out in GIL 55
:cite:`US:1997` by the US Census, including coding prefixed
and unprefixed versions of some names
reverse : bool
Reverse the word before computing the selected Soundex (defaults to
False); This results in "Reverse Soundex", which is useful for
blocking in cases where the initial elements may be in error.
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The Soundex value
Examples
--------
>>> pe = Soundex()
>>> pe.encode("Christopher")
'C623'
>>> pe.encode("Niall")
'N400'
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Schmidt')
'S530'
>>> pe.encode('Christopher', max_length=-1)
'C623160000000000000000000000000000000000000000000000000000000000'
>>> pe.encode('Christopher', max_length=-1, zero_pad=False)
'C62316'
>>> pe.encode('Christopher', reverse=True)
'R132'
>>> pe.encode('Ashcroft')
'A261'
>>> pe.encode('Asicroft')
'A226'
>>> pe.encode('Ashcroft', var='special')
'A226'
>>> pe.encode('Asicroft', var='special')
'A226'
"""
# Require a max_length of at least 4 and not more than 64
if max_length != -1:
max_length = min(max(4, max_length), 64)
else:
max_length = 64
# uppercase, normalize, decompose, and filter non-A-Z out
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
if var == 'Census':
if word[:3] in {'VAN', 'CON'} and len(word) > 4:
return (
soundex(word, max_length, 'American', reverse, zero_pad),
soundex(
word[3:], max_length, 'American', reverse, zero_pad
),
)
if word[:2] in {'DE', 'DI', 'LA', 'LE'} and len(word) > 3:
return (
soundex(word, max_length, 'American', reverse, zero_pad),
soundex(
word[2:], max_length, 'American', reverse, zero_pad
),
)
# Otherwise, proceed as usual (var='American' mode, ostensibly)
word = ''.join(c for c in word if c in self._uc_set)
# Nothing to convert, return base case
if not word:
if zero_pad:
return '0' * max_length
return '0'
# Reverse word if computing Reverse Soundex
if reverse:
word = word[::-1]
# apply the Soundex algorithm
sdx = word.translate(self._trans)
if var == 'special':
sdx = sdx.replace('9', '0') # special rule for 1880-1910 census
else:
sdx = sdx.replace('9', '') # rule 1
sdx = self._delete_consecutive_repeats(sdx) # rule 3
if word[0] in 'HW':
sdx = word[0] + sdx
else:
sdx = word[0] + sdx[1:]
sdx = sdx.replace('0', '') # rule 1
if zero_pad:
sdx += '0' * max_length # rule 4
return sdx[:max_length] |
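The doctests above cover the American and special variants; a brief hedged sketch of the Census variant, which can return a pair of codes for prefixed surnames (import path as published by abydos).
from abydos.phonetic import Soundex
pe = Soundex()
pe.encode('Smith')                    # 'S530'
pe.encode('VanDeusen', var='Census')  # a 2-tuple: codes for the full name and for 'DEUSEN'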
def make_keys_safe(dct):
"""Modify the keys in |dct| to be valid attribute names."""
result = {}
for key, val in dct.items():
key = key.replace('-', '_')
if key in keyword.kwlist:
key = key + '_'
result[key] = val
    return result | Modify the keys in |dct| to be valid attribute names. | Below is the instruction that describes the task:
### Input:
Modify the keys in |dct| to be valid attribute names.
### Response:
def make_keys_safe(dct):
"""Modify the keys in |dct| to be valid attribute names."""
result = {}
for key, val in dct.items():
key = key.replace('-', '_')
if key in keyword.kwlist:
key = key + '_'
result[key] = val
return result |
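A quick sketch: dashes become underscores and Python keywords gain a trailing underscore; the attribute dict is invented.
attrs = {'content-type': 'text/html', 'class': 'header', 'id': 'main'}
print(make_keys_safe(attrs))
# {'content_type': 'text/html', 'class_': 'header', 'id': 'main'}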
def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
If true nulls are excluded. i.e. -1 values in the labels are
passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
    labels are equal at all locations.
"""
def _int64_cut_off(shape):
acc = 1
for i, mul in enumerate(shape):
acc *= int(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def maybe_lift(lab, size):
# promote nan values (assigned -1 label in lab array)
# so that all output values are non-negative
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
labels = list(labels)
shape = list(shape)
# Iteratively process all the labels in chunks sized so less
# than _INT64_MAX unique int ids will be required for each chunk
while True:
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
out = stride * labels[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0
else:
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
break
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return out | For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
If true nulls are excluded. i.e. -1 values in the labels are
passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
labels are equal at all locations. | Below is the instruction that describes the task:
### Input:
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
If true nulls are excluded. i.e. -1 values in the labels are
passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
labels are equal at all locations.
### Response:
def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
If true nulls are excluded. i.e. -1 values in the labels are
passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
    labels are equal at all locations.
"""
def _int64_cut_off(shape):
acc = 1
for i, mul in enumerate(shape):
acc *= int(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def maybe_lift(lab, size):
# promote nan values (assigned -1 label in lab array)
# so that all output values are non-negative
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
labels = list(labels)
shape = list(shape)
# Iteratively process all the labels in chunks sized so less
# than _INT64_MAX unique int ids will be required for each chunk
while True:
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
out = stride * labels[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0
else:
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
break
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return out |
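A hedged worked example against the pandas-internal import path (which may change between versions); the label arrays are invented.
import numpy as np
from pandas.core.sorting import get_group_index
labels = [np.array([0, 0, 1, 1, -1], dtype=np.int64),
          np.array([0, 1, 0, 1, 0], dtype=np.int64)]
ids = get_group_index(labels, (2, 2), sort=True, xnull=True)
# array([ 0,  1,  2,  3, -1]): one id per unique (label0, label1) pair,
# with the row containing -1 kept as -1 because xnull=True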
def get_search_index_for(catalog):
"""Returns the search index to query
"""
searchable_text_index = "SearchableText"
listing_searchable_text_index = "listing_searchable_text"
if catalog == CATALOG_ANALYSIS_REQUEST_LISTING:
tool = api.get_tool(catalog)
indexes = tool.indexes()
if listing_searchable_text_index in indexes:
return listing_searchable_text_index
    return searchable_text_index | Returns the search index to query | Below is the instruction that describes the task:
### Input:
Returns the search index to query
### Response:
def get_search_index_for(catalog):
"""Returns the search index to query
"""
searchable_text_index = "SearchableText"
listing_searchable_text_index = "listing_searchable_text"
if catalog == CATALOG_ANALYSIS_REQUEST_LISTING:
tool = api.get_tool(catalog)
indexes = tool.indexes()
if listing_searchable_text_index in indexes:
return listing_searchable_text_index
return searchable_text_index |