Dataset columns: repo (string, 7-55 chars), path (string, 4-127 chars), func_name (string, 1-88 chars), original_string (string, 75-19.8k chars), language (1 class: python), code (string, 75-19.8k chars), code_tokens (list), docstring (string, 3-17.3k chars), docstring_tokens (list), sha (string, 40 chars), url (string, 87-242 chars), partition (1 class: train).

Each record below gives repo | path | func_name | language, the function source (shown once, with its docstring), and a closing | sha | url | partition | line.
Kortemme-Lab/klab | klab/bio/pymolmod/colors.py | ColorScheme.lookup | python |

def lookup(self, path, must_be_leaf = False):
    '''Looks up a part of the color scheme. If used for looking up colors, must_be_leaf should be True.'''
    assert(type(path) == type(self.name))
    d = self.color_scheme
    tokens = path.split('.')
    for t in tokens[:-1]:
        d = d.get(t)
        if d is None:
            raise Exception("Path '%s' not found." % path)  # pass path so the %s placeholder is actually filled
    if must_be_leaf:
        assert(type(d[tokens[-1]]) == type(self.name))
    return d[tokens[-1]]

| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pymolmod/colors.py#L399-L411 | train |
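A standalone sketch of the same dotted-path traversal, using a made-up nested dict in place of a real ColorScheme:

scheme = {'mutations': {'wildtype': 'brightorange', 'mutant': 'yellow'}}  # hypothetical example data

def lookup(d, path):
    tokens = path.split('.')
    for t in tokens[:-1]:          # walk every segment except the leaf
        d = d.get(t)
        if d is None:
            raise Exception("Path '%s' not found." % path)
    return d[tokens[-1]]           # a missing leaf raises KeyError

print(lookup(scheme, 'mutations.mutant'))  # -> yellow
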
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | resolve_pid | python |

def resolve_pid(fetched_pid):
    """Retrieve the real PID given a fetched PID.

    :param fetched_pid: fetched PID to resolve.
    """
    return PersistentIdentifier.get(
        pid_type=fetched_pid.pid_type,
        pid_value=fetched_pid.pid_value,
        pid_provider=fetched_pid.provider.pid_provider
    )

"""Retrieve the real PID given a fetched PID.
:param pid: fetched PID to resolve.
"""
return PersistentIdentifier.get(
pid_type=fetched_pid.pid_type,
pid_value=fetched_pid.pid_value,
pid_provider=fetched_pid.provider.pid_provider
) | [
"def",
"resolve_pid",
"(",
"fetched_pid",
")",
":",
"return",
"PersistentIdentifier",
".",
"get",
"(",
"pid_type",
"=",
"fetched_pid",
".",
"pid_type",
",",
"pid_value",
"=",
"fetched_pid",
".",
"pid_value",
",",
"pid_provider",
"=",
"fetched_pid",
".",
"provider",
".",
"pid_provider",
")"
]
| Retrieve the real PID given a fetched PID.
:param pid: fetched PID to resolve. | [
"Retrieve",
"the",
"real",
"PID",
"given",
"a",
"fetched",
"PID",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L69-L78 | train |
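The pattern here — promoting a lightweight fetched identifier to the full record via its natural key — sketched standalone; the namedtuple and registry are illustrative stand-ins, not the invenio API:

from collections import namedtuple

FetchedPID = namedtuple('FetchedPID', 'pid_type pid_value')  # hypothetical stand-in
_registry = {('recid', '1234'): {'pid_type': 'recid', 'pid_value': '1234', 'status': 'R'}}

def resolve_pid(fetched_pid):
    # look the full record up on its (pid_type, pid_value) natural key
    return _registry[(fetched_pid.pid_type, fetched_pid.pid_value)]

print(resolve_pid(FetchedPID('recid', '1234'))['status'])  # -> R
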
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDQuery.ordered | python |

def ordered(self, ord='desc'):
    """Order the query result on the relations' indexes."""
    if ord not in ('asc', 'desc', ):
        raise ValueError("ord must be 'asc' or 'desc'.")  # a bare raise is invalid outside an except block
    ord_f = getattr(PIDRelation.index, ord)()
    return self.order_by(ord_f)

"""Order the query result on the relations' indexes."""
if ord not in ('asc', 'desc', ):
raise
ord_f = getattr(PIDRelation.index, ord)()
return self.order_by(ord_f) | [
"def",
"ordered",
"(",
"self",
",",
"ord",
"=",
"'desc'",
")",
":",
"if",
"ord",
"not",
"in",
"(",
"'asc'",
",",
"'desc'",
",",
")",
":",
"raise",
"ord_f",
"=",
"getattr",
"(",
"PIDRelation",
".",
"index",
",",
"ord",
")",
"(",
")",
"return",
"self",
".",
"order_by",
"(",
"ord_f",
")"
]
| Order the query result on the relations' indexes. | [
"Order",
"the",
"query",
"result",
"on",
"the",
"relations",
"indexes",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L53-L58 | train |
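The getattr dispatch (ord_f = getattr(PIDRelation.index, ord)()) in isolation, with a stand-in column object instead of a real SQLAlchemy column:

class Column:  # hypothetical stand-in
    def asc(self):
        return 'ORDER BY index ASC'
    def desc(self):
        return 'ORDER BY index DESC'

def ordered(column, ord='desc'):
    if ord not in ('asc', 'desc'):
        raise ValueError("ord must be 'asc' or 'desc'.")
    return getattr(column, ord)()  # look the method up by name, then call it

print(ordered(Column(), 'asc'))  # -> ORDER BY index ASC
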
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDQuery.status | python |

def status(self, status_in):
    """Filter the PIDs based on their status."""
    if isinstance(status_in, PIDStatus):
        status_in = [status_in, ]
    return self.filter(
        self._filtered_pid_class.status.in_(status_in)
    )

"""Filter the PIDs based on their status."""
if isinstance(status_in, PIDStatus):
status_in = [status_in, ]
return self.filter(
self._filtered_pid_class.status.in_(status_in)
) | [
"def",
"status",
"(",
"self",
",",
"status_in",
")",
":",
"if",
"isinstance",
"(",
"status_in",
",",
"PIDStatus",
")",
":",
"status_in",
"=",
"[",
"status_in",
",",
"]",
"return",
"self",
".",
"filter",
"(",
"self",
".",
"_filtered_pid_class",
".",
"status",
".",
"in_",
"(",
"status_in",
")",
")"
]
| Filter the PIDs based on their status. | [
"Filter",
"the",
"PIDs",
"based",
"on",
"their",
"status",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L60-L66 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDNode._resolved_pid | python |

def _resolved_pid(self):
    """Resolve self.pid if it is a fetched pid."""
    if not isinstance(self.pid, PersistentIdentifier):
        return resolve_pid(self.pid)
    return self.pid

"""Resolve self.pid if it is a fetched pid."""
if not isinstance(self.pid, PersistentIdentifier):
return resolve_pid(self.pid)
return self.pid | [
"def",
"_resolved_pid",
"(",
"self",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"pid",
",",
"PersistentIdentifier",
")",
":",
"return",
"resolve_pid",
"(",
"self",
".",
"pid",
")",
"return",
"self",
".",
"pid"
]
| Resolve self.pid if it is a fetched pid. | [
"Resolve",
"self",
".",
"pid",
"if",
"it",
"is",
"a",
"fetched",
"pid",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L105-L109 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDNode._get_child_relation | python |

def _get_child_relation(self, child_pid):
    """Retrieve the relation between this node and a child PID."""
    return PIDRelation.query.filter_by(
        parent=self._resolved_pid,
        child=child_pid,
        relation_type=self.relation_type.id).one()

"""Retrieve the relation between this node and a child PID."""
return PIDRelation.query.filter_by(
parent=self._resolved_pid,
child=child_pid,
relation_type=self.relation_type.id).one() | [
"def",
"_get_child_relation",
"(",
"self",
",",
"child_pid",
")",
":",
"return",
"PIDRelation",
".",
"query",
".",
"filter_by",
"(",
"parent",
"=",
"self",
".",
"_resolved_pid",
",",
"child",
"=",
"child_pid",
",",
"relation_type",
"=",
"self",
".",
"relation_type",
".",
"id",
")",
".",
"one",
"(",
")"
]
| Retrieve the relation between this node and a child PID. | [
"Retrieve",
"the",
"relation",
"between",
"this",
"node",
"and",
"a",
"child",
"PID",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L111-L116 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDNode._check_child_limits | python |

def _check_child_limits(self, child_pid):
    """Check that inserting a child is within the limits."""
    if self.max_children is not None and \
            self.children.count() >= self.max_children:
        raise PIDRelationConsistencyError(
            "Max number of children is set to {}.".
            format(self.max_children))
    if self.max_parents is not None and \
            PIDRelation.query.filter_by(
                child=child_pid,
                relation_type=self.relation_type.id)\
            .count() >= self.max_parents:
        raise PIDRelationConsistencyError(
            "This pid already has the maximum number of parents.")

"""Check that inserting a child is within the limits."""
if self.max_children is not None and \
self.children.count() >= self.max_children:
raise PIDRelationConsistencyError(
"Max number of children is set to {}.".
format(self.max_children))
if self.max_parents is not None and \
PIDRelation.query.filter_by(
child=child_pid,
relation_type=self.relation_type.id)\
.count() >= self.max_parents:
raise PIDRelationConsistencyError(
"This pid already has the maximum number of parents.") | [
"def",
"_check_child_limits",
"(",
"self",
",",
"child_pid",
")",
":",
"if",
"self",
".",
"max_children",
"is",
"not",
"None",
"and",
"self",
".",
"children",
".",
"count",
"(",
")",
">=",
"self",
".",
"max_children",
":",
"raise",
"PIDRelationConsistencyError",
"(",
"\"Max number of children is set to {}.\"",
".",
"format",
"(",
"self",
".",
"max_children",
")",
")",
"if",
"self",
".",
"max_parents",
"is",
"not",
"None",
"and",
"PIDRelation",
".",
"query",
".",
"filter_by",
"(",
"child",
"=",
"child_pid",
",",
"relation_type",
"=",
"self",
".",
"relation_type",
".",
"id",
")",
".",
"count",
"(",
")",
">=",
"self",
".",
"max_parents",
":",
"raise",
"PIDRelationConsistencyError",
"(",
"\"This pid already has the maximum number of parents.\"",
")"
]
| Check that inserting a child is within the limits. | [
"Check",
"that",
"inserting",
"a",
"child",
"is",
"within",
"the",
"limits",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L118-L131 | train |
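The two guard clauses reduce to a simple capacity check; a self-contained sketch with in-memory stand-ins for the parent/child bookkeeping:

class ConsistencyError(Exception):  # stand-in for PIDRelationConsistencyError
    pass

def check_child_limits(children, parents_of_child, max_children=None, max_parents=None):
    # refuse the insert when either side of the relation is already full
    if max_children is not None and len(children) >= max_children:
        raise ConsistencyError("Max number of children is set to {}.".format(max_children))
    if max_parents is not None and len(parents_of_child) >= max_parents:
        raise ConsistencyError("This pid already has the maximum number of parents.")

check_child_limits(children=['v1'], parents_of_child=[], max_children=2)  # passes silently
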
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDNode._connected_pids | python |

def _connected_pids(self, from_parent=True):
    """Follow a relationship to find connected PIDs.

    :param from_parent: search children from the current pid if True, else
        search for its parents.
    :type from_parent: bool
    """
    to_pid = aliased(PersistentIdentifier, name='to_pid')
    if from_parent:
        to_relation = PIDRelation.child_id
        from_relation = PIDRelation.parent_id
    else:
        to_relation = PIDRelation.parent_id
        from_relation = PIDRelation.child_id
    query = PIDQuery(
        [to_pid], db.session(), _filtered_pid_class=to_pid
    ).join(
        PIDRelation,
        to_pid.id == to_relation
    )
    # accept both PersistentIdentifier models and fake PIDs with just
    # pid_value, pid_type as they are fetched with the PID fetcher.
    if isinstance(self.pid, PersistentIdentifier):
        query = query.filter(from_relation == self.pid.id)
    else:
        from_pid = aliased(PersistentIdentifier, name='from_pid')
        query = query.join(
            from_pid,
            from_pid.id == from_relation
        ).filter(
            from_pid.pid_value == self.pid.pid_value,
            from_pid.pid_type == self.pid.pid_type,
        )
    return query

"""Follow a relationship to find connected PIDs.abs.
:param from_parent: search children from the current pid if True, else
search for its parents.
:type from_parent: bool
"""
to_pid = aliased(PersistentIdentifier, name='to_pid')
if from_parent:
to_relation = PIDRelation.child_id
from_relation = PIDRelation.parent_id
else:
to_relation = PIDRelation.parent_id
from_relation = PIDRelation.child_id
query = PIDQuery(
[to_pid], db.session(), _filtered_pid_class=to_pid
).join(
PIDRelation,
to_pid.id == to_relation
)
# accept both PersistentIdentifier models and fake PIDs with just
# pid_value, pid_type as they are fetched with the PID fetcher.
if isinstance(self.pid, PersistentIdentifier):
query = query.filter(from_relation == self.pid.id)
else:
from_pid = aliased(PersistentIdentifier, name='from_pid')
query = query.join(
from_pid,
from_pid.id == from_relation
).filter(
from_pid.pid_value == self.pid.pid_value,
from_pid.pid_type == self.pid.pid_type,
)
return query | [
"def",
"_connected_pids",
"(",
"self",
",",
"from_parent",
"=",
"True",
")",
":",
"to_pid",
"=",
"aliased",
"(",
"PersistentIdentifier",
",",
"name",
"=",
"'to_pid'",
")",
"if",
"from_parent",
":",
"to_relation",
"=",
"PIDRelation",
".",
"child_id",
"from_relation",
"=",
"PIDRelation",
".",
"parent_id",
"else",
":",
"to_relation",
"=",
"PIDRelation",
".",
"parent_id",
"from_relation",
"=",
"PIDRelation",
".",
"child_id",
"query",
"=",
"PIDQuery",
"(",
"[",
"to_pid",
"]",
",",
"db",
".",
"session",
"(",
")",
",",
"_filtered_pid_class",
"=",
"to_pid",
")",
".",
"join",
"(",
"PIDRelation",
",",
"to_pid",
".",
"id",
"==",
"to_relation",
")",
"# accept both PersistentIdentifier models and fake PIDs with just",
"# pid_value, pid_type as they are fetched with the PID fetcher.",
"if",
"isinstance",
"(",
"self",
".",
"pid",
",",
"PersistentIdentifier",
")",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"from_relation",
"==",
"self",
".",
"pid",
".",
"id",
")",
"else",
":",
"from_pid",
"=",
"aliased",
"(",
"PersistentIdentifier",
",",
"name",
"=",
"'from_pid'",
")",
"query",
"=",
"query",
".",
"join",
"(",
"from_pid",
",",
"from_pid",
".",
"id",
"==",
"from_relation",
")",
".",
"filter",
"(",
"from_pid",
".",
"pid_value",
"==",
"self",
".",
"pid",
".",
"pid_value",
",",
"from_pid",
".",
"pid_type",
"==",
"self",
".",
"pid",
".",
"pid_type",
",",
")",
"return",
"query"
]
| Follow a relationship to find connected PIDs.abs.
:param from_parent: search children from the current pid if True, else
search for its parents.
:type from_parent: bool | [
"Follow",
"a",
"relationship",
"to",
"find",
"connected",
"PIDs",
".",
"abs",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L133-L167 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDNode.insert_child | python |

def insert_child(self, child_pid):
    """Add the given PID to the list of children PIDs."""
    self._check_child_limits(child_pid)  # enforces max_children / max_parents before touching the DB
    try:
        with db.session.begin_nested():
            if not isinstance(child_pid, PersistentIdentifier):
                child_pid = resolve_pid(child_pid)
            return PIDRelation.create(
                self._resolved_pid, child_pid, self.relation_type.id, None
            )
    except IntegrityError:
        raise PIDRelationConsistencyError("PID Relation already exists.")

"""Add the given PID to the list of children PIDs."""
self._check_child_limits(child_pid)
try:
# TODO: Here add the check for the max parents and the max children
with db.session.begin_nested():
if not isinstance(child_pid, PersistentIdentifier):
child_pid = resolve_pid(child_pid)
return PIDRelation.create(
self._resolved_pid, child_pid, self.relation_type.id, None
)
except IntegrityError:
raise PIDRelationConsistencyError("PID Relation already exists.") | [
"def",
"insert_child",
"(",
"self",
",",
"child_pid",
")",
":",
"self",
".",
"_check_child_limits",
"(",
"child_pid",
")",
"try",
":",
"# TODO: Here add the check for the max parents and the max children",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"child_pid",
",",
"PersistentIdentifier",
")",
":",
"child_pid",
"=",
"resolve_pid",
"(",
"child_pid",
")",
"return",
"PIDRelation",
".",
"create",
"(",
"self",
".",
"_resolved_pid",
",",
"child_pid",
",",
"self",
".",
"relation_type",
".",
"id",
",",
"None",
")",
"except",
"IntegrityError",
":",
"raise",
"PIDRelationConsistencyError",
"(",
"\"PID Relation already exists.\"",
")"
]
| Add the given PID to the list of children PIDs. | [
"Add",
"the",
"given",
"PID",
"to",
"the",
"list",
"of",
"children",
"PIDs",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L189-L201 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDNodeOrdered.index | python |

def index(self, child_pid):
    """Index of the child in the relation."""
    if not isinstance(child_pid, PersistentIdentifier):
        child_pid = resolve_pid(child_pid)
    relation = PIDRelation.query.filter_by(
        parent=self._resolved_pid,
        child=child_pid,
        relation_type=self.relation_type.id).one()
    return relation.index

"""Index of the child in the relation."""
if not isinstance(child_pid, PersistentIdentifier):
child_pid = resolve_pid(child_pid)
relation = PIDRelation.query.filter_by(
parent=self._resolved_pid,
child=child_pid,
relation_type=self.relation_type.id).one()
return relation.index | [
"def",
"index",
"(",
"self",
",",
"child_pid",
")",
":",
"if",
"not",
"isinstance",
"(",
"child_pid",
",",
"PersistentIdentifier",
")",
":",
"child_pid",
"=",
"resolve_pid",
"(",
"child_pid",
")",
"relation",
"=",
"PIDRelation",
".",
"query",
".",
"filter_by",
"(",
"parent",
"=",
"self",
".",
"_resolved_pid",
",",
"child",
"=",
"child_pid",
",",
"relation_type",
"=",
"self",
".",
"relation_type",
".",
"id",
")",
".",
"one",
"(",
")",
"return",
"relation",
".",
"index"
]
| Index of the child in the relation. | [
"Index",
"of",
"the",
"child",
"in",
"the",
"relation",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L222-L230 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDNodeOrdered.is_last_child | python |

def is_last_child(self, child_pid):
    """
    Determine if 'pid' is the latest version of a resource.

    Resolves True for versioned PIDs which are the latest of their siblings.
    False otherwise, also for Head PIDs.
    """
    last_child = self.last_child
    if last_child is None:
        return False
    return last_child == child_pid

"""
Determine if 'pid' is the latest version of a resource.
Resolves True for Versioned PIDs which are the oldest of its siblings.
False otherwise, also for Head PIDs.
"""
last_child = self.last_child
if last_child is None:
return False
return last_child == child_pid | [
"def",
"is_last_child",
"(",
"self",
",",
"child_pid",
")",
":",
"last_child",
"=",
"self",
".",
"last_child",
"if",
"last_child",
"is",
"None",
":",
"return",
"False",
"return",
"last_child",
"==",
"child_pid"
]
| Determine if 'pid' is the latest version of a resource.
Resolves True for Versioned PIDs which are the oldest of its siblings.
False otherwise, also for Head PIDs. | [
"Determine",
"if",
"pid",
"is",
"the",
"latest",
"version",
"of",
"a",
"resource",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L232-L242 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDNodeOrdered.last_child | python |

def last_child(self):
    """
    Get the latest PID as pointed by the Head PID.

    If the 'pid' is a Head PID, return the latest of its children.
    If the 'pid' is a Version PID, return the latest of its siblings.
    Return None for the non-versioned PIDs.
    """
    return self.children.filter(
        PIDRelation.index.isnot(None)).ordered().first()

"""
Get the latest PID as pointed by the Head PID.
If the 'pid' is a Head PID, return the latest of its children.
If the 'pid' is a Version PID, return the latest of its siblings.
Return None for the non-versioned PIDs.
"""
return self.children.filter(
PIDRelation.index.isnot(None)).ordered().first() | [
"def",
"last_child",
"(",
"self",
")",
":",
"return",
"self",
".",
"children",
".",
"filter",
"(",
"PIDRelation",
".",
"index",
".",
"isnot",
"(",
"None",
")",
")",
".",
"ordered",
"(",
")",
".",
"first",
"(",
")"
]
| Get the latest PID as pointed by the Head PID.
If the 'pid' is a Head PID, return the latest of its children.
If the 'pid' is a Version PID, return the latest of its siblings.
Return None for the non-versioned PIDs. | [
"Get",
"the",
"latest",
"PID",
"as",
"pointed",
"by",
"the",
"Head",
"PID",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L245-L254 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDNodeOrdered.next_child | python |

def next_child(self, child_pid):
    """Get the next child PID in the PID relation."""
    relation = self._get_child_relation(child_pid)
    if relation.index is not None:
        return self.children.filter(
            PIDRelation.index > relation.index
        ).ordered(ord='asc').first()
    else:
        return None

"""Get the next child PID in the PID relation."""
relation = self._get_child_relation(child_pid)
if relation.index is not None:
return self.children.filter(
PIDRelation.index > relation.index
).ordered(ord='asc').first()
else:
return None | [
"def",
"next_child",
"(",
"self",
",",
"child_pid",
")",
":",
"relation",
"=",
"self",
".",
"_get_child_relation",
"(",
"child_pid",
")",
"if",
"relation",
".",
"index",
"is",
"not",
"None",
":",
"return",
"self",
".",
"children",
".",
"filter",
"(",
"PIDRelation",
".",
"index",
">",
"relation",
".",
"index",
")",
".",
"ordered",
"(",
"ord",
"=",
"'asc'",
")",
".",
"first",
"(",
")",
"else",
":",
"return",
"None"
]
| Get the next child PID in the PID relation. | [
"Get",
"the",
"next",
"child",
"PID",
"in",
"the",
"PID",
"relation",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L256-L264 | train |
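The "next sibling by index" logic, sketched over plain (index, pid) pairs instead of the relations table:

children = [(0, 'v1'), (1, 'v2'), (2, 'v3')]  # hypothetical ordered children of a head PID

def next_child(children, child):
    idx = next(i for i, pid in children if pid == child)
    later = sorted((i, pid) for i, pid in children if i > idx)  # ascending, like ordered(ord='asc')
    return later[0][1] if later else None

print(next_child(children, 'v2'))  # -> v3
print(next_child(children, 'v3'))  # -> None
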
inveniosoftware/invenio-pidrelations | invenio_pidrelations/api.py | PIDNodeOrdered.insert_child | python |

def insert_child(self, child_pid, index=-1):
    """Insert a new child into a PID concept.

    Argument 'index' can take the following values:
        0,1,2,... - insert child PID at the specified position
        -1 - insert the child PID at the last position
        None - treated as -1 (the child is appended and siblings re-indexed)

    NOTE: If 'index' is specified, all sibling relations should
    have PIDRelation.index information.
    """
    self._check_child_limits(child_pid)
    if index is None:
        index = -1
    try:
        with db.session.begin_nested():
            if not isinstance(child_pid, PersistentIdentifier):
                child_pid = resolve_pid(child_pid)
            child_relations = self._resolved_pid.child_relations.filter(
                PIDRelation.relation_type == self.relation_type.id
            ).order_by(PIDRelation.index).all()
            relation_obj = PIDRelation.create(
                self._resolved_pid, child_pid, self.relation_type.id, None)
            if index == -1:
                child_relations.append(relation_obj)
            else:
                child_relations.insert(index, relation_obj)
            for idx, c in enumerate(child_relations):  # re-index all siblings to 0..n-1
                c.index = idx
    except IntegrityError:
        raise PIDRelationConsistencyError("PID Relation already exists.")

"""Insert a new child into a PID concept.
Argument 'index' can take the following values:
0,1,2,... - insert child PID at the specified position
-1 - insert the child PID at the last position
None - insert child without order (no re-ordering is done)
NOTE: If 'index' is specified, all sibling relations should
have PIDRelation.index information.
"""
self._check_child_limits(child_pid)
if index is None:
index = -1
try:
with db.session.begin_nested():
if not isinstance(child_pid, PersistentIdentifier):
child_pid = resolve_pid(child_pid)
child_relations = self._resolved_pid.child_relations.filter(
PIDRelation.relation_type == self.relation_type.id
).order_by(PIDRelation.index).all()
relation_obj = PIDRelation.create(
self._resolved_pid, child_pid, self.relation_type.id, None)
if index == -1:
child_relations.append(relation_obj)
else:
child_relations.insert(index, relation_obj)
for idx, c in enumerate(child_relations):
c.index = idx
except IntegrityError:
raise PIDRelationConsistencyError("PID Relation already exists.") | [
"def",
"insert_child",
"(",
"self",
",",
"child_pid",
",",
"index",
"=",
"-",
"1",
")",
":",
"self",
".",
"_check_child_limits",
"(",
"child_pid",
")",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"-",
"1",
"try",
":",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"child_pid",
",",
"PersistentIdentifier",
")",
":",
"child_pid",
"=",
"resolve_pid",
"(",
"child_pid",
")",
"child_relations",
"=",
"self",
".",
"_resolved_pid",
".",
"child_relations",
".",
"filter",
"(",
"PIDRelation",
".",
"relation_type",
"==",
"self",
".",
"relation_type",
".",
"id",
")",
".",
"order_by",
"(",
"PIDRelation",
".",
"index",
")",
".",
"all",
"(",
")",
"relation_obj",
"=",
"PIDRelation",
".",
"create",
"(",
"self",
".",
"_resolved_pid",
",",
"child_pid",
",",
"self",
".",
"relation_type",
".",
"id",
",",
"None",
")",
"if",
"index",
"==",
"-",
"1",
":",
"child_relations",
".",
"append",
"(",
"relation_obj",
")",
"else",
":",
"child_relations",
".",
"insert",
"(",
"index",
",",
"relation_obj",
")",
"for",
"idx",
",",
"c",
"in",
"enumerate",
"(",
"child_relations",
")",
":",
"c",
".",
"index",
"=",
"idx",
"except",
"IntegrityError",
":",
"raise",
"PIDRelationConsistencyError",
"(",
"\"PID Relation already exists.\"",
")"
]
| Insert a new child into a PID concept.
Argument 'index' can take the following values:
0,1,2,... - insert child PID at the specified position
-1 - insert the child PID at the last position
None - insert child without order (no re-ordering is done)
NOTE: If 'index' is specified, all sibling relations should
have PIDRelation.index information. | [
"Insert",
"a",
"new",
"child",
"into",
"a",
"PID",
"concept",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/api.py#L276-L307 | train |
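The insert-and-renumber step on its own, over a plain list of [index, pid] relations:

relations = [[0, 'v1'], [1, 'v2'], [2, 'v3']]  # hypothetical sibling relations

def insert_child(relations, pid, index=-1):
    entry = [None, pid]
    if index == -1:
        relations.append(entry)
    else:
        relations.insert(index, entry)
    for idx, rel in enumerate(relations):  # compact indexes to 0..n-1, as the loop above does
        rel[0] = idx
    return relations

print(insert_child(relations, 'v1.5', index=1))
# -> [[0, 'v1'], [1, 'v1.5'], [2, 'v2'], [3, 'v3']]
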
projectshift/shift-boiler | boiler/jinja/functions.py | asset | python |

def asset(url=None):
    """
    Asset helper

    Generates path to a static asset based on configuration base path and
    support for versioning. Will easily allow you to move your assets away to
    a CDN without changing templates. Versioning allows you to cache your asset
    changes forever by the webserver.

    :param url: string - relative path to asset
    :return: string - full versioned url
    """
    # fallback to url_for('static') if assets path not configured
    url = url.lstrip('/')
    assets_path = app.config.get('ASSETS_PATH')
    if not assets_path:
        url_for = app.jinja_env.globals.get('url_for')
        url = url_for('static', filename=url)
    else:
        assets_path = assets_path.rstrip('/')
        url = assets_path + '/' + url

    version = app.config.get('ASSETS_VERSION')
    if not version:
        return url

    sign = '?'
    if sign in url:
        sign = '&'

    pattern = '{url}{sign}v{version}'
    return pattern.format(url=url, sign=sign, version=version)

"""
Asset helper
Generates path to a static asset based on configuration base path and
support for versioning. Will easily allow you to move your assets away to
a CDN without changing templates. Versioning allows you to cache your asset
changes forever by the webserver.
:param url: string - relative path to asset
:return: string - full versioned url
"""
# fallback to url_for('static') if assets path not configured
url = url.lstrip('/')
assets_path = app.config.get('ASSETS_PATH')
if not assets_path:
url_for = app.jinja_env.globals.get('url_for')
url = url_for('static', filename=url)
else:
assets_path = assets_path.rstrip('/')
url = assets_path + '/' + url
version = app.config.get('ASSETS_VERSION')
if not version:
return url
sign = '?'
if sign in url:
sign = '&'
pattern = '{url}{sign}v{version}'
return pattern.format(url=url, sign=sign, version=version) | [
"def",
"asset",
"(",
"url",
"=",
"None",
")",
":",
"# fallback to url_for('static') if assets path not configured",
"url",
"=",
"url",
".",
"lstrip",
"(",
"'/'",
")",
"assets_path",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'ASSETS_PATH'",
")",
"if",
"not",
"assets_path",
":",
"url_for",
"=",
"app",
".",
"jinja_env",
".",
"globals",
".",
"get",
"(",
"'url_for'",
")",
"url",
"=",
"url_for",
"(",
"'static'",
",",
"filename",
"=",
"url",
")",
"else",
":",
"assets_path",
"=",
"assets_path",
".",
"rstrip",
"(",
"'/'",
")",
"url",
"=",
"assets_path",
"+",
"'/'",
"+",
"url",
"version",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'ASSETS_VERSION'",
")",
"if",
"not",
"version",
":",
"return",
"url",
"sign",
"=",
"'?'",
"if",
"sign",
"in",
"url",
":",
"sign",
"=",
"'&'",
"pattern",
"=",
"'{url}{sign}v{version}'",
"return",
"pattern",
".",
"format",
"(",
"url",
"=",
"url",
",",
"sign",
"=",
"sign",
",",
"version",
"=",
"version",
")"
]
| Asset helper
Generates path to a static asset based on configuration base path and
support for versioning. Will easily allow you to move your assets away to
a CDN without changing templates. Versioning allows you to cache your asset
changes forever by the webserver.
:param url: string - relative path to asset
:return: string - full versioned url | [
"Asset",
"helper",
"Generates",
"path",
"to",
"a",
"static",
"asset",
"based",
"on",
"configuration",
"base",
"path",
"and",
"support",
"for",
"versioning",
".",
"Will",
"easily",
"allow",
"you",
"to",
"move",
"your",
"assets",
"away",
"to",
"a",
"CDN",
"without",
"changing",
"templates",
".",
"Versioning",
"allows",
"you",
"to",
"cache",
"your",
"asset",
"changes",
"forever",
"by",
"the",
"webserver",
"."
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/jinja/functions.py#L6-L37 | train |
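The URL-building and cache-busting logic alone, with the Flask app object replaced by plain arguments (the config values below are hypothetical):

def versioned(url, assets_path=None, version=None):
    url = url.lstrip('/')
    url = (assets_path.rstrip('/') + '/' + url) if assets_path else '/static/' + url
    if not version:
        return url
    sign = '&' if '?' in url else '?'  # append to an existing query string if there is one
    return '{url}{sign}v{version}'.format(url=url, sign=sign, version=version)

print(versioned('css/app.css', assets_path='https://cdn.example.com/assets', version=42))
# -> https://cdn.example.com/assets/css/app.css?v42
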
clement-alexandre/TotemBionet | totembionet/src/model_picker/model_picker.py | pick_a_model_randomly | python |

def pick_a_model_randomly(models: List[Any]) -> Any:
    """ Naive picking function, return one of the models chosen randomly. """
    try:
        return random.choice(models)
    except IndexError as e:
        raise ModelPickerException(cause=e)

""" Naive picking function, return one of the models chosen randomly. """
try:
return random.choice(models)
except IndexError as e:
raise ModelPickerException(cause=e) | [
"def",
"pick_a_model_randomly",
"(",
"models",
":",
"List",
"[",
"Any",
"]",
")",
"->",
"Any",
":",
"try",
":",
"return",
"random",
".",
"choice",
"(",
"models",
")",
"except",
"IndexError",
"as",
"e",
":",
"raise",
"ModelPickerException",
"(",
"cause",
"=",
"e",
")"
]
| Naive picking function, return one of the models chosen randomly. | [
"Naive",
"picking",
"function",
"return",
"one",
"of",
"the",
"models",
"chosen",
"randomly",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/model_picker/model_picker.py#L16-L21 | train |
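A self-contained, runnable version, with a minimal stand-in for the project's ModelPickerException:

import random
from typing import Any, List

class ModelPickerException(Exception):  # stand-in; the real class lives in the project
    def __init__(self, cause=None):
        super().__init__(cause)
        self.cause = cause

def pick_a_model_randomly(models: List[Any]) -> Any:
    try:
        return random.choice(models)  # random.choice raises IndexError on an empty sequence
    except IndexError as e:
        raise ModelPickerException(cause=e)

print(pick_a_model_randomly(['m1', 'm2', 'm3']))  # one of the three, chosen at random
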
uogbuji/versa | tools/py/pipeline/core_actions.py | link | python |

def link(origin=None, rel=None, value=None, attributes=None, source=None):
    '''
    Action function generator to create a link based on the context's current link, or on provided parameters

    :param origin: IRI/string, or list of same; origins for the created relationships.
        If None, the action context provides the parameter.
    :param rel: IRI/string, or list of same; IDs for the created relationships.
        If None, the action context provides the parameter.
    :param value: IRI/string, or list of same; values/targets for the created relationships.
        If None, the action context provides the parameter.
    :param source: pattern action to be executed, generating contexts to determine the output statements. If given, overrides specific origin, rel or value params
    :return: Versa action function to do the actual work
    '''
    attributes = attributes or {}
    def _link(ctx):
        if source:
            if not callable(source):
                raise ValueError('Link source must be a pattern action function')
            contexts = source(ctx)
            for ctx in contexts:
                ctx.output_model.add(ctx.current_link[ORIGIN], ctx.current_link[RELATIONSHIP], ctx.current_link[TARGET], attributes)
            return

        (o, r, v, a) = ctx.current_link
        # normalize each part to a list, falling back to the context's current link
        _origin = origin(ctx) if callable(origin) else origin
        o_list = [o] if _origin is None else (_origin if isinstance(_origin, list) else [_origin])
        _rel = rel(ctx) if callable(rel) else rel
        r_list = [r] if _rel is None else (_rel if isinstance(_rel, list) else [_rel])
        _value = value(ctx) if callable(value) else value
        v_list = [v] if _value is None else (_value if isinstance(_value, list) else [_value])
        _attributes = attributes(ctx) if callable(attributes) else attributes

        #FIXME: Add test for IRI output via wrapper action function
        for (o, r, v, a) in [ (o, r, v, a) for o in o_list for r in r_list for v in v_list ]:
            ctx.output_model.add(o, r, v, _attributes)  # use the computed attributes; the original passed the raw parameter
        return
    return _link

| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/pipeline/core_actions.py#L17-L64 | train |
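The list-normalization idiom used throughout link() and foreach(), in isolation:

def as_list(override, fallback):
    # None -> take the context's value; scalar -> wrap; list -> use as-is
    return [fallback] if override is None else (override if isinstance(override, list) else [override])

print(as_list(None, 'ctx-origin'))          # -> ['ctx-origin']
print(as_list('o1', 'ctx-origin'))          # -> ['o1']
print(as_list(['o1', 'o2'], 'ctx-origin'))  # -> ['o1', 'o2']
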
uogbuji/versa | tools/py/pipeline/core_actions.py | attr | python |

def attr(aid):
    '''
    Action function generator to retrieve an attribute from the current link
    '''
    def _attr(ctx):
        return ctx.current_link[ATTRIBUTES].get(aid)
    return _attr

| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/pipeline/core_actions.py#L82-L88 | train |
uogbuji/versa | tools/py/pipeline/core_actions.py | values | python |

def values(*rels):
    '''
    Action function generator to compute a set of relationships from criteria

    :param rels: List of relationships to compute
    :return: Versa action function to do the actual work
    '''
    #Action function generator to multiplex a relationship at processing time
    def _values(ctx):
        '''
        Versa action function utility to specify a list of relationships

        :param ctx: Versa context used in processing (e.g. includes the prototype link)
        :return: List of computed relationships
        '''
        computed_rels = [ rel(ctx) if callable(rel) else rel for rel in rels ]
        return computed_rels
    return _values

| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/pipeline/core_actions.py#L109-L126 | train |
uogbuji/versa | tools/py/pipeline/core_actions.py | foreach | python |

def foreach(origin=None, rel=None, target=None, attributes=None):
    '''
    Action function generator to compute a combination of links

    :return: Versa action function to do the actual work
    '''
    def _foreach(ctx):
        '''
        Versa action function utility to compute a list of values from a list of expressions

        :param ctx: Versa context used in processing (e.g. includes the prototype link)
        '''
        _origin = origin(ctx) if callable(origin) else origin
        _rel = rel(ctx) if callable(rel) else rel
        _target = target(ctx) if callable(target) else target
        _attributes = attributes(ctx) if callable(attributes) else attributes
        (o, r, t, a) = ctx.current_link
        o = [o] if _origin is None else (_origin if isinstance(_origin, list) else [_origin])
        r = [r] if _rel is None else (_rel if isinstance(_rel, list) else [_rel])
        t = [t] if _target is None else (_target if isinstance(_target, list) else [_target])
        a = [a] if _attributes is None else (_attributes if isinstance(_attributes, list) else [_attributes])
        # fan out over every combination of origin, rel, target and attributes
        return [ ctx.copy(current_link=(curr_o, curr_r, curr_t, curr_a))
                 for (curr_o, curr_r, curr_t, curr_a)
                 in itertools.product(o, r, t, a) ]
    return _foreach

| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/pipeline/core_actions.py#L152-L182 | train |
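The Cartesian fan-out at the heart of _foreach, applied to plain values (the link parts below are hypothetical):

import itertools

origins = ['http://example.org/a']
rels = ['name', 'label']
targets = ['Alice', 'A.']
attrs = [{}]

for link in itertools.product(origins, rels, targets, attrs):
    print(link)
# 1 origin x 2 rels x 2 targets x 1 attrs -> 4 generated links
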
uogbuji/versa | tools/py/pipeline/core_actions.py | res | python |

def res(arg):
    '''
    Convert the argument into an IRI ref
    '''
    def _res(ctx):
        _arg = arg(ctx) if callable(arg) else arg
        return I(_arg)  # the original returned I(arg), discarding the computed _arg and breaking the callable case
    return _res

| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/pipeline/core_actions.py#L322-L329 | train |
ronhanson/python-tbx | tbx/code.py | static_singleton | python |

def static_singleton(*args, **kwargs):
    """
    STATIC Singleton Design Pattern Decorator
    Class is initialized with arguments passed into the decorator.

    :Usage:
    >>> @static_singleton('yop')
    ... class Bob(Person):
    ...     def __init__(self, arg1):
    ...         self.info = arg1
    ...     def says(self):
    ...         print(self.info)

    b1 = Bob  # note that we call it by the name of the class; no instance is created here, kind of static linking to an instance
    b2 = Bob  # here b1 is the same object as b2
    Bob.says()  # it will display 'yop'
    """
    def __static_singleton_wrapper(cls):
        if cls not in __singleton_instances:
            __singleton_instances[cls] = cls(*args, **kwargs)
        return __singleton_instances[cls]
    return __static_singleton_wrapper

"""
STATIC Singleton Design Pattern Decorator
Class is initialized with arguments passed into the decorator.
:Usage:
>>> @static_singleton('yop')
class Bob(Person):
def __init__(arg1):
self.info = arg1
def says(self):
print self.info
b1 = Bob #note that we call it by the name of the class, no instance created here, kind of static linking to an instance
b2 = Bob #here b1 is the same object as b2
Bob.says() # it will display 'yop'
"""
def __static_singleton_wrapper(cls):
if cls not in __singleton_instances:
__singleton_instances[cls] = cls(*args, **kwargs)
return __singleton_instances[cls]
return __static_singleton_wrapper | [
"def",
"static_singleton",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"__static_singleton_wrapper",
"(",
"cls",
")",
":",
"if",
"cls",
"not",
"in",
"__singleton_instances",
":",
"__singleton_instances",
"[",
"cls",
"]",
"=",
"cls",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"__singleton_instances",
"[",
"cls",
"]",
"return",
"__static_singleton_wrapper"
]
| STATIC Singleton Design Pattern Decorator
Class is initialized with arguments passed into the decorator.
:Usage:
>>> @static_singleton('yop')
class Bob(Person):
def __init__(arg1):
self.info = arg1
def says(self):
print self.info
b1 = Bob #note that we call it by the name of the class, no instance created here, kind of static linking to an instance
b2 = Bob #here b1 is the same object as b2
Bob.says() # it will display 'yop' | [
"STATIC",
"Singleton",
"Design",
"Pattern",
"Decorator",
"Class",
"is",
"initialized",
"with",
"arguments",
"passed",
"into",
"the",
"decorator",
"."
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/code.py#L19-L42 | train |
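A self-contained sketch of the decorator; the registry dict is module-level here for clarity:

_singleton_instances = {}

def static_singleton(*args, **kwargs):
    def wrapper(cls):
        if cls not in _singleton_instances:
            _singleton_instances[cls] = cls(*args, **kwargs)  # instantiate once, at decoration time
        return _singleton_instances[cls]
    return wrapper

@static_singleton('yop')
class Bob:
    def __init__(self, info):
        self.info = info
    def says(self):
        print(self.info)

Bob.says()  # the name Bob is now bound to the single instance -> prints 'yop'
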
ronhanson/python-tbx | tbx/code.py | get_method_documentation | python |

def get_method_documentation(method):
    """
    This function uses "inspect" to retrieve information about a method.
    Also, if you place a comment on the method, the method can be documented with "reStructured Text".

    :param method: method to describe
    :returns:
        {
            'name' : <string> - name of the method,
            'friendly_name' : <string> - friendly name of the method,
            'parameters' : {
                'required' : [ 'param1', 'param2' ],
                'optional' : {
                    'param3' : 'default_value3',
                    'param4' : 'default_value4',
                },
            },
            'help' : {
                'summary' : <string> - Summary - general description like in the comment,
                'parameters' : {
                    'param1' : 'description',
                    'param2' : 'description',
                },
                'return' : <string> - Can be multiline,
            }
        }
    """
    from inspect import getargspec  # removed in Python 3.11; inspect.getfullargspec is the modern equivalent

    result = {
        'name': method.__name__,
        'friendly_name': ' '.join([name.capitalize() for name in method.__name__.split('_')]),
    }
    arg_specs = getargspec(method)
    arguments = {}
    if not arg_specs.defaults:
        if len(arg_specs.args[1:]) > 0:
            arguments['required'] = list(arg_specs.args[1:])
    else:
        if len(arg_specs.args[1:-(len(arg_specs.defaults))]):
            arguments['required'] = list(arg_specs.args[1:-(len(arg_specs.defaults))])
        arguments['optional'] = {}
        for i in range(len(arg_specs.defaults)):
            arguments['optional'][arg_specs.args[-(len(arg_specs.defaults)) + i]] = arg_specs.defaults[i]
    if arguments != {}:
        result['parameters'] = arguments

    doc = method.__doc__.strip() if method.__doc__ else ''
    if ':' in doc:  # guard on the stripped copy so a missing docstring cannot raise
        doc = {'summary': method.__doc__[0:doc.find(' :')].strip()}
        params = re.findall(r":param ([^\s]*): (.*)\n", method.__doc__)
        if len(params) > 0:
            doc['parameters'] = {}
            for param in params:
                doc['parameters'][param[0]] = param[1].strip()
        regex = re.compile(r":returns:(.*)", re.MULTILINE | re.DOTALL)
        returns = regex.search(method.__doc__)
        if returns and returns.group(0):
            doc['return'] = returns.group(0).replace(':returns:', '').replace('\n ', '\n').strip()
    if doc != '':
        result['help'] = doc
    return result

"""
This function uses "inspect" to retrieve information about a method.
Also, if you place comment on the method, method can be docummented with "reStructured Text".
:param method: method to describe
:returns:
{
'name' : <string> - name of the method,
'friendly_name' : <string> - friendly name of the method,
'parameters' : {
'required' : [ 'param1', 'param2' ],
'optionnal' : {
'param3' : 'default_value3',
'param4' : 'default_value4',
},
'help' : {
'summary' : <string> - Summary - general description like in the comment,
'parameters' : {
'param1' : 'description',
'param2' : 'description',
},
'return' : <string> - Can be multiline,
}
}
"""
from inspect import getargspec
result = {
'name': method.__name__,
'friendly_name': ' '.join([name.capitalize() for name in method.__name__.split('_')]),
}
arg_specs = getargspec(method)
arguments = {}
if not arg_specs.defaults:
if len(arg_specs.args[1:]) > 0:
arguments['required'] = list(arg_specs.args[1:])
else:
if len(arg_specs.args[1:-(len(arg_specs.defaults))]):
arguments['required'] = list(arg_specs.args[1:-(len(arg_specs.defaults))])
arguments['optional'] = {}
for i in range(len(arg_specs.defaults)):
arguments['optional'][arg_specs.args[-(len(arg_specs.defaults)) + i]] = arg_specs.defaults[i]
if arguments != {}:
result['parameters'] = arguments
doc = method.__doc__.strip() if method.__doc__ else ''
if ':' in method.__doc__:
doc = {'summary': method.__doc__[0:doc.find(' :')].strip()}
params = re.findall(r":param ([^\s]*): (.*)\n", method.__doc__)
if len(params) > 0:
doc['parameters'] = {}
for param in params:
doc['parameters'][param[0]] = param[1].strip()
regex = re.compile(r":returns:(.*)", re.MULTILINE | re.DOTALL)
returns = regex.search(method.__doc__)
if returns and returns.group(0):
doc['return'] = returns.group(0).replace(':returns:', '').replace('\n ', '\n').strip()
if doc != '':
result['help'] = doc
return result | [
"def",
"get_method_documentation",
"(",
"method",
")",
":",
"from",
"inspect",
"import",
"getargspec",
"result",
"=",
"{",
"'name'",
":",
"method",
".",
"__name__",
",",
"'friendly_name'",
":",
"' '",
".",
"join",
"(",
"[",
"name",
".",
"capitalize",
"(",
")",
"for",
"name",
"in",
"method",
".",
"__name__",
".",
"split",
"(",
"'_'",
")",
"]",
")",
",",
"}",
"arg_specs",
"=",
"getargspec",
"(",
"method",
")",
"arguments",
"=",
"{",
"}",
"if",
"not",
"arg_specs",
".",
"defaults",
":",
"if",
"len",
"(",
"arg_specs",
".",
"args",
"[",
"1",
":",
"]",
")",
">",
"0",
":",
"arguments",
"[",
"'required'",
"]",
"=",
"list",
"(",
"arg_specs",
".",
"args",
"[",
"1",
":",
"]",
")",
"else",
":",
"if",
"len",
"(",
"arg_specs",
".",
"args",
"[",
"1",
":",
"-",
"(",
"len",
"(",
"arg_specs",
".",
"defaults",
")",
")",
"]",
")",
":",
"arguments",
"[",
"'required'",
"]",
"=",
"list",
"(",
"arg_specs",
".",
"args",
"[",
"1",
":",
"-",
"(",
"len",
"(",
"arg_specs",
".",
"defaults",
")",
")",
"]",
")",
"arguments",
"[",
"'optional'",
"]",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"arg_specs",
".",
"defaults",
")",
")",
":",
"arguments",
"[",
"'optional'",
"]",
"[",
"arg_specs",
".",
"args",
"[",
"-",
"(",
"len",
"(",
"arg_specs",
".",
"defaults",
")",
")",
"+",
"i",
"]",
"]",
"=",
"arg_specs",
".",
"defaults",
"[",
"i",
"]",
"if",
"arguments",
"!=",
"{",
"}",
":",
"result",
"[",
"'parameters'",
"]",
"=",
"arguments",
"doc",
"=",
"method",
".",
"__doc__",
".",
"strip",
"(",
")",
"if",
"method",
".",
"__doc__",
"else",
"''",
"if",
"':'",
"in",
"method",
".",
"__doc__",
":",
"doc",
"=",
"{",
"'summary'",
":",
"method",
".",
"__doc__",
"[",
"0",
":",
"doc",
".",
"find",
"(",
"' :'",
")",
"]",
".",
"strip",
"(",
")",
"}",
"params",
"=",
"re",
".",
"findall",
"(",
"r\":param ([^\\s]*): (.*)\\n\"",
",",
"method",
".",
"__doc__",
")",
"if",
"len",
"(",
"params",
")",
">",
"0",
":",
"doc",
"[",
"'parameters'",
"]",
"=",
"{",
"}",
"for",
"param",
"in",
"params",
":",
"doc",
"[",
"'parameters'",
"]",
"[",
"param",
"[",
"0",
"]",
"]",
"=",
"param",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"regex",
"=",
"re",
".",
"compile",
"(",
"r\":returns:(.*)\"",
",",
"re",
".",
"MULTILINE",
"|",
"re",
".",
"DOTALL",
")",
"returns",
"=",
"regex",
".",
"search",
"(",
"method",
".",
"__doc__",
")",
"if",
"returns",
"and",
"returns",
".",
"group",
"(",
"0",
")",
":",
"doc",
"[",
"'return'",
"]",
"=",
"returns",
".",
"group",
"(",
"0",
")",
".",
"replace",
"(",
"':returns:'",
",",
"''",
")",
".",
"replace",
"(",
"'\\n '",
",",
"'\\n'",
")",
".",
"strip",
"(",
")",
"if",
"doc",
"!=",
"''",
":",
"result",
"[",
"'help'",
"]",
"=",
"doc",
"return",
"result"
]
| This function uses "inspect" to retrieve information about a method.
Also, if you place a comment on the method, the method can be documented with "reStructuredText".
:param method: method to describe
:returns:
{
'name' : <string> - name of the method,
'friendly_name' : <string> - friendly name of the method,
'parameters' : {
'required' : [ 'param1', 'param2' ],
'optional' : {
'param3' : 'default_value3',
'param4' : 'default_value4',
},
'help' : {
'summary' : <string> - Summary - general description like in the comment,
'parameters' : {
'param1' : 'description',
'param2' : 'description',
},
'return' : <string> - Can be multiline,
}
} | [
"This",
"function",
"uses",
"inspect",
"to",
"retrieve",
"information",
"about",
"a",
"method",
"."
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/code.py#L145-L208 | train |
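A minimal usage sketch for get_method_documentation above. The Greeter class is hypothetical, and the sketch assumes a Python version where inspect.getargspec still exists (it was removed in Python 3.11) and that the module's own re import is in scope.

class Greeter(object):
    def greet(self, name, greeting='hello'):
        """Build a greeting for someone.
        :param name: who to greet
        :param greeting: salutation to use
        :returns: the assembled greeting
        """
        return '%s %s' % (greeting, name)

info = get_method_documentation(Greeter.greet)
# expected: info['friendly_name'] == 'Greet'
# expected: info['parameters'] == {'required': ['name'],
#                                  'optional': {'greeting': 'hello'}}
# expected: info['help']['summary'] == 'Build a greeting for someone.'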
ronhanson/python-tbx | tbx/code.py | sort_dictionary_list | def sort_dictionary_list(dict_list, sort_key):
"""
sorts a list of dictionaries based on the value of the sort_key
dict_list - a list of dictionaries
sort_key - a string that identifies the key to sort the dictionaries with.
Test sorting a list of dictionaries:
>>> sort_dictionary_list([{'b' : 1, 'value' : 2}, {'c' : 2, 'value' : 3}, {'a' : 3, 'value' : 1}], 'value')
[{'a': 3, 'value': 1}, {'b': 1, 'value': 2}, {'c': 2, 'value': 3}]
"""
if not dict_list or len(dict_list) == 0:
return dict_list
dict_list.sort(key=itemgetter(sort_key))
return dict_list | python | def sort_dictionary_list(dict_list, sort_key):
"""
sorts a list of dictionaries based on the value of the sort_key
dict_list - a list of dictionaries
sort_key - a string that identifies the key to sort the dictionaries with.
Test sorting a list of dictionaries:
>>> sort_dictionary_list([{'b' : 1, 'value' : 2}, {'c' : 2, 'value' : 3}, {'a' : 3, 'value' : 1}], 'value')
[{'a': 3, 'value': 1}, {'b': 1, 'value': 2}, {'c': 2, 'value': 3}]
"""
if not dict_list or len(dict_list) == 0:
return dict_list
dict_list.sort(key=itemgetter(sort_key))
return dict_list | [
"def",
"sort_dictionary_list",
"(",
"dict_list",
",",
"sort_key",
")",
":",
"if",
"not",
"dict_list",
"or",
"len",
"(",
"dict_list",
")",
"==",
"0",
":",
"return",
"dict_list",
"dict_list",
".",
"sort",
"(",
"key",
"=",
"itemgetter",
"(",
"sort_key",
")",
")",
"return",
"dict_list"
]
| sorts a list of dictionaries based on the value of the sort_key
dict_list - a list of dictionaries
sort_key - a string that identifies the key to sort the dictionaries with.
Test sorting a list of dictionaries:
>>> sort_dictionary_list([{'b' : 1, 'value' : 2}, {'c' : 2, 'value' : 3}, {'a' : 3, 'value' : 1}], 'value')
[{'a': 3, 'value': 1}, {'b': 1, 'value': 2}, {'c': 2, 'value': 3}] | [
"sorts",
"a",
"list",
"of",
"dictionaries",
"based",
"on",
"the",
"value",
"of",
"the",
"sort_key"
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/code.py#L313-L327 | train |
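A small follow-up to the doctest above, assuming the function is importable: the sort happens in place via operator.itemgetter, so every dictionary must contain the sort key or a KeyError is raised.

rows = [{'name': 'b', 'rank': 2}, {'name': 'a', 'rank': 1}]
sort_dictionary_list(rows, 'rank')    # sorts in place and returns the list
print([r['name'] for r in rows])      # ['a', 'b']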
ronhanson/python-tbx | tbx/code.py | SerializableObject.safe_info | def safe_info(self, dic=None):
"""
Returns public information of the object
"""
if dic is None and dic != {}:
dic = self.to_dict()
output = {}
for (key, value) in dic.items():
if key[0] != '_':
if isinstance(value, SerializableObject):
output[key] = value.safe_info()
elif isinstance(value, dict):
output[key] = self.safe_info(dic=value)
elif isinstance(value, list):
output[key] = []
for f in value:
if isinstance(f, SerializableObject):
output[key].append(f.safe_info())
elif isinstance(f, dict):
output[key].append(self.safe_info(dic=f))
else:
output[key].append(f)
else:
output[key] = value
return output | python | def safe_info(self, dic=None):
"""
Returns public information of the object
"""
if dic is None and dic != {}:
dic = self.to_dict()
output = {}
for (key, value) in dic.items():
if key[0] != '_':
if isinstance(value, SerializableObject):
output[key] = value.safe_info()
elif isinstance(value, dict):
output[key] = self.safe_info(dic=value)
elif isinstance(value, list):
output[key] = []
for f in value:
if isinstance(f, SerializableObject):
output[key].append(f.safe_info())
elif isinstance(f, dict):
output[key].append(self.safe_info(dic=f))
else:
output[key].append(f)
else:
output[key] = value
return output | [
"def",
"safe_info",
"(",
"self",
",",
"dic",
"=",
"None",
")",
":",
"if",
"dic",
"is",
"None",
"and",
"dic",
"!=",
"{",
"}",
":",
"dic",
"=",
"self",
".",
"to_dict",
"(",
")",
"output",
"=",
"{",
"}",
"for",
"(",
"key",
",",
"value",
")",
"in",
"dic",
".",
"items",
"(",
")",
":",
"if",
"key",
"[",
"0",
"]",
"!=",
"'_'",
":",
"if",
"isinstance",
"(",
"value",
",",
"SerializableObject",
")",
":",
"output",
"[",
"key",
"]",
"=",
"value",
".",
"safe_info",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"output",
"[",
"key",
"]",
"=",
"self",
".",
"safe_info",
"(",
"dic",
"=",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"output",
"[",
"key",
"]",
"=",
"[",
"]",
"for",
"f",
"in",
"value",
":",
"if",
"isinstance",
"(",
"f",
",",
"SerializableObject",
")",
":",
"output",
"[",
"key",
"]",
".",
"append",
"(",
"f",
".",
"safe_info",
"(",
")",
")",
"elif",
"isinstance",
"(",
"f",
",",
"dict",
")",
":",
"output",
"[",
"key",
"]",
".",
"append",
"(",
"self",
".",
"safe_info",
"(",
"dic",
"=",
"f",
")",
")",
"else",
":",
"output",
"[",
"key",
"]",
".",
"append",
"(",
"f",
")",
"else",
":",
"output",
"[",
"key",
"]",
"=",
"value",
"return",
"output"
]
| Returns public information of the object | [
"Returns",
"public",
"information",
"of",
"the",
"object"
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/code.py#L286-L310 | train |
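A hedged sketch of safe_info in use. It assumes the enclosing SerializableObject class has a no-argument constructor and that to_dict() returns the instance state, as the first lines of the method imply; the User subclass is hypothetical.

class User(SerializableObject):
    def to_dict(self):
        return {'name': 'ada', '_password': 'secret', 'tags': ['admin']}

print(User().safe_info())
# expected: {'name': 'ada', 'tags': ['admin']} -- underscore-prefixed keys are dropped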
projectshift/shift-boiler | boiler/cli/cli.py | run | def run(host='0.0.0.0', port=5000, reload=True, debug=True):
""" Run development server """
from werkzeug.serving import run_simple
app = bootstrap.get_app()
return run_simple(
hostname=host,
port=port,
application=app,
use_reloader=reload,
use_debugger=debug,
) | python | def run(host='0.0.0.0', port=5000, reload=True, debug=True):
""" Run development server """
from werkzeug.serving import run_simple
app = bootstrap.get_app()
return run_simple(
hostname=host,
port=port,
application=app,
use_reloader=reload,
use_debugger=debug,
) | [
"def",
"run",
"(",
"host",
"=",
"'0.0.0.0'",
",",
"port",
"=",
"5000",
",",
"reload",
"=",
"True",
",",
"debug",
"=",
"True",
")",
":",
"from",
"werkzeug",
".",
"serving",
"import",
"run_simple",
"app",
"=",
"bootstrap",
".",
"get_app",
"(",
")",
"return",
"run_simple",
"(",
"hostname",
"=",
"host",
",",
"port",
"=",
"port",
",",
"application",
"=",
"app",
",",
"use_reloader",
"=",
"reload",
",",
"use_debugger",
"=",
"debug",
",",
")"
]
| Run development server | [
"Run",
"development",
"server"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/cli.py#L26-L37 | train |
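What the command above boils down to, demonstrated with a throwaway WSGI app instead of bootstrap.get_app(); this is a sketch of the underlying werkzeug call, not boiler's actual wiring.

from werkzeug.serving import run_simple

def demo_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from run_simple']

run_simple(hostname='127.0.0.1', port=5000, application=demo_app,
           use_reloader=False, use_debugger=True)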
projectshift/shift-boiler | boiler/cli/cli.py | shell | def shell():
""" Start application-aware shell """
app = bootstrap.get_app()
context = dict(app=app)
# and push app context
app_context = app.app_context()
app_context.push()
# got ipython?
ipython = importlib.util.find_spec("IPython")
# run now
if ipython:
from IPython import embed
embed(user_ns=context)
else:
import code
code.interact(local=context) | python | def shell():
""" Start application-aware shell """
app = bootstrap.get_app()
context = dict(app=app)
# and push app context
app_context = app.app_context()
app_context.push()
# got ipython?
ipython = importlib.util.find_spec("IPython")
# run now
if ipython:
from IPython import embed
embed(user_ns=context)
else:
import code
code.interact(local=context) | [
"def",
"shell",
"(",
")",
":",
"app",
"=",
"bootstrap",
".",
"get_app",
"(",
")",
"context",
"=",
"dict",
"(",
"app",
"=",
"app",
")",
"# and push app context",
"app_context",
"=",
"app",
".",
"app_context",
"(",
")",
"app_context",
".",
"push",
"(",
")",
"# got ipython?",
"ipython",
"=",
"importlib",
".",
"util",
".",
"find_spec",
"(",
"\"IPython\"",
")",
"# run now",
"if",
"ipython",
":",
"from",
"IPython",
"import",
"embed",
"embed",
"(",
"user_ns",
"=",
"context",
")",
"else",
":",
"import",
"code",
"code",
".",
"interact",
"(",
"local",
"=",
"context",
")"
]
| Start application-aware shell | [
"Start",
"application",
"-",
"aware",
"shell"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/cli.py#L41-L59 | train |
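The module-detection idiom used above, shown in isolation: importlib.util.find_spec() returns None when a package is absent, so IPython is only imported when it is actually installed.

import code
import importlib.util

ns = {'answer': 42}
if importlib.util.find_spec('IPython') is not None:
    from IPython import embed
    embed(user_ns=ns)
else:
    code.interact(local=ns)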
mardix/Mocha | mocha/render.py | SiteNavigation._push | def _push(self, title, view, class_name, is_class, **kwargs):
""" Push nav data stack """
# Set the page title
set_view_attr(view, "title", title, cls_name=class_name)
module_name = view.__module__
method_name = view.__name__
_endpoint = build_endpoint_route_name(view, "index" if is_class else method_name, class_name)
endpoint = kwargs.pop("endpoint", _endpoint)
kwargs.setdefault("endpoint_kwargs", {})
order = kwargs.pop("order", 0)
# Tags
_nav_tags = get_view_attr(view, "nav_tags", ["default"], cls_name=class_name)
tags = kwargs.pop("tags", _nav_tags)
if not isinstance(tags, list):
_ = tags
tags = [_]
kwargs["tags"] = tags
# visible: accepts a bool or list of callback to execute
visible = kwargs.pop("visible", [True])
if not isinstance(visible, list):
visible = [visible]
if get_view_attr(view, "nav_visible", cls_name=class_name) is False:
visible = False
kwargs["view"] = view
kwargs["visible"] = visible
kwargs["active"] = False
kwargs["key"] = class_name
if is_class: # class menu
kwargs["endpoint"] = endpoint
kwargs["has_subnav"] = True
else:
kwargs["has_subnav"] = False
kwargs.update({
"order": order,
"has_subnav": False,
"title": title,
"endpoint": endpoint,
})
self._title_map[endpoint] = title
path = "%s.%s" % (module_name, method_name if is_class else class_name)
attach_to = kwargs.pop("attach_to", [])
if not attach_to:
attach_to.append(path)
for path in attach_to:
if path not in self.MENU:
self.MENU[path] = {
"title": None,
"endpoint": None,
"endpoint_kwargs": {},
"order": None,
"subnav": [],
"kwargs": {}
}
if is_class: # class menu
self.MENU[path]["title"] = title
self.MENU[path]["order"] = order
self.MENU[path]["kwargs"] = kwargs
else: # sub menu
self.MENU[path]["subnav"].append(kwargs) | python | def _push(self, title, view, class_name, is_class, **kwargs):
""" Push nav data stack """
# Set the page title
set_view_attr(view, "title", title, cls_name=class_name)
module_name = view.__module__
method_name = view.__name__
_endpoint = build_endpoint_route_name(view, "index" if is_class else method_name, class_name)
endpoint = kwargs.pop("endpoint", _endpoint)
kwargs.setdefault("endpoint_kwargs", {})
order = kwargs.pop("order", 0)
# Tags
_nav_tags = get_view_attr(view, "nav_tags", ["default"], cls_name=class_name)
tags = kwargs.pop("tags", _nav_tags)
if not isinstance(tags, list):
_ = tags
tags = [_]
kwargs["tags"] = tags
# visible: accepts a bool or list of callback to execute
visible = kwargs.pop("visible", [True])
if not isinstance(visible, list):
visible = [visible]
if get_view_attr(view, "nav_visible", cls_name=class_name) is False:
visible = False
kwargs["view"] = view
kwargs["visible"] = visible
kwargs["active"] = False
kwargs["key"] = class_name
if is_class: # class menu
kwargs["endpoint"] = endpoint
kwargs["has_subnav"] = True
else:
kwargs["has_subnav"] = False
kwargs.update({
"order": order,
"has_subnav": False,
"title": title,
"endpoint": endpoint,
})
self._title_map[endpoint] = title
path = "%s.%s" % (module_name, method_name if is_class else class_name)
attach_to = kwargs.pop("attach_to", [])
if not attach_to:
attach_to.append(path)
for path in attach_to:
if path not in self.MENU:
self.MENU[path] = {
"title": None,
"endpoint": None,
"endpoint_kwargs": {},
"order": None,
"subnav": [],
"kwargs": {}
}
if is_class: # class menu
self.MENU[path]["title"] = title
self.MENU[path]["order"] = order
self.MENU[path]["kwargs"] = kwargs
else: # sub menu
self.MENU[path]["subnav"].append(kwargs) | [
"def",
"_push",
"(",
"self",
",",
"title",
",",
"view",
",",
"class_name",
",",
"is_class",
",",
"*",
"*",
"kwargs",
")",
":",
"# Set the page title",
"set_view_attr",
"(",
"view",
",",
"\"title\"",
",",
"title",
",",
"cls_name",
"=",
"class_name",
")",
"module_name",
"=",
"view",
".",
"__module__",
"method_name",
"=",
"view",
".",
"__name__",
"_endpoint",
"=",
"build_endpoint_route_name",
"(",
"view",
",",
"\"index\"",
"if",
"is_class",
"else",
"method_name",
",",
"class_name",
")",
"endpoint",
"=",
"kwargs",
".",
"pop",
"(",
"\"endpoint\"",
",",
"_endpoint",
")",
"kwargs",
".",
"setdefault",
"(",
"\"endpoint_kwargs\"",
",",
"{",
"}",
")",
"order",
"=",
"kwargs",
".",
"pop",
"(",
"\"order\"",
",",
"0",
")",
"# Tags",
"_nav_tags",
"=",
"get_view_attr",
"(",
"view",
",",
"\"nav_tags\"",
",",
"[",
"\"default\"",
"]",
",",
"cls_name",
"=",
"class_name",
")",
"tags",
"=",
"kwargs",
".",
"pop",
"(",
"\"tags\"",
",",
"_nav_tags",
")",
"if",
"not",
"isinstance",
"(",
"tags",
",",
"list",
")",
":",
"_",
"=",
"tags",
"tags",
"=",
"[",
"_",
"]",
"kwargs",
"[",
"\"tags\"",
"]",
"=",
"tags",
"# visible: accepts a bool or list of callback to execute",
"visible",
"=",
"kwargs",
".",
"pop",
"(",
"\"visible\"",
",",
"[",
"True",
"]",
")",
"if",
"not",
"isinstance",
"(",
"visible",
",",
"list",
")",
":",
"visible",
"=",
"[",
"visible",
"]",
"if",
"get_view_attr",
"(",
"view",
",",
"\"nav_visible\"",
",",
"cls_name",
"=",
"class_name",
")",
"is",
"False",
":",
"visible",
"=",
"False",
"kwargs",
"[",
"\"view\"",
"]",
"=",
"view",
"kwargs",
"[",
"\"visible\"",
"]",
"=",
"visible",
"kwargs",
"[",
"\"active\"",
"]",
"=",
"False",
"kwargs",
"[",
"\"key\"",
"]",
"=",
"class_name",
"if",
"is_class",
":",
"# class menu",
"kwargs",
"[",
"\"endpoint\"",
"]",
"=",
"endpoint",
"kwargs",
"[",
"\"has_subnav\"",
"]",
"=",
"True",
"else",
":",
"kwargs",
"[",
"\"has_subnav\"",
"]",
"=",
"False",
"kwargs",
".",
"update",
"(",
"{",
"\"order\"",
":",
"order",
",",
"\"has_subnav\"",
":",
"False",
",",
"\"title\"",
":",
"title",
",",
"\"endpoint\"",
":",
"endpoint",
",",
"}",
")",
"self",
".",
"_title_map",
"[",
"endpoint",
"]",
"=",
"title",
"path",
"=",
"\"%s.%s\"",
"%",
"(",
"module_name",
",",
"method_name",
"if",
"is_class",
"else",
"class_name",
")",
"attach_to",
"=",
"kwargs",
".",
"pop",
"(",
"\"attach_to\"",
",",
"[",
"]",
")",
"if",
"not",
"attach_to",
":",
"attach_to",
".",
"append",
"(",
"path",
")",
"for",
"path",
"in",
"attach_to",
":",
"if",
"path",
"not",
"in",
"self",
".",
"MENU",
":",
"self",
".",
"MENU",
"[",
"path",
"]",
"=",
"{",
"\"title\"",
":",
"None",
",",
"\"endpoint\"",
":",
"None",
",",
"\"endpoint_kwargs\"",
":",
"{",
"}",
",",
"\"order\"",
":",
"None",
",",
"\"subnav\"",
":",
"[",
"]",
",",
"\"kwargs\"",
":",
"{",
"}",
"}",
"if",
"is_class",
":",
"# class menu",
"self",
".",
"MENU",
"[",
"path",
"]",
"[",
"\"title\"",
"]",
"=",
"title",
"self",
".",
"MENU",
"[",
"path",
"]",
"[",
"\"order\"",
"]",
"=",
"order",
"self",
".",
"MENU",
"[",
"path",
"]",
"[",
"\"kwargs\"",
"]",
"=",
"kwargs",
"else",
":",
"# sub menu",
"self",
".",
"MENU",
"[",
"path",
"]",
"[",
"\"subnav\"",
"]",
".",
"append",
"(",
"kwargs",
")"
]
| Push nav data stack | [
"Push",
"nav",
"data",
"stack"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/render.py#L329-L400 | train |
mardix/Mocha | mocha/render.py | SiteNavigation.render | def render(self):
""" Render the menu into a sorted by order multi dict """
menu_list = []
menu_index = 0
for _, menu in copy.deepcopy(self.MENU).items():
subnav = []
menu["kwargs"]["_id"] = str(menu_index)
menu["kwargs"]["active"] = False
if "visible" in menu["kwargs"]:
menu["kwargs"]["visible"] = self._test_visibility(menu["kwargs"]["visible"])
for s in menu["subnav"]:
if s["title"]:
s["title"] = self._get_title(s["title"])
if s["endpoint"] == request.endpoint:
s["active"] = True
menu["kwargs"]["active"] = True
s["visible"] = self._test_visibility(s["visible"])
menu_index += 1
s["_id"] = str(menu_index)
subnav.append(s)
_kwargs = menu["kwargs"]
if menu["title"]:
_kwargs.update({
"subnav": self._sort(subnav),
"order": menu["order"],
"title": self._get_title(menu["title"])
})
menu_list.append(_kwargs)
else:
menu_list += subnav
menu_index += 1
return self._sort(menu_list) | python | def render(self):
""" Render the menu into a sorted by order multi dict """
menu_list = []
menu_index = 0
for _, menu in copy.deepcopy(self.MENU).items():
subnav = []
menu["kwargs"]["_id"] = str(menu_index)
menu["kwargs"]["active"] = False
if "visible" in menu["kwargs"]:
menu["kwargs"]["visible"] = self._test_visibility(menu["kwargs"]["visible"])
for s in menu["subnav"]:
if s["title"]:
s["title"] = self._get_title(s["title"])
if s["endpoint"] == request.endpoint:
s["active"] = True
menu["kwargs"]["active"] = True
s["visible"] = self._test_visibility(s["visible"])
menu_index += 1
s["_id"] = str(menu_index)
subnav.append(s)
_kwargs = menu["kwargs"]
if menu["title"]:
_kwargs.update({
"subnav": self._sort(subnav),
"order": menu["order"],
"title": self._get_title(menu["title"])
})
menu_list.append(_kwargs)
else:
menu_list += subnav
menu_index += 1
return self._sort(menu_list) | [
"def",
"render",
"(",
"self",
")",
":",
"menu_list",
"=",
"[",
"]",
"menu_index",
"=",
"0",
"for",
"_",
",",
"menu",
"in",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"MENU",
")",
".",
"items",
"(",
")",
":",
"subnav",
"=",
"[",
"]",
"menu",
"[",
"\"kwargs\"",
"]",
"[",
"\"_id\"",
"]",
"=",
"str",
"(",
"menu_index",
")",
"menu",
"[",
"\"kwargs\"",
"]",
"[",
"\"active\"",
"]",
"=",
"False",
"if",
"\"visible\"",
"in",
"menu",
"[",
"\"kwargs\"",
"]",
":",
"menu",
"[",
"\"kwargs\"",
"]",
"[",
"\"visible\"",
"]",
"=",
"self",
".",
"_test_visibility",
"(",
"menu",
"[",
"\"kwargs\"",
"]",
"[",
"\"visible\"",
"]",
")",
"for",
"s",
"in",
"menu",
"[",
"\"subnav\"",
"]",
":",
"if",
"s",
"[",
"\"title\"",
"]",
":",
"s",
"[",
"\"title\"",
"]",
"=",
"self",
".",
"_get_title",
"(",
"s",
"[",
"\"title\"",
"]",
")",
"if",
"s",
"[",
"\"endpoint\"",
"]",
"==",
"request",
".",
"endpoint",
":",
"s",
"[",
"\"active\"",
"]",
"=",
"True",
"menu",
"[",
"\"kwargs\"",
"]",
"[",
"\"active\"",
"]",
"=",
"True",
"s",
"[",
"\"visible\"",
"]",
"=",
"self",
".",
"_test_visibility",
"(",
"s",
"[",
"\"visible\"",
"]",
")",
"menu_index",
"+=",
"1",
"s",
"[",
"\"_id\"",
"]",
"=",
"str",
"(",
"menu_index",
")",
"subnav",
".",
"append",
"(",
"s",
")",
"_kwargs",
"=",
"menu",
"[",
"\"kwargs\"",
"]",
"if",
"menu",
"[",
"\"title\"",
"]",
":",
"_kwargs",
".",
"update",
"(",
"{",
"\"subnav\"",
":",
"self",
".",
"_sort",
"(",
"subnav",
")",
",",
"\"order\"",
":",
"menu",
"[",
"\"order\"",
"]",
",",
"\"title\"",
":",
"self",
".",
"_get_title",
"(",
"menu",
"[",
"\"title\"",
"]",
")",
"}",
")",
"menu_list",
".",
"append",
"(",
"_kwargs",
")",
"else",
":",
"menu_list",
"+=",
"subnav",
"menu_index",
"+=",
"1",
"return",
"self",
".",
"_sort",
"(",
"menu_list",
")"
]
| Render the menu into a multi dict sorted by order | [
"Render",
"the",
"menu",
"into",
"a",
"sorted",
"by",
"order",
"multi",
"dict"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/render.py#L425-L462 | train |
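A hypothetical shape for one rendered top-level entry, inferred from the keys set in _push() and render() above rather than taken from Mocha's documentation; titles and endpoints are placeholders.

rendered_entry = {
    '_id': '0', 'title': 'Admin', 'order': 0, 'key': 'AdminView',
    'active': False, 'visible': True, 'has_subnav': True,
    'subnav': [
        {'_id': '1', 'title': 'Users', 'endpoint': 'admin.users',
         'order': 0, 'active': False, 'visible': True},
    ],
}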
PBR/MQ2 | MQ2/add_qtl_to_map.py | add_qtl_to_marker | def add_qtl_to_marker(marker, qtls):
"""Add the number of QTLs found for a given marker.
    :arg marker, the marker for which we are looking for QTLs.
:arg qtls, the list of all QTLs found.
"""
cnt = 0
for qtl in qtls:
if qtl[-1] == marker[0]:
cnt = cnt + 1
marker.append(str(cnt))
return marker | python | def add_qtl_to_marker(marker, qtls):
"""Add the number of QTLs found for a given marker.
    :arg marker, the marker for which we are looking for QTLs.
:arg qtls, the list of all QTLs found.
"""
cnt = 0
for qtl in qtls:
if qtl[-1] == marker[0]:
cnt = cnt + 1
marker.append(str(cnt))
return marker | [
"def",
"add_qtl_to_marker",
"(",
"marker",
",",
"qtls",
")",
":",
"cnt",
"=",
"0",
"for",
"qtl",
"in",
"qtls",
":",
"if",
"qtl",
"[",
"-",
"1",
"]",
"==",
"marker",
"[",
"0",
"]",
":",
"cnt",
"=",
"cnt",
"+",
"1",
"marker",
".",
"append",
"(",
"str",
"(",
"cnt",
")",
")",
"return",
"marker"
]
| Add the number of QTLs found for a given marker.
:arg marker, the marker for which we are looking for QTLs.
:arg qtls, the list of all QTLs found. | [
"Add",
"the",
"number",
"of",
"QTLs",
"found",
"for",
"a",
"given",
"marker",
"."
]
| 6d84dea47e6751333004743f588f03158e35c28d | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/add_qtl_to_map.py#L38-L51 | train |
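A toy run of the counter above: the last column of each QTL row holds the closest marker name, which is compared against marker[0].

marker = ['m1', 'LG1', '13.4']
qtls = [['q1', 'LG1', 'm1'], ['q2', 'LG1', 'm1'], ['q3', 'LG2', 'm2']]
print(add_qtl_to_marker(marker, qtls))   # ['m1', 'LG1', '13.4', '2']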
PBR/MQ2 | MQ2/add_qtl_to_map.py | add_qtl_to_map | def add_qtl_to_map(qtlfile, mapfile, outputfile='map_with_qtls.csv'):
""" This function adds to a genetic map for each marker the number
of significant QTLs found.
:arg qtlfile, the output from MapQTL transformed to a csv file via
'parse_mapqtl_file' which contains the closest markers.
:arg mapfile, the genetic map with all the markers.
:kwarg outputfile, the name of the output file in which the map will
be written.
"""
qtl_list = read_input_file(qtlfile, ',')
map_list = read_input_file(mapfile, ',')
map_list[0].append('# QTLs')
markers = []
markers.append(map_list[0])
qtl_cnt = 0
for marker in map_list[1:]:
markers.append(add_qtl_to_marker(marker, qtl_list[1:]))
qtl_cnt = qtl_cnt + int(markers[-1][-1])
LOG.info('- %s markers processed in %s' % (len(markers), mapfile))
LOG.info('- %s QTLs located in the map: %s' % (qtl_cnt, outputfile))
write_matrix(outputfile, markers) | python | def add_qtl_to_map(qtlfile, mapfile, outputfile='map_with_qtls.csv'):
""" This function adds to a genetic map for each marker the number
of significant QTLs found.
:arg qtlfile, the output from MapQTL transformed to a csv file via
'parse_mapqtl_file' which contains the closest markers.
:arg mapfile, the genetic map with all the markers.
:kwarg outputfile, the name of the output file in which the map will
be written.
"""
qtl_list = read_input_file(qtlfile, ',')
map_list = read_input_file(mapfile, ',')
map_list[0].append('# QTLs')
markers = []
markers.append(map_list[0])
qtl_cnt = 0
for marker in map_list[1:]:
markers.append(add_qtl_to_marker(marker, qtl_list[1:]))
qtl_cnt = qtl_cnt + int(markers[-1][-1])
LOG.info('- %s markers processed in %s' % (len(markers), mapfile))
LOG.info('- %s QTLs located in the map: %s' % (qtl_cnt, outputfile))
write_matrix(outputfile, markers) | [
"def",
"add_qtl_to_map",
"(",
"qtlfile",
",",
"mapfile",
",",
"outputfile",
"=",
"'map_with_qtls.csv'",
")",
":",
"qtl_list",
"=",
"read_input_file",
"(",
"qtlfile",
",",
"','",
")",
"map_list",
"=",
"read_input_file",
"(",
"mapfile",
",",
"','",
")",
"map_list",
"[",
"0",
"]",
".",
"append",
"(",
"'# QTLs'",
")",
"markers",
"=",
"[",
"]",
"markers",
".",
"append",
"(",
"map_list",
"[",
"0",
"]",
")",
"qtl_cnt",
"=",
"0",
"for",
"marker",
"in",
"map_list",
"[",
"1",
":",
"]",
":",
"markers",
".",
"append",
"(",
"add_qtl_to_marker",
"(",
"marker",
",",
"qtl_list",
"[",
"1",
":",
"]",
")",
")",
"qtl_cnt",
"=",
"qtl_cnt",
"+",
"int",
"(",
"markers",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
")",
"LOG",
".",
"info",
"(",
"'- %s markers processed in %s'",
"%",
"(",
"len",
"(",
"markers",
")",
",",
"mapfile",
")",
")",
"LOG",
".",
"info",
"(",
"'- %s QTLs located in the map: %s'",
"%",
"(",
"qtl_cnt",
",",
"outputfile",
")",
")",
"write_matrix",
"(",
"outputfile",
",",
"markers",
")"
]
| This function adds to a genetic map for each marker the number
of significant QTLs found.
:arg qtlfile, the output from MapQTL transformed to a csv file via
'parse_mapqtl_file' which contains the closest markers.
:arg mapfile, the genetic map with all the markers.
:kwarg outputfile, the name of the output file in which the map will
be written. | [
"This",
"function",
"adds",
"to",
"a",
"genetic",
"map",
"for",
"each",
"marker",
"the",
"number",
"of",
"significant",
"QTLs",
"found",
"."
]
| 6d84dea47e6751333004743f588f03158e35c28d | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/add_qtl_to_map.py#L54-L76 | train |
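The same pipeline on in-memory rows, skipping MQ2's CSV helpers (read_input_file and write_matrix); the column layout here is illustrative, not the exact MapQTL format.

map_rows = [['marker', 'lg', 'pos'], ['m1', '1', '10.0'], ['m2', '1', '22.5']]
qtl_rows = [['qtl', 'closest_marker'], ['q1', 'm1'], ['q2', 'm1']]
map_rows[0].append('# QTLs')
out = [map_rows[0]] + [add_qtl_to_marker(m, qtl_rows[1:]) for m in map_rows[1:]]
# out[1] == ['m1', '1', '10.0', '2'], out[2] == ['m2', '1', '22.5', '0']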
franciscoruiz/python-elm | elm327/connection.py | SerialConnection.send_command | def send_command(self, data, read_delay=None):
"""Write "data" to the port and return the response form it"""
self._write(data)
if read_delay:
time.sleep(read_delay)
return self._read() | python | def send_command(self, data, read_delay=None):
"""Write "data" to the port and return the response form it"""
self._write(data)
if read_delay:
time.sleep(read_delay)
return self._read() | [
"def",
"send_command",
"(",
"self",
",",
"data",
",",
"read_delay",
"=",
"None",
")",
":",
"self",
".",
"_write",
"(",
"data",
")",
"if",
"read_delay",
":",
"time",
".",
"sleep",
"(",
"read_delay",
")",
"return",
"self",
".",
"_read",
"(",
")"
]
| Write "data" to the port and return the response from it | [
"Write",
"data",
"to",
"the",
"port",
"and",
"return",
"the",
"response",
"form",
"it"
]
| cdcecfc363b1eb25d21659bc14cf68a4a19970b6 | https://github.com/franciscoruiz/python-elm/blob/cdcecfc363b1eb25d21659bc14cf68a4a19970b6/elm327/connection.py#L45-L50 | train |
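A test-double sketch: overriding the private I/O hooks lets send_command be exercised without a serial port. Bypassing __init__ is for the demo only; the real constructor opens the port.

class _FakeConnection(SerialConnection):
    def __init__(self):              # skip real port setup for the demo
        pass
    def _write(self, data):
        self.sent = data
    def _read(self):
        return 'OK>'

print(_FakeConnection().send_command('ATZ\r'))   # 'OK>'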
inveniosoftware/invenio-pidrelations | invenio_pidrelations/serializers/utils.py | serialize_relations | def serialize_relations(pid):
"""Serialize the relations for given PID."""
data = {}
relations = PIDRelation.get_child_relations(pid).all()
for relation in relations:
rel_cfg = resolve_relation_type_config(relation.relation_type)
dump_relation(rel_cfg.api(relation.parent),
rel_cfg, pid, data)
parent_relations = PIDRelation.get_parent_relations(pid).all()
rel_cfgs = set([resolve_relation_type_config(p) for p in parent_relations])
for rel_cfg in rel_cfgs:
dump_relation(rel_cfg.api(pid), rel_cfg, pid, data)
return data | python | def serialize_relations(pid):
"""Serialize the relations for given PID."""
data = {}
relations = PIDRelation.get_child_relations(pid).all()
for relation in relations:
rel_cfg = resolve_relation_type_config(relation.relation_type)
dump_relation(rel_cfg.api(relation.parent),
rel_cfg, pid, data)
parent_relations = PIDRelation.get_parent_relations(pid).all()
rel_cfgs = set([resolve_relation_type_config(p) for p in parent_relations])
for rel_cfg in rel_cfgs:
dump_relation(rel_cfg.api(pid), rel_cfg, pid, data)
return data | [
"def",
"serialize_relations",
"(",
"pid",
")",
":",
"data",
"=",
"{",
"}",
"relations",
"=",
"PIDRelation",
".",
"get_child_relations",
"(",
"pid",
")",
".",
"all",
"(",
")",
"for",
"relation",
"in",
"relations",
":",
"rel_cfg",
"=",
"resolve_relation_type_config",
"(",
"relation",
".",
"relation_type",
")",
"dump_relation",
"(",
"rel_cfg",
".",
"api",
"(",
"relation",
".",
"parent",
")",
",",
"rel_cfg",
",",
"pid",
",",
"data",
")",
"parent_relations",
"=",
"PIDRelation",
".",
"get_parent_relations",
"(",
"pid",
")",
".",
"all",
"(",
")",
"rel_cfgs",
"=",
"set",
"(",
"[",
"resolve_relation_type_config",
"(",
"p",
")",
"for",
"p",
"in",
"parent_relations",
"]",
")",
"for",
"rel_cfg",
"in",
"rel_cfgs",
":",
"dump_relation",
"(",
"rel_cfg",
".",
"api",
"(",
"pid",
")",
",",
"rel_cfg",
",",
"pid",
",",
"data",
")",
"return",
"data"
]
| Serialize the relations for a given PID. | [
"Serialize",
"the",
"relations",
"for",
"given",
"PID",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/serializers/utils.py#L32-L44 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/serializers/utils.py | dump_relation | def dump_relation(api, rel_cfg, pid, data):
"""Dump a specific relation to a data dict."""
schema_class = rel_cfg.schema
if schema_class is not None:
schema = schema_class()
schema.context['pid'] = pid
result, errors = schema.dump(api)
data.setdefault(rel_cfg.name, []).append(result) | python | def dump_relation(api, rel_cfg, pid, data):
"""Dump a specific relation to a data dict."""
schema_class = rel_cfg.schema
if schema_class is not None:
schema = schema_class()
schema.context['pid'] = pid
result, errors = schema.dump(api)
data.setdefault(rel_cfg.name, []).append(result) | [
"def",
"dump_relation",
"(",
"api",
",",
"rel_cfg",
",",
"pid",
",",
"data",
")",
":",
"schema_class",
"=",
"rel_cfg",
".",
"schema",
"if",
"schema_class",
"is",
"not",
"None",
":",
"schema",
"=",
"schema_class",
"(",
")",
"schema",
".",
"context",
"[",
"'pid'",
"]",
"=",
"pid",
"result",
",",
"errors",
"=",
"schema",
".",
"dump",
"(",
"api",
")",
"data",
".",
"setdefault",
"(",
"rel_cfg",
".",
"name",
",",
"[",
"]",
")",
".",
"append",
"(",
"result",
")"
]
| Dump a specific relation to a data dict. | [
"Dump",
"a",
"specific",
"relation",
"to",
"a",
"data",
"dict",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/serializers/utils.py#L47-L54 | train |
mrtazz/InstapaperLibrary | instapaperlib/instapaperlib.py | Instapaper.add_item | def add_item(self, url, title=None, selection=None,
jsonp=None, redirect=None, response_info=False):
""" Method to add a new item to a instapaper account
Parameters: url -> URL to add
title -> optional title for the URL
Returns: (status as int, status error message)
"""
parameters = {
'username' : self.user,
'password' : self.password,
'url' : url,
}
# look for optional parameters title and selection
if title is not None:
parameters['title'] = title
else:
parameters['auto-title'] = 1
if selection is not None:
parameters['selection'] = selection
if redirect is not None:
parameters['redirect'] = redirect
if jsonp is not None:
parameters['jsonp'] = jsonp
# make query with the chosen parameters
status, headers = self._query(self.addurl, parameters)
# return the callback call if we want jsonp
if jsonp is not None:
return status
statustxt = self.add_status_codes[int(status)]
# if response headers are desired, return them also
if response_info:
return (int(status), statustxt, headers['title'], headers['location'])
else:
return (int(status), statustxt) | python | def add_item(self, url, title=None, selection=None,
jsonp=None, redirect=None, response_info=False):
""" Method to add a new item to a instapaper account
Parameters: url -> URL to add
title -> optional title for the URL
Returns: (status as int, status error message)
"""
parameters = {
'username' : self.user,
'password' : self.password,
'url' : url,
}
# look for optional parameters title and selection
if title is not None:
parameters['title'] = title
else:
parameters['auto-title'] = 1
if selection is not None:
parameters['selection'] = selection
if redirect is not None:
parameters['redirect'] = redirect
if jsonp is not None:
parameters['jsonp'] = jsonp
# make query with the chosen parameters
status, headers = self._query(self.addurl, parameters)
# return the callback call if we want jsonp
if jsonp is not None:
return status
statustxt = self.add_status_codes[int(status)]
# if response headers are desired, return them also
if response_info:
return (int(status), statustxt, headers['title'], headers['location'])
else:
return (int(status), statustxt) | [
"def",
"add_item",
"(",
"self",
",",
"url",
",",
"title",
"=",
"None",
",",
"selection",
"=",
"None",
",",
"jsonp",
"=",
"None",
",",
"redirect",
"=",
"None",
",",
"response_info",
"=",
"False",
")",
":",
"parameters",
"=",
"{",
"'username'",
":",
"self",
".",
"user",
",",
"'password'",
":",
"self",
".",
"password",
",",
"'url'",
":",
"url",
",",
"}",
"# look for optional parameters title and selection",
"if",
"title",
"is",
"not",
"None",
":",
"parameters",
"[",
"'title'",
"]",
"=",
"title",
"else",
":",
"parameters",
"[",
"'auto-title'",
"]",
"=",
"1",
"if",
"selection",
"is",
"not",
"None",
":",
"parameters",
"[",
"'selection'",
"]",
"=",
"selection",
"if",
"redirect",
"is",
"not",
"None",
":",
"parameters",
"[",
"'redirect'",
"]",
"=",
"redirect",
"if",
"jsonp",
"is",
"not",
"None",
":",
"parameters",
"[",
"'jsonp'",
"]",
"=",
"jsonp",
"# make query with the chosen parameters",
"status",
",",
"headers",
"=",
"self",
".",
"_query",
"(",
"self",
".",
"addurl",
",",
"parameters",
")",
"# return the callback call if we want jsonp",
"if",
"jsonp",
"is",
"not",
"None",
":",
"return",
"status",
"statustxt",
"=",
"self",
".",
"add_status_codes",
"[",
"int",
"(",
"status",
")",
"]",
"# if response headers are desired, return them also",
"if",
"response_info",
":",
"return",
"(",
"int",
"(",
"status",
")",
",",
"statustxt",
",",
"headers",
"[",
"'title'",
"]",
",",
"headers",
"[",
"'location'",
"]",
")",
"else",
":",
"return",
"(",
"int",
"(",
"status",
")",
",",
"statustxt",
")"
]
| Method to add a new item to an Instapaper account
Parameters: url -> URL to add
title -> optional title for the URL
Returns: (status as int, status error message) | [
"Method",
"to",
"add",
"a",
"new",
"item",
"to",
"a",
"instapaper",
"account"
]
| bf273c02b468e523994d46def07f70902f596676 | https://github.com/mrtazz/InstapaperLibrary/blob/bf273c02b468e523994d46def07f70902f596676/instapaperlib/instapaperlib.py#L75-L110 | train |
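A hedged usage sketch: instapaperlib's Instapaper client is assumed to be constructed with the username and password the query above sends; credentials and URL are placeholders, and the call performs a real network request.

client = Instapaper('user@example.com', 'secret')
status, message = client.add_item('http://example.com/article',
                                  title='Example article')
print(status, message)   # e.g. 201 plus the matching status text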
mrtazz/InstapaperLibrary | instapaperlib/instapaperlib.py | Instapaper._query | def _query(self, url=None, params=""):
""" method to query a URL with the given parameters
Parameters:
url -> URL to query
params -> dictionary with parameter values
Returns: HTTP response code, headers
                 If an exception occurred, the header fields are None
"""
if url is None:
raise NoUrlError("No URL was provided.")
# return values
headers = {'location': None, 'title': None}
headerdata = urllib.urlencode(params)
try:
request = urllib2.Request(url, headerdata)
response = urllib2.urlopen(request)
# return numeric HTTP status code unless JSONP was requested
if 'jsonp' in params:
status = response.read()
else:
status = response.getcode()
info = response.info()
try:
headers['location'] = info['Content-Location']
except KeyError:
pass
try:
headers['title'] = info['X-Instapaper-Title']
except KeyError:
pass
return (status, headers)
except urllib2.HTTPError as exception:
# handle API not returning JSONP response on 403
if 'jsonp' in params:
return ('%s({"status":%d})' % (params['jsonp'], exception.code), headers)
else:
return (exception.code, headers)
except IOError as exception:
return (exception.code, headers) | python | def _query(self, url=None, params=""):
""" method to query a URL with the given parameters
Parameters:
url -> URL to query
params -> dictionary with parameter values
Returns: HTTP response code, headers
                 If an exception occurred, the header fields are None
"""
if url is None:
raise NoUrlError("No URL was provided.")
# return values
headers = {'location': None, 'title': None}
headerdata = urllib.urlencode(params)
try:
request = urllib2.Request(url, headerdata)
response = urllib2.urlopen(request)
# return numeric HTTP status code unless JSONP was requested
if 'jsonp' in params:
status = response.read()
else:
status = response.getcode()
info = response.info()
try:
headers['location'] = info['Content-Location']
except KeyError:
pass
try:
headers['title'] = info['X-Instapaper-Title']
except KeyError:
pass
return (status, headers)
except urllib2.HTTPError as exception:
# handle API not returning JSONP response on 403
if 'jsonp' in params:
return ('%s({"status":%d})' % (params['jsonp'], exception.code), headers)
else:
return (exception.code, headers)
except IOError as exception:
return (exception.code, headers) | [
"def",
"_query",
"(",
"self",
",",
"url",
"=",
"None",
",",
"params",
"=",
"\"\"",
")",
":",
"if",
"url",
"is",
"None",
":",
"raise",
"NoUrlError",
"(",
"\"No URL was provided.\"",
")",
"# return values",
"headers",
"=",
"{",
"'location'",
":",
"None",
",",
"'title'",
":",
"None",
"}",
"headerdata",
"=",
"urllib",
".",
"urlencode",
"(",
"params",
")",
"try",
":",
"request",
"=",
"urllib2",
".",
"Request",
"(",
"url",
",",
"headerdata",
")",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"request",
")",
"# return numeric HTTP status code unless JSONP was requested",
"if",
"'jsonp'",
"in",
"params",
":",
"status",
"=",
"response",
".",
"read",
"(",
")",
"else",
":",
"status",
"=",
"response",
".",
"getcode",
"(",
")",
"info",
"=",
"response",
".",
"info",
"(",
")",
"try",
":",
"headers",
"[",
"'location'",
"]",
"=",
"info",
"[",
"'Content-Location'",
"]",
"except",
"KeyError",
":",
"pass",
"try",
":",
"headers",
"[",
"'title'",
"]",
"=",
"info",
"[",
"'X-Instapaper-Title'",
"]",
"except",
"KeyError",
":",
"pass",
"return",
"(",
"status",
",",
"headers",
")",
"except",
"urllib2",
".",
"HTTPError",
"as",
"exception",
":",
"# handle API not returning JSONP response on 403",
"if",
"'jsonp'",
"in",
"params",
":",
"return",
"(",
"'%s({\"status\":%d})'",
"%",
"(",
"params",
"[",
"'jsonp'",
"]",
",",
"exception",
".",
"code",
")",
",",
"headers",
")",
"else",
":",
"return",
"(",
"exception",
".",
"code",
",",
"headers",
")",
"except",
"IOError",
"as",
"exception",
":",
"return",
"(",
"exception",
".",
"code",
",",
"headers",
")"
]
| method to query a URL with the given parameters
Parameters:
url -> URL to query
params -> dictionary with parameter values
Returns: HTTP response code, headers
If an exception occurred, the header fields are None | [
"method",
"to",
"query",
"a",
"URL",
"with",
"the",
"given",
"parameters"
]
| bf273c02b468e523994d46def07f70902f596676 | https://github.com/mrtazz/InstapaperLibrary/blob/bf273c02b468e523994d46def07f70902f596676/instapaperlib/instapaperlib.py#L135-L176 | train |
mardix/Mocha | mocha/decorators.py | cors | def cors(*args, **kwargs):
"""
A wrapper around flask-cors cross_origin, to also act on classes
    **An extra note about CORS: a response must be available before
    CORS is applied. A dynamic return is applied after the fact, so use the
    json or xml decorators, or return self.render() for txt/html
ie:
@cors()
class Index(Mocha):
def index(self):
return self.render()
@json
def json(self):
return {}
class Index2(Mocha):
def index(self):
return self.render()
@cors()
@json
def json(self):
return {}
:return:
"""
def decorator(fn):
cors_fn = flask_cors.cross_origin(automatic_options=False, *args, **kwargs)
if inspect.isclass(fn):
apply_function_to_members(fn, cors_fn)
else:
return cors_fn(fn)
return fn
return decorator | python | def cors(*args, **kwargs):
"""
A wrapper around flask-cors cross_origin, to also act on classes
    **An extra note about CORS: a response must be available before
    CORS is applied. A dynamic return is applied after the fact, so use the
    json or xml decorators, or return self.render() for txt/html
ie:
@cors()
class Index(Mocha):
def index(self):
return self.render()
@json
def json(self):
return {}
class Index2(Mocha):
def index(self):
return self.render()
@cors()
@json
def json(self):
return {}
:return:
"""
def decorator(fn):
cors_fn = flask_cors.cross_origin(automatic_options=False, *args, **kwargs)
if inspect.isclass(fn):
apply_function_to_members(fn, cors_fn)
else:
return cors_fn(fn)
return fn
return decorator | [
"def",
"cors",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"decorator",
"(",
"fn",
")",
":",
"cors_fn",
"=",
"flask_cors",
".",
"cross_origin",
"(",
"automatic_options",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"inspect",
".",
"isclass",
"(",
"fn",
")",
":",
"apply_function_to_members",
"(",
"fn",
",",
"cors_fn",
")",
"else",
":",
"return",
"cors_fn",
"(",
"fn",
")",
"return",
"fn",
"return",
"decorator"
]
| A wrapper around flask-cors cross_origin, to also act on classes
**An extra note about CORS: a response must be available before
CORS is applied. A dynamic return is applied after the fact, so use the
json or xml decorators, or return self.render() for txt/html
ie:
@cors()
class Index(Mocha):
def index(self):
return self.render()
@json
def json(self):
return {}
class Index2(Mocha):
def index(self):
return self.render()
@cors()
@json
def json(self):
return {}
:return: | [
"A",
"wrapper",
"around",
"flask",
"-",
"cors",
"cross_origin",
"to",
"also",
"act",
"on",
"classes"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/decorators.py#L42-L78 | train |
Kortemme-Lab/klab | klab/bio/clustalo.py | SequenceAligner.get_residue_mapping | def get_residue_mapping(self):
'''Returns a mapping between the sequences ONLY IF there are exactly two. This restriction makes the code much simpler.'''
if len(self.sequence_ids) == 2:
if not self.alignment_output:
self.align()
assert(self.alignment_output)
return self._create_residue_map(self._get_alignment_lines(), self.sequence_ids[1], self.sequence_ids[2])
else:
return None | python | def get_residue_mapping(self):
'''Returns a mapping between the sequences ONLY IF there are exactly two. This restriction makes the code much simpler.'''
if len(self.sequence_ids) == 2:
if not self.alignment_output:
self.align()
assert(self.alignment_output)
return self._create_residue_map(self._get_alignment_lines(), self.sequence_ids[1], self.sequence_ids[2])
else:
return None | [
"def",
"get_residue_mapping",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"sequence_ids",
")",
"==",
"2",
":",
"if",
"not",
"self",
".",
"alignment_output",
":",
"self",
".",
"align",
"(",
")",
"assert",
"(",
"self",
".",
"alignment_output",
")",
"return",
"self",
".",
"_create_residue_map",
"(",
"self",
".",
"_get_alignment_lines",
"(",
")",
",",
"self",
".",
"sequence_ids",
"[",
"1",
"]",
",",
"self",
".",
"sequence_ids",
"[",
"2",
"]",
")",
"else",
":",
"return",
"None"
]
| Returns a mapping between the sequences ONLY IF there are exactly two. This restriction makes the code much simpler. | [
"Returns",
"a",
"mapping",
"between",
"the",
"sequences",
"ONLY",
"IF",
"there",
"are",
"exactly",
"two",
".",
"This",
"restriction",
"makes",
"the",
"code",
"much",
"simpler",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/clustalo.py#L224-L232 | train |
Kortemme-Lab/klab | klab/bio/clustalo.py | PDBUniParcSequenceAligner.realign | def realign(self, cut_off, chains_to_skip = set()):
''' Alter the cut-off and run alignment again. This is much quicker than creating a new PDBUniParcSequenceAligner
object as the UniParcEntry creation etc. in the constructor does not need to be repeated.
The chains_to_skip argument (a Set) allows us to skip chains that were already matched which speeds up the alignment even more.
'''
if cut_off != self.cut_off:
self.cut_off = cut_off
# Wipe any existing information for chains not in chains_to_skip
for c in self.chains:
if c not in chains_to_skip:
self.clustal_matches[c] = None
self.substring_matches[c] = None
if self.alignment.get(c):
del self.alignment[c]
if self.seqres_to_uniparc_sequence_maps.get(c):
del self.seqres_to_uniparc_sequence_maps[c]
# Run alignment for the remaining chains
self._align_with_clustal(chains_to_skip = chains_to_skip)
self._align_with_substrings(chains_to_skip = chains_to_skip)
self._check_alignments(chains_to_skip = chains_to_skip)
self._get_residue_mapping(chains_to_skip = chains_to_skip) | python | def realign(self, cut_off, chains_to_skip = set()):
''' Alter the cut-off and run alignment again. This is much quicker than creating a new PDBUniParcSequenceAligner
object as the UniParcEntry creation etc. in the constructor does not need to be repeated.
The chains_to_skip argument (a Set) allows us to skip chains that were already matched which speeds up the alignment even more.
'''
if cut_off != self.cut_off:
self.cut_off = cut_off
# Wipe any existing information for chains not in chains_to_skip
for c in self.chains:
if c not in chains_to_skip:
self.clustal_matches[c] = None
self.substring_matches[c] = None
if self.alignment.get(c):
del self.alignment[c]
if self.seqres_to_uniparc_sequence_maps.get(c):
del self.seqres_to_uniparc_sequence_maps[c]
# Run alignment for the remaining chains
self._align_with_clustal(chains_to_skip = chains_to_skip)
self._align_with_substrings(chains_to_skip = chains_to_skip)
self._check_alignments(chains_to_skip = chains_to_skip)
self._get_residue_mapping(chains_to_skip = chains_to_skip) | [
"def",
"realign",
"(",
"self",
",",
"cut_off",
",",
"chains_to_skip",
"=",
"set",
"(",
")",
")",
":",
"if",
"cut_off",
"!=",
"self",
".",
"cut_off",
":",
"self",
".",
"cut_off",
"=",
"cut_off",
"# Wipe any existing information for chains not in chains_to_skip",
"for",
"c",
"in",
"self",
".",
"chains",
":",
"if",
"c",
"not",
"in",
"chains_to_skip",
":",
"self",
".",
"clustal_matches",
"[",
"c",
"]",
"=",
"None",
"self",
".",
"substring_matches",
"[",
"c",
"]",
"=",
"None",
"if",
"self",
".",
"alignment",
".",
"get",
"(",
"c",
")",
":",
"del",
"self",
".",
"alignment",
"[",
"c",
"]",
"if",
"self",
".",
"seqres_to_uniparc_sequence_maps",
".",
"get",
"(",
"c",
")",
":",
"del",
"self",
".",
"seqres_to_uniparc_sequence_maps",
"[",
"c",
"]",
"# Run alignment for the remaining chains",
"self",
".",
"_align_with_clustal",
"(",
"chains_to_skip",
"=",
"chains_to_skip",
")",
"self",
".",
"_align_with_substrings",
"(",
"chains_to_skip",
"=",
"chains_to_skip",
")",
"self",
".",
"_check_alignments",
"(",
"chains_to_skip",
"=",
"chains_to_skip",
")",
"self",
".",
"_get_residue_mapping",
"(",
"chains_to_skip",
"=",
"chains_to_skip",
")"
]
| Alter the cut-off and run alignment again. This is much quicker than creating a new PDBUniParcSequenceAligner
object as the UniParcEntry creation etc. in the constructor does not need to be repeated.
The chains_to_skip argument (a Set) allows us to skip chains that were already matched which speeds up the alignment even more. | [
"Alter",
"the",
"cut",
"-",
"off",
"and",
"run",
"alignment",
"again",
".",
"This",
"is",
"much",
"quicker",
"than",
"creating",
"a",
"new",
"PDBUniParcSequenceAligner",
"object",
"as",
"the",
"UniParcEntry",
"creation",
"etc",
".",
"in",
"the",
"constructor",
"does",
"not",
"need",
"to",
"be",
"repeated",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/clustalo.py#L527-L550 | train |
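A hedged sketch of the intended workflow: 'aligner' stands for a PDBUniParcSequenceAligner constructed elsewhere (its constructor is not shown in this entry). Chains already matched at a stricter cut-off are skipped on the looser pass, as the docstring suggests.

matched = set(c for c, m in aligner.clustal_matches.items() if m)
aligner.realign(90, chains_to_skip=matched)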
Kortemme-Lab/klab | klab/bio/clustalo.py | PDBUniParcSequenceAligner._determine_representative_chains | def _determine_representative_chains(self):
''' Quotient the chains to get equivalence classes of chains. These will be used for the actual mapping.'''
# todo: This logic should be moved into the FASTA class or a more general module (maybe a fast exists which uses a C/C++ library?) but at present it is easier to write here since we do not need to worry about other PDB IDs.
equivalence_fiber = {}
matched_chains = set()
for chain_id, equivalent_chains in self.identical_sequences.iteritems():
matched_chains.add(chain_id)
equivalent_chain_ids = set()
for equivalent_chain in equivalent_chains:
assert(len(equivalent_chain) == 6)
assert((equivalent_chain[:5] == '%s_' % self.pdb_id) or (equivalent_chain[:5] == '%s:' % self.pdb_id)) # ClustalW changes e.g. 1KI1:A to 1KI1_A in its output
equivalent_chain_ids.add(equivalent_chain[5])
found = False
for equivalent_chain_id in equivalent_chain_ids:
if equivalence_fiber.get(equivalent_chain_id):
found = True
assert(equivalence_fiber[equivalent_chain_id] == equivalent_chain_ids.union(set([chain_id])))
break
if not found:
equivalence_fiber[chain_id] = set(equivalent_chain_ids)
equivalence_fiber[chain_id].add(chain_id)
for c in self.chains:
if c not in matched_chains:
equivalence_fiber[c] = set([c])
self.equivalence_fiber = equivalence_fiber
self.representative_chains = equivalence_fiber.keys() | python | def _determine_representative_chains(self):
''' Quotient the chains to get equivalence classes of chains. These will be used for the actual mapping.'''
# todo: This logic should be moved into the FASTA class or a more general module (maybe a fast exists which uses a C/C++ library?) but at present it is easier to write here since we do not need to worry about other PDB IDs.
equivalence_fiber = {}
matched_chains = set()
for chain_id, equivalent_chains in self.identical_sequences.iteritems():
matched_chains.add(chain_id)
equivalent_chain_ids = set()
for equivalent_chain in equivalent_chains:
assert(len(equivalent_chain) == 6)
assert((equivalent_chain[:5] == '%s_' % self.pdb_id) or (equivalent_chain[:5] == '%s:' % self.pdb_id)) # ClustalW changes e.g. 1KI1:A to 1KI1_A in its output
equivalent_chain_ids.add(equivalent_chain[5])
found = False
for equivalent_chain_id in equivalent_chain_ids:
if equivalence_fiber.get(equivalent_chain_id):
found = True
assert(equivalence_fiber[equivalent_chain_id] == equivalent_chain_ids.union(set([chain_id])))
break
if not found:
equivalence_fiber[chain_id] = set(equivalent_chain_ids)
equivalence_fiber[chain_id].add(chain_id)
for c in self.chains:
if c not in matched_chains:
equivalence_fiber[c] = set([c])
self.equivalence_fiber = equivalence_fiber
self.representative_chains = equivalence_fiber.keys() | [
"def",
"_determine_representative_chains",
"(",
"self",
")",
":",
"# todo: This logic should be moved into the FASTA class or a more general module (maybe a fast exists which uses a C/C++ library?) but at present it is easier to write here since we do not need to worry about other PDB IDs.",
"equivalence_fiber",
"=",
"{",
"}",
"matched_chains",
"=",
"set",
"(",
")",
"for",
"chain_id",
",",
"equivalent_chains",
"in",
"self",
".",
"identical_sequences",
".",
"iteritems",
"(",
")",
":",
"matched_chains",
".",
"add",
"(",
"chain_id",
")",
"equivalent_chain_ids",
"=",
"set",
"(",
")",
"for",
"equivalent_chain",
"in",
"equivalent_chains",
":",
"assert",
"(",
"len",
"(",
"equivalent_chain",
")",
"==",
"6",
")",
"assert",
"(",
"(",
"equivalent_chain",
"[",
":",
"5",
"]",
"==",
"'%s_'",
"%",
"self",
".",
"pdb_id",
")",
"or",
"(",
"equivalent_chain",
"[",
":",
"5",
"]",
"==",
"'%s:'",
"%",
"self",
".",
"pdb_id",
")",
")",
"# ClustalW changes e.g. 1KI1:A to 1KI1_A in its output",
"equivalent_chain_ids",
".",
"add",
"(",
"equivalent_chain",
"[",
"5",
"]",
")",
"found",
"=",
"False",
"for",
"equivalent_chain_id",
"in",
"equivalent_chain_ids",
":",
"if",
"equivalence_fiber",
".",
"get",
"(",
"equivalent_chain_id",
")",
":",
"found",
"=",
"True",
"assert",
"(",
"equivalence_fiber",
"[",
"equivalent_chain_id",
"]",
"==",
"equivalent_chain_ids",
".",
"union",
"(",
"set",
"(",
"[",
"chain_id",
"]",
")",
")",
")",
"break",
"if",
"not",
"found",
":",
"equivalence_fiber",
"[",
"chain_id",
"]",
"=",
"set",
"(",
"equivalent_chain_ids",
")",
"equivalence_fiber",
"[",
"chain_id",
"]",
".",
"add",
"(",
"chain_id",
")",
"for",
"c",
"in",
"self",
".",
"chains",
":",
"if",
"c",
"not",
"in",
"matched_chains",
":",
"equivalence_fiber",
"[",
"c",
"]",
"=",
"set",
"(",
"[",
"c",
"]",
")",
"self",
".",
"equivalence_fiber",
"=",
"equivalence_fiber",
"self",
".",
"representative_chains",
"=",
"equivalence_fiber",
".",
"keys",
"(",
")"
]
| Quotient the chains to get equivalence classes of chains. These will be used for the actual mapping. | [
"Quotient",
"the",
"chains",
"to",
"get",
"equivalence",
"classes",
"of",
"chains",
".",
"These",
"will",
"be",
"used",
"for",
"the",
"actual",
"mapping",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/clustalo.py#L569-L597 | train |
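A standalone analogue of the quotient step above, on toy data: group chains into equivalence classes keyed by one representative per class. The real method additionally parses '1ABC_B'-style identifiers coming from ClustalW output.

def quotient_chains(chains, identical):
    # identical maps a chain to the other chains with the same sequence
    fiber, seen = {}, set()
    for c in chains:
        if c in seen:
            continue
        members = {c} | set(identical.get(c, []))
        fiber[c] = members
        seen |= members
    return fiber

print(quotient_chains(['A', 'B', 'C', 'D'], {'A': ['B', 'C']}))
# {'A': {'A', 'B', 'C'}, 'D': {'D'}}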
Kortemme-Lab/klab | klab/bio/clustalo.py | PDBUniParcSequenceAligner._get_uniparc_sequences_through_uniprot_ACs | def _get_uniparc_sequences_through_uniprot_ACs(self, mapping_pdb_id, uniprot_ACs, cache_dir):
        '''Get the UniParc sequences associated with the UniProt accession numbers.'''
# Map the UniProt ACs to the UniParc IDs
m = uniprot_map('ACC', 'UPARC', uniprot_ACs, cache_dir = cache_dir)
UniParcIDs = []
for _, v in m.iteritems():
UniParcIDs.extend(v)
# Create a mapping from the mapping_pdb_id to the UniParcEntry objects. This must match the return type from pdb_to_uniparc.
mapping = {mapping_pdb_id : []}
for UniParcID in UniParcIDs:
entry = UniParcEntry(UniParcID, cache_dir = cache_dir)
mapping[mapping_pdb_id].append(entry)
return mapping | python | def _get_uniparc_sequences_through_uniprot_ACs(self, mapping_pdb_id, uniprot_ACs, cache_dir):
        '''Get the UniParc sequences associated with the UniProt accession numbers.'''
# Map the UniProt ACs to the UniParc IDs
m = uniprot_map('ACC', 'UPARC', uniprot_ACs, cache_dir = cache_dir)
UniParcIDs = []
for _, v in m.iteritems():
UniParcIDs.extend(v)
# Create a mapping from the mapping_pdb_id to the UniParcEntry objects. This must match the return type from pdb_to_uniparc.
mapping = {mapping_pdb_id : []}
for UniParcID in UniParcIDs:
entry = UniParcEntry(UniParcID, cache_dir = cache_dir)
mapping[mapping_pdb_id].append(entry)
return mapping | [
"def",
"_get_uniparc_sequences_through_uniprot_ACs",
"(",
"self",
",",
"mapping_pdb_id",
",",
"uniprot_ACs",
",",
"cache_dir",
")",
":",
"# Map the UniProt ACs to the UniParc IDs",
"m",
"=",
"uniprot_map",
"(",
"'ACC'",
",",
"'UPARC'",
",",
"uniprot_ACs",
",",
"cache_dir",
"=",
"cache_dir",
")",
"UniParcIDs",
"=",
"[",
"]",
"for",
"_",
",",
"v",
"in",
"m",
".",
"iteritems",
"(",
")",
":",
"UniParcIDs",
".",
"extend",
"(",
"v",
")",
"# Create a mapping from the mapping_pdb_id to the UniParcEntry objects. This must match the return type from pdb_to_uniparc.",
"mapping",
"=",
"{",
"mapping_pdb_id",
":",
"[",
"]",
"}",
"for",
"UniParcID",
"in",
"UniParcIDs",
":",
"entry",
"=",
"UniParcEntry",
"(",
"UniParcID",
",",
"cache_dir",
"=",
"cache_dir",
")",
"mapping",
"[",
"mapping_pdb_id",
"]",
".",
"append",
"(",
"entry",
")",
"return",
"mapping"
]
| Get the UniParc sequences associated with the UniProt accession numbers. | [
"Get",
"the",
"UniParc",
"sequences",
"associated",
"with",
"the",
"UniProt",
"accession",
"number",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/clustalo.py#L664-L679 | train |
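The AC-to-UniParc flattening step in isolation, with a toy dict standing in for uniprot_map()'s output; the IDs reuse the 1KTZ example mentioned in this module's own comments.

m = {'P10600': ['UPI000000D8EC'], 'P37173': ['UPI000011DD7E']}
uniparc_ids = []
for _, v in sorted(m.items()):
    uniparc_ids.extend(v)
print(uniparc_ids)   # ['UPI000000D8EC', 'UPI000011DD7E']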
Kortemme-Lab/klab | klab/bio/clustalo.py | PDBUniParcSequenceAligner._align_with_substrings | def _align_with_substrings(self, chains_to_skip = set()):
'''Simple substring-based matching'''
for c in self.representative_chains:
# Skip specified chains
if c not in chains_to_skip:
#colortext.pcyan(c)
#colortext.warning(self.fasta[c])
fasta_sequence = self.fasta[c]
substring_matches = {}
for uniparc_id, uniparc_sequence in sorted(self.uniparc_sequences.iteritems()):
uniparc_sequence = str(uniparc_sequence)
idx = uniparc_sequence.find(fasta_sequence)
if idx != -1:
substring_matches[uniparc_id] = 0
elif len(fasta_sequence) > 30:
idx = uniparc_sequence.find(fasta_sequence[5:-5])
if idx != -1:
substring_matches[uniparc_id] = 5
else:
idx = uniparc_sequence.find(fasta_sequence[7:-7])
if idx != -1:
substring_matches[uniparc_id] = 7
elif len(fasta_sequence) > 15:
idx = uniparc_sequence.find(fasta_sequence[3:-3])
if idx != -1:
substring_matches[uniparc_id] = 3
self.substring_matches[c] = substring_matches
# Restrict the matches to a given set of UniParc IDs. This can be used to remove ambiguity when the correct mapping has been determined e.g. from the SIFTS database.
colortext.pcyan('*' * 100)
pprint.pprint(self.substring_matches)
if self.restrict_to_uniparc_values:
for c in self.representative_chains:
#print('HERE!')
#print(c)
                if len(set(map(str, self.substring_matches[c].keys())).intersection(set(self.restrict_to_uniparc_values))) > 0:
# Only restrict in cases where there is at least one match in self.restrict_to_uniparc_values
# Otherwise, chains which are not considered in self.restrict_to_uniparc_values may throw away valid matches
# e.g. when looking for structures related to 1KTZ (A -> P10600 -> UPI000000D8EC, B -> P37173 -> UPI000011DD7E),
# we find the close match 2PJY. However, 2PJY has 3 chains: A -> P10600, B -> P37173, and C -> P36897 -> UPI000011D62A
restricted_matches = dict((str(k), self.substring_matches[c][k]) for k in self.substring_matches[c].keys() if str(k) in self.restrict_to_uniparc_values)
if len(restricted_matches) != len(self.substring_matches[c]):
removed_matches = sorted(set(self.substring_matches[c].keys()).difference(set(restricted_matches)))
# todo: see above re:quiet colortext.pcyan('Ignoring {0} as those chains were not included in the list self.restrict_to_uniparc_values ({1}).'.format(', '.join(removed_matches), ', '.join(self.restrict_to_uniparc_values)))
self.substring_matches[c] = restricted_matches
#pprint.pprint(self.substring_matches)
#colortext.pcyan('*' * 100)
# Use the representatives' alignments for their respective equivalence classes
for c_1, related_chains in self.equivalence_fiber.iteritems():
for c_2 in related_chains:
self.substring_matches[c_2] = self.substring_matches[c_1] | python | def _align_with_substrings(self, chains_to_skip = set()):
'''Simple substring-based matching'''
for c in self.representative_chains:
# Skip specified chains
if c not in chains_to_skip:
#colortext.pcyan(c)
#colortext.warning(self.fasta[c])
fasta_sequence = self.fasta[c]
substring_matches = {}
for uniparc_id, uniparc_sequence in sorted(self.uniparc_sequences.iteritems()):
uniparc_sequence = str(uniparc_sequence)
idx = uniparc_sequence.find(fasta_sequence)
if idx != -1:
substring_matches[uniparc_id] = 0
elif len(fasta_sequence) > 30:
idx = uniparc_sequence.find(fasta_sequence[5:-5])
if idx != -1:
substring_matches[uniparc_id] = 5
else:
idx = uniparc_sequence.find(fasta_sequence[7:-7])
if idx != -1:
substring_matches[uniparc_id] = 7
elif len(fasta_sequence) > 15:
idx = uniparc_sequence.find(fasta_sequence[3:-3])
if idx != -1:
substring_matches[uniparc_id] = 3
self.substring_matches[c] = substring_matches
# Restrict the matches to a given set of UniParc IDs. This can be used to remove ambiguity when the correct mapping has been determined e.g. from the SIFTS database.
colortext.pcyan('*' * 100)
pprint.pprint(self.substring_matches)
if self.restrict_to_uniparc_values:
for c in self.representative_chains:
#print('HERE!')
#print(c)
if len(set(map(str, self.substring_matches[c].keys())).intersection(set(self.restrict_to_uniparc_values))) > 0:
# Only restrict in cases where there is at least one match in self.restrict_to_uniparc_values
# Otherwise, chains which are not considered in self.restrict_to_uniparc_values may throw away valid matches
# e.g. when looking for structures related to 1KTZ (A -> P10600 -> UPI000000D8EC, B -> P37173 -> UPI000011DD7E),
# we find the close match 2PJY. However, 2PJY has 3 chains: A -> P10600, B -> P37173, and C -> P36897 -> UPI000011D62A
restricted_matches = dict((str(k), self.substring_matches[c][k]) for k in self.substring_matches[c].keys() if str(k) in self.restrict_to_uniparc_values)
if len(restricted_matches) != len(self.substring_matches[c]):
removed_matches = sorted(set(self.substring_matches[c].keys()).difference(set(restricted_matches)))
# todo: see above re:quiet colortext.pcyan('Ignoring {0} as those chains were not included in the list self.restrict_to_uniparc_values ({1}).'.format(', '.join(removed_matches), ', '.join(self.restrict_to_uniparc_values)))
self.substring_matches[c] = restricted_matches
#pprint.pprint(self.substring_matches)
#colortext.pcyan('*' * 100)
# Use the representatives' alignments for their respective equivalence classes
for c_1, related_chains in self.equivalence_fiber.iteritems():
for c_2 in related_chains:
self.substring_matches[c_2] = self.substring_matches[c_1] | [
"def",
"_align_with_substrings",
"(",
"self",
",",
"chains_to_skip",
"=",
"set",
"(",
")",
")",
":",
"for",
"c",
"in",
"self",
".",
"representative_chains",
":",
"# Skip specified chains",
"if",
"c",
"not",
"in",
"chains_to_skip",
":",
"#colortext.pcyan(c)",
"#colortext.warning(self.fasta[c])",
"fasta_sequence",
"=",
"self",
".",
"fasta",
"[",
"c",
"]",
"substring_matches",
"=",
"{",
"}",
"for",
"uniparc_id",
",",
"uniparc_sequence",
"in",
"sorted",
"(",
"self",
".",
"uniparc_sequences",
".",
"iteritems",
"(",
")",
")",
":",
"uniparc_sequence",
"=",
"str",
"(",
"uniparc_sequence",
")",
"idx",
"=",
"uniparc_sequence",
".",
"find",
"(",
"fasta_sequence",
")",
"if",
"idx",
"!=",
"-",
"1",
":",
"substring_matches",
"[",
"uniparc_id",
"]",
"=",
"0",
"elif",
"len",
"(",
"fasta_sequence",
")",
">",
"30",
":",
"idx",
"=",
"uniparc_sequence",
".",
"find",
"(",
"fasta_sequence",
"[",
"5",
":",
"-",
"5",
"]",
")",
"if",
"idx",
"!=",
"-",
"1",
":",
"substring_matches",
"[",
"uniparc_id",
"]",
"=",
"5",
"else",
":",
"idx",
"=",
"uniparc_sequence",
".",
"find",
"(",
"fasta_sequence",
"[",
"7",
":",
"-",
"7",
"]",
")",
"if",
"idx",
"!=",
"-",
"1",
":",
"substring_matches",
"[",
"uniparc_id",
"]",
"=",
"7",
"elif",
"len",
"(",
"fasta_sequence",
")",
">",
"15",
":",
"idx",
"=",
"uniparc_sequence",
".",
"find",
"(",
"fasta_sequence",
"[",
"3",
":",
"-",
"3",
"]",
")",
"if",
"idx",
"!=",
"-",
"1",
":",
"substring_matches",
"[",
"uniparc_id",
"]",
"=",
"3",
"self",
".",
"substring_matches",
"[",
"c",
"]",
"=",
"substring_matches",
"# Restrict the matches to a given set of UniParc IDs. This can be used to remove ambiguity when the correct mapping has been determined e.g. from the SIFTS database.",
"colortext",
".",
"pcyan",
"(",
"'*'",
"*",
"100",
")",
"pprint",
".",
"pprint",
"(",
"self",
".",
"substring_matches",
")",
"if",
"self",
".",
"restrict_to_uniparc_values",
":",
"for",
"c",
"in",
"self",
".",
"representative_chains",
":",
"#print('HERE!')",
"#print(c)",
"if",
"set",
"(",
"map",
"(",
"str",
",",
"self",
".",
"substring_matches",
"[",
"c",
"]",
".",
"keys",
"(",
")",
")",
")",
".",
"intersection",
"(",
"set",
"(",
"self",
".",
"restrict_to_uniparc_values",
")",
")",
">",
"0",
":",
"# Only restrict in cases where there is at least one match in self.restrict_to_uniparc_values",
"# Otherwise, chains which are not considered in self.restrict_to_uniparc_values may throw away valid matches",
"# e.g. when looking for structures related to 1KTZ (A -> P10600 -> UPI000000D8EC, B -> P37173 -> UPI000011DD7E),",
"# we find the close match 2PJY. However, 2PJY has 3 chains: A -> P10600, B -> P37173, and C -> P36897 -> UPI000011D62A",
"restricted_matches",
"=",
"dict",
"(",
"(",
"str",
"(",
"k",
")",
",",
"self",
".",
"substring_matches",
"[",
"c",
"]",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"self",
".",
"substring_matches",
"[",
"c",
"]",
".",
"keys",
"(",
")",
"if",
"str",
"(",
"k",
")",
"in",
"self",
".",
"restrict_to_uniparc_values",
")",
"if",
"len",
"(",
"restricted_matches",
")",
"!=",
"len",
"(",
"self",
".",
"substring_matches",
"[",
"c",
"]",
")",
":",
"removed_matches",
"=",
"sorted",
"(",
"set",
"(",
"self",
".",
"substring_matches",
"[",
"c",
"]",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"set",
"(",
"restricted_matches",
")",
")",
")",
"# todo: see above re:quiet colortext.pcyan('Ignoring {0} as those chains were not included in the list self.restrict_to_uniparc_values ({1}).'.format(', '.join(removed_matches), ', '.join(self.restrict_to_uniparc_values)))",
"self",
".",
"substring_matches",
"[",
"c",
"]",
"=",
"restricted_matches",
"#pprint.pprint(self.substring_matches)",
"#colortext.pcyan('*' * 100)",
"# Use the representatives' alignments for their respective equivalent classes",
"for",
"c_1",
",",
"related_chains",
"in",
"self",
".",
"equivalence_fiber",
".",
"iteritems",
"(",
")",
":",
"for",
"c_2",
"in",
"related_chains",
":",
"self",
".",
"substring_matches",
"[",
"c_2",
"]",
"=",
"self",
".",
"substring_matches",
"[",
"c_1",
"]"
]
| Simple substring-based matching | [
"Simple",
"substring",
"-",
"based",
"matching"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/clustalo.py#L737-L791 | train |
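The heart of the method above is a trimming ladder: look for the chain's SEQRES sequence inside each UniParc sequence verbatim, then retry with 5 and then 7 residues clipped from each end for long sequences, or 3 for medium-length ones, recording how much clipping was needed. A self-contained sketch of that ladder (the function name is ours, not the module's):
def best_substring_match(query, target):
    """Return residues clipped from each end for a match, or None if no match."""
    if target.find(query) != -1:
        return 0
    if len(query) > 30:
        for clip in (5, 7):
            if target.find(query[clip:-clip]) != -1:
                return clip
    elif len(query) > 15:
        if target.find(query[3:-3]) != -1:
            return 3
    return None

assert best_substring_match('BCDE', 'ABCDEF') == 0
assert best_substring_match('XXX', 'ABCDEF') is None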
Kortemme-Lab/klab | klab/bio/clustalo.py | PDBUniParcSequenceAligner._get_residue_mapping | def _get_residue_mapping(self, chains_to_skip = set()):
'''Creates a mapping between the residues of the chains and the associated UniParc entries.'''
for c in self.representative_chains:
# Skip specified chains
if c not in chains_to_skip:
if self.alignment.get(c):
uniparc_entry = self.get_uniparc_object(c)
sa = SequenceAligner()
sa.add_sequence(c, self.fasta[c])
sa.add_sequence(uniparc_entry.UniParcID, uniparc_entry.sequence)
sa.align()
residue_mapping, residue_match_mapping = sa.get_residue_mapping()
# Create a SequenceMap
s = PDBUniParcSequenceMap()
assert(sorted(residue_mapping.keys()) == sorted(residue_match_mapping.keys()))
for k, v in residue_mapping.iteritems():
s.add(k, (uniparc_entry.UniParcID, v), residue_match_mapping[k])
self.seqres_to_uniparc_sequence_maps[c] = s
else:
self.seqres_to_uniparc_sequence_maps[c] = PDBUniParcSequenceMap()
# Use the representatives' alignments for their respective equivalence classes. This saves memory as the same SequenceMap is used.
for c_1, related_chains in self.equivalence_fiber.iteritems():
for c_2 in related_chains:
if self.seqres_to_uniparc_sequence_maps.get(c_1):
self.seqres_to_uniparc_sequence_maps[c_2] = self.seqres_to_uniparc_sequence_maps[c_1] | python | def _get_residue_mapping(self, chains_to_skip = set()):
'''Creates a mapping between the residues of the chains and the associated UniParc entries.'''
for c in self.representative_chains:
# Skip specified chains
if c not in chains_to_skip:
if self.alignment.get(c):
uniparc_entry = self.get_uniparc_object(c)
sa = SequenceAligner()
sa.add_sequence(c, self.fasta[c])
sa.add_sequence(uniparc_entry.UniParcID, uniparc_entry.sequence)
sa.align()
residue_mapping, residue_match_mapping = sa.get_residue_mapping()
# Create a SequenceMap
s = PDBUniParcSequenceMap()
assert(sorted(residue_mapping.keys()) == sorted(residue_match_mapping.keys()))
for k, v in residue_mapping.iteritems():
s.add(k, (uniparc_entry.UniParcID, v), residue_match_mapping[k])
self.seqres_to_uniparc_sequence_maps[c] = s
else:
self.seqres_to_uniparc_sequence_maps[c] = PDBUniParcSequenceMap()
# Use the representatives' alignments for their respective equivalence classes. This saves memory as the same SequenceMap is used.
for c_1, related_chains in self.equivalence_fiber.iteritems():
for c_2 in related_chains:
if self.seqres_to_uniparc_sequence_maps.get(c_1):
self.seqres_to_uniparc_sequence_maps[c_2] = self.seqres_to_uniparc_sequence_maps[c_1] | [
"def",
"_get_residue_mapping",
"(",
"self",
",",
"chains_to_skip",
"=",
"set",
"(",
")",
")",
":",
"for",
"c",
"in",
"self",
".",
"representative_chains",
":",
"# Skip specified chains",
"if",
"c",
"not",
"in",
"chains_to_skip",
":",
"if",
"self",
".",
"alignment",
".",
"get",
"(",
"c",
")",
":",
"uniparc_entry",
"=",
"self",
".",
"get_uniparc_object",
"(",
"c",
")",
"sa",
"=",
"SequenceAligner",
"(",
")",
"sa",
".",
"add_sequence",
"(",
"c",
",",
"self",
".",
"fasta",
"[",
"c",
"]",
")",
"sa",
".",
"add_sequence",
"(",
"uniparc_entry",
".",
"UniParcID",
",",
"uniparc_entry",
".",
"sequence",
")",
"sa",
".",
"align",
"(",
")",
"residue_mapping",
",",
"residue_match_mapping",
"=",
"sa",
".",
"get_residue_mapping",
"(",
")",
"# Create a SequenceMap",
"s",
"=",
"PDBUniParcSequenceMap",
"(",
")",
"assert",
"(",
"sorted",
"(",
"residue_mapping",
".",
"keys",
"(",
")",
")",
"==",
"sorted",
"(",
"residue_match_mapping",
".",
"keys",
"(",
")",
")",
")",
"for",
"k",
",",
"v",
"in",
"residue_mapping",
".",
"iteritems",
"(",
")",
":",
"s",
".",
"add",
"(",
"k",
",",
"(",
"uniparc_entry",
".",
"UniParcID",
",",
"v",
")",
",",
"residue_match_mapping",
"[",
"k",
"]",
")",
"self",
".",
"seqres_to_uniparc_sequence_maps",
"[",
"c",
"]",
"=",
"s",
"else",
":",
"self",
".",
"seqres_to_uniparc_sequence_maps",
"[",
"c",
"]",
"=",
"PDBUniParcSequenceMap",
"(",
")",
"# Use the representatives' alignments for their respective equivalent classes. This saves memory as the same SequenceMap is used.",
"for",
"c_1",
",",
"related_chains",
"in",
"self",
".",
"equivalence_fiber",
".",
"iteritems",
"(",
")",
":",
"for",
"c_2",
"in",
"related_chains",
":",
"if",
"self",
".",
"seqres_to_uniparc_sequence_maps",
".",
"get",
"(",
"c_1",
")",
":",
"self",
".",
"seqres_to_uniparc_sequence_maps",
"[",
"c_2",
"]",
"=",
"self",
".",
"seqres_to_uniparc_sequence_maps",
"[",
"c_1",
"]"
]
| Creates a mapping between the residues of the chains and the associated UniParc entries. | [
"Creates",
"a",
"mapping",
"between",
"the",
"residues",
"of",
"the",
"chains",
"and",
"the",
"associated",
"UniParc",
"entries",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/clustalo.py#L833-L860 | train |
Kortemme-Lab/klab | klab/bio/clustalo.py | SIFTSChainMutatorSequenceAligner.get_corresponding_chains | def get_corresponding_chains(self, from_pdb_id, from_chain_id, to_pdb_id):
'''Should be called after get_mutations.'''
chains = self.chain_map.get(from_pdb_id, {}).get(from_chain_id, {}).get(to_pdb_id, [])
return sorted(chains) | python | def get_corresponding_chains(self, from_pdb_id, from_chain_id, to_pdb_id):
'''Should be called after get_mutations.'''
chains = self.chain_map.get(from_pdb_id, {}).get(from_chain_id, {}).get(to_pdb_id, [])
return sorted(chains) | [
"def",
"get_corresponding_chains",
"(",
"self",
",",
"from_pdb_id",
",",
"from_chain_id",
",",
"to_pdb_id",
")",
":",
"chains",
"=",
"self",
".",
"chain_map",
".",
"get",
"(",
"from_pdb_id",
",",
"{",
"}",
")",
".",
"get",
"(",
"from_chain_id",
",",
"{",
"}",
")",
".",
"get",
"(",
"to_pdb_id",
",",
"[",
"]",
")",
"return",
"sorted",
"(",
"chains",
")"
]
| Should be called after get_mutations. | [
"Should",
"be",
"called",
"after",
"get_mutations",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/clustalo.py#L1281-L1284 | train |
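The chained .get calls above return an empty list whenever any level of the nested chain_map is missing, so callers never need to pre-check keys. A toy example with made-up PDB IDs:
chain_map = {'1ABC': {'A': {'2XYZ': ['B', 'A']}}}

def corresponding_chains(chain_map, from_pdb_id, from_chain_id, to_pdb_id):
    return sorted(chain_map.get(from_pdb_id, {}).get(from_chain_id, {}).get(to_pdb_id, []))

print(corresponding_chains(chain_map, '1ABC', 'A', '2XYZ'))  # ['A', 'B']
print(corresponding_chains(chain_map, '9ZZZ', 'A', '2XYZ'))  # []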
Kortemme-Lab/klab | klab/bio/clustalo.py | SIFTSChainMutatorSequenceAligner.get_chain_mutations | def get_chain_mutations(self, pdb_id_1, chain_1, pdb_id_2, chain_2):
'''Returns a list of tuples each containing a SEQRES Mutation object and an ATOM Mutation object representing the
mutations from pdb_id_1, chain_1 to pdb_id_2, chain_2.
SequenceMaps are constructed in this function between the chains based on the alignment.
PDBMutationPair objects are returned as they are hashable and amenable to set construction to eliminate duplicates.
'''
# Set up the objects
p1 = self.add_pdb(pdb_id_1)
p2 = self.add_pdb(pdb_id_2)
sifts_1, pdb_1 = p1['sifts'], p1['pdb']
sifts_2, pdb_2 = p2['sifts'], p2['pdb']
# Set up the sequences
#pprint.pprint(sifts_1.seqres_to_atom_sequence_maps)
seqres_to_atom_sequence_maps_1 = sifts_1.seqres_to_atom_sequence_maps.get(chain_1, {}) # this is not guaranteed to exist e.g. 2ZNW chain A
seqres_1, atom_1 = pdb_1.seqres_sequences.get(chain_1), pdb_1.atom_sequences.get(chain_1)
seqres_2, atom_2 = pdb_2.seqres_sequences.get(chain_2), pdb_2.atom_sequences.get(chain_2)
if not seqres_1: raise Exception('No SEQRES sequence for chain {0} of {1}.'.format(chain_1, pdb_1))
if not atom_1: raise Exception('No ATOM sequence for chain {0} of {1}.'.format(chain_1, pdb_1))
if not seqres_2: raise Exception('No SEQRES sequence for chain {0} of {1}.'.format(chain_2, pdb_2))
if not atom_2: raise Exception('No ATOM sequence for chain {0} of {1}.'.format(chain_2, pdb_2))
seqres_str_1 = str(seqres_1)
seqres_str_2 = str(seqres_2)
# Align the SEQRES sequences
sa = SequenceAligner()
sa.add_sequence('{0}_{1}'.format(pdb_id_1, chain_1), seqres_str_1)
sa.add_sequence('{0}_{1}'.format(pdb_id_2, chain_2), seqres_str_2)
sa.align()
seqres_residue_mapping, seqres_match_mapping = sa.get_residue_mapping()
#colortext.pcyan(sa.alignment_output)
# Create a SequenceMap
seqres_sequence_map = SequenceMap()
assert(sorted(seqres_residue_mapping.keys()) == sorted(seqres_match_mapping.keys()))
for k, v in seqres_residue_mapping.iteritems():
seqres_sequence_map.add(k, v, seqres_match_mapping[k])
self.seqres_sequence_maps[(pdb_id_1, chain_1)][(pdb_id_2, chain_2)] = seqres_sequence_map
# Determine the mutations between the SEQRES sequences and use these to generate a list of ATOM mutations
mutations = []
clustal_symbols = SubstitutionScore.clustal_symbols
#print(pdb_id_1, chain_1, pdb_id_2, chain_2)
#print(seqres_to_atom_sequence_maps_1)
for seqres_res_id, v in seqres_match_mapping.iteritems():
# Look at all positions which differ. seqres_res_id is 1-indexed, following the SEQRES and UniProt convention, as are our Sequence objects.
if clustal_symbols[v.clustal] != '*':
# Get the wildtype Residue objects
seqres_wt_residue = seqres_1[seqres_res_id]
#print(seqres_wt_residue)
seqres_mutant_residue = seqres_2[seqres_residue_mapping[seqres_res_id]] # todo: this will probably fail for some cases where there is no corresponding mapping
# If there is an associated ATOM record for the wildtype residue, get its residue ID
atom_res_id = None
atom_chain_res_id = seqres_to_atom_sequence_maps_1.get(seqres_res_id)
try:
if atom_chain_res_id:
assert(atom_chain_res_id[0] == chain_1)
atom_residue = atom_1[atom_chain_res_id]
atom_res_id = atom_chain_res_id[1:]
assert(atom_residue.ResidueAA == seqres_wt_residue.ResidueAA)
assert(atom_residue.ResidueID == atom_res_id)
except:
atom_res_id = None
if seqres_wt_residue.ResidueAA != 'X':
# we do not seem to keep ATOM records for unknown/non-canonicals: see 2BTF chain A -> 2PBD chain A
raise
# Create two mutations - one for the SEQRES residue and one for the corresponding (if any) ATOM residue
# We create both so that the user is informed whether there is a mutation between the structures which is
# not captured by the coordinates.
# If there are no ATOM coordinates, there is no point creating an ATOM mutation object so we instead use
# the None type. This also fits with the approach in the SpiderWeb framework.
seqres_mutation = ChainMutation(seqres_wt_residue.ResidueAA, seqres_res_id, seqres_mutant_residue.ResidueAA, Chain = chain_1)
atom_mutation = None
if atom_res_id:
atom_mutation = ChainMutation(seqres_wt_residue.ResidueAA, atom_res_id, seqres_mutant_residue.ResidueAA, Chain = chain_1)
mutations.append(PDBMutationPair(seqres_mutation, atom_mutation))
return mutations | python | def get_chain_mutations(self, pdb_id_1, chain_1, pdb_id_2, chain_2):
'''Returns a list of tuples each containing a SEQRES Mutation object and an ATOM Mutation object representing the
mutations from pdb_id_1, chain_1 to pdb_id_2, chain_2.
SequenceMaps are constructed in this function between the chains based on the alignment.
PDBMutationPair objects are returned as they are hashable and amenable to set construction to eliminate duplicates.
'''
# Set up the objects
p1 = self.add_pdb(pdb_id_1)
p2 = self.add_pdb(pdb_id_2)
sifts_1, pdb_1 = p1['sifts'], p1['pdb']
sifts_2, pdb_2 = p2['sifts'], p2['pdb']
# Set up the sequences
#pprint.pprint(sifts_1.seqres_to_atom_sequence_maps)
seqres_to_atom_sequence_maps_1 = sifts_1.seqres_to_atom_sequence_maps.get(chain_1, {}) # this is not guaranteed to exist e.g. 2ZNW chain A
seqres_1, atom_1 = pdb_1.seqres_sequences.get(chain_1), pdb_1.atom_sequences.get(chain_1)
seqres_2, atom_2 = pdb_2.seqres_sequences.get(chain_2), pdb_2.atom_sequences.get(chain_2)
if not seqres_1: raise Exception('No SEQRES sequence for chain {0} of {1}.'.format(chain_1, pdb_1))
if not atom_1: raise Exception('No ATOM sequence for chain {0} of {1}.'.format(chain_1, pdb_1))
if not seqres_2: raise Exception('No SEQRES sequence for chain {0} of {1}.'.format(chain_2, pdb_2))
if not atom_2: raise Exception('No ATOM sequence for chain {0} of {1}.'.format(chain_2, pdb_2))
seqres_str_1 = str(seqres_1)
seqres_str_2 = str(seqres_2)
# Align the SEQRES sequences
sa = SequenceAligner()
sa.add_sequence('{0}_{1}'.format(pdb_id_1, chain_1), seqres_str_1)
sa.add_sequence('{0}_{1}'.format(pdb_id_2, chain_2), seqres_str_2)
sa.align()
seqres_residue_mapping, seqres_match_mapping = sa.get_residue_mapping()
#colortext.pcyan(sa.alignment_output)
# Create a SequenceMap
seqres_sequence_map = SequenceMap()
assert(sorted(seqres_residue_mapping.keys()) == sorted(seqres_match_mapping.keys()))
for k, v in seqres_residue_mapping.iteritems():
seqres_sequence_map.add(k, v, seqres_match_mapping[k])
self.seqres_sequence_maps[(pdb_id_1, chain_1)][(pdb_id_2, chain_2)] = seqres_sequence_map
# Determine the mutations between the SEQRES sequences and use these to generate a list of ATOM mutations
mutations = []
clustal_symbols = SubstitutionScore.clustal_symbols
#print(pdb_id_1, chain_1, pdb_id_2, chain_2)
#print(seqres_to_atom_sequence_maps_1)
for seqres_res_id, v in seqres_match_mapping.iteritems():
# Look at all positions which differ. seqres_res_id is 1-indexed, following the SEQRES and UniProt convention, as are our Sequence objects.
if clustal_symbols[v.clustal] != '*':
# Get the wildtype Residue objects
seqres_wt_residue = seqres_1[seqres_res_id]
#print(seqres_wt_residue)
seqres_mutant_residue = seqres_2[seqres_residue_mapping[seqres_res_id]] # todo: this will probably fail for some cases where there is no corresponding mapping
# If there is an associated ATOM record for the wildtype residue, get its residue ID
atom_res_id = None
atom_chain_res_id = seqres_to_atom_sequence_maps_1.get(seqres_res_id)
try:
if atom_chain_res_id:
assert(atom_chain_res_id[0] == chain_1)
atom_residue = atom_1[atom_chain_res_id]
atom_res_id = atom_chain_res_id[1:]
assert(atom_residue.ResidueAA == seqres_wt_residue.ResidueAA)
assert(atom_residue.ResidueID == atom_res_id)
except:
atom_res_id = None
if seqres_wt_residue.ResidueAA != 'X':
# we do not seem to keep ATOM records for unknown/non-canonicals: see 2BTF chain A -> 2PBD chain A
raise
# Create two mutations - one for the SEQRES residue and one for the corresponding (if any) ATOM residue
# We create both so that the user is informed whether there is a mutation between the structures which is
# not captured by the coordinates.
# If there are no ATOM coordinates, there is no point creating an ATOM mutation object so we instead use
# the None type. This also fits with the approach in the SpiderWeb framework.
seqres_mutation = ChainMutation(seqres_wt_residue.ResidueAA, seqres_res_id, seqres_mutant_residue.ResidueAA, Chain = chain_1)
atom_mutation = None
if atom_res_id:
atom_mutation = ChainMutation(seqres_wt_residue.ResidueAA, atom_res_id, seqres_mutant_residue.ResidueAA, Chain = chain_1)
mutations.append(PDBMutationPair(seqres_mutation, atom_mutation))
return mutations | [
"def",
"get_chain_mutations",
"(",
"self",
",",
"pdb_id_1",
",",
"chain_1",
",",
"pdb_id_2",
",",
"chain_2",
")",
":",
"# Set up the objects",
"p1",
"=",
"self",
".",
"add_pdb",
"(",
"pdb_id_1",
")",
"p2",
"=",
"self",
".",
"add_pdb",
"(",
"pdb_id_2",
")",
"sifts_1",
",",
"pdb_1",
"=",
"p1",
"[",
"'sifts'",
"]",
",",
"p1",
"[",
"'pdb'",
"]",
"sifts_2",
",",
"pdb_2",
"=",
"p2",
"[",
"'sifts'",
"]",
",",
"p2",
"[",
"'pdb'",
"]",
"# Set up the sequences",
"#pprint.pprint(sifts_1.seqres_to_atom_sequence_maps)",
"seqres_to_atom_sequence_maps_1",
"=",
"sifts_1",
".",
"seqres_to_atom_sequence_maps",
".",
"get",
"(",
"chain_1",
",",
"{",
"}",
")",
"# this is not guaranteed to exist e.g. 2ZNW chain A",
"seqres_1",
",",
"atom_1",
"=",
"pdb_1",
".",
"seqres_sequences",
".",
"get",
"(",
"chain_1",
")",
",",
"pdb_1",
".",
"atom_sequences",
".",
"get",
"(",
"chain_1",
")",
"seqres_2",
",",
"atom_2",
"=",
"pdb_2",
".",
"seqres_sequences",
".",
"get",
"(",
"chain_2",
")",
",",
"pdb_2",
".",
"atom_sequences",
".",
"get",
"(",
"chain_2",
")",
"if",
"not",
"seqres_1",
":",
"raise",
"Exception",
"(",
"'No SEQRES sequence for chain {0} of {1}.'",
".",
"format",
"(",
"chain_1",
",",
"pdb_1",
")",
")",
"if",
"not",
"atom_1",
":",
"raise",
"Exception",
"(",
"'No ATOM sequence for chain {0} of {1}.'",
".",
"format",
"(",
"chain_1",
",",
"pdb_1",
")",
")",
"if",
"not",
"seqres_2",
":",
"raise",
"Exception",
"(",
"'No SEQRES sequence for chain {0} of {1}.'",
".",
"format",
"(",
"chain_2",
",",
"pdb_2",
")",
")",
"if",
"not",
"atom_2",
":",
"raise",
"Exception",
"(",
"'No ATOM sequence for chain {0} of {1}.'",
".",
"format",
"(",
"chain_2",
",",
"pdb_2",
")",
")",
"seqres_str_1",
"=",
"str",
"(",
"seqres_1",
")",
"seqres_str_2",
"=",
"str",
"(",
"seqres_2",
")",
"# Align the SEQRES sequences",
"sa",
"=",
"SequenceAligner",
"(",
")",
"sa",
".",
"add_sequence",
"(",
"'{0}_{1}'",
".",
"format",
"(",
"pdb_id_1",
",",
"chain_1",
")",
",",
"seqres_str_1",
")",
"sa",
".",
"add_sequence",
"(",
"'{0}_{1}'",
".",
"format",
"(",
"pdb_id_2",
",",
"chain_2",
")",
",",
"seqres_str_2",
")",
"sa",
".",
"align",
"(",
")",
"seqres_residue_mapping",
",",
"seqres_match_mapping",
"=",
"sa",
".",
"get_residue_mapping",
"(",
")",
"#colortext.pcyan(sa.alignment_output)",
"# Create a SequenceMap",
"seqres_sequence_map",
"=",
"SequenceMap",
"(",
")",
"assert",
"(",
"sorted",
"(",
"seqres_residue_mapping",
".",
"keys",
"(",
")",
")",
"==",
"sorted",
"(",
"seqres_match_mapping",
".",
"keys",
"(",
")",
")",
")",
"for",
"k",
",",
"v",
"in",
"seqres_residue_mapping",
".",
"iteritems",
"(",
")",
":",
"seqres_sequence_map",
".",
"add",
"(",
"k",
",",
"v",
",",
"seqres_match_mapping",
"[",
"k",
"]",
")",
"self",
".",
"seqres_sequence_maps",
"[",
"(",
"pdb_id_1",
",",
"chain_1",
")",
"]",
"[",
"(",
"pdb_id_2",
",",
"chain_2",
")",
"]",
"=",
"seqres_sequence_map",
"# Determine the mutations between the SEQRES sequences and use these to generate a list of ATOM mutations",
"mutations",
"=",
"[",
"]",
"clustal_symbols",
"=",
"SubstitutionScore",
".",
"clustal_symbols",
"#print(pdb_id_1, chain_1, pdb_id_2, chain_2)",
"#print(seqres_to_atom_sequence_maps_1)",
"for",
"seqres_res_id",
",",
"v",
"in",
"seqres_match_mapping",
".",
"iteritems",
"(",
")",
":",
"# Look at all positions which differ. seqres_res_id is 1-indexed, following the SEQRES and UniProt convention. However, so our our Sequence objects.",
"if",
"clustal_symbols",
"[",
"v",
".",
"clustal",
"]",
"!=",
"'*'",
":",
"# Get the wildtype Residue objects",
"seqres_wt_residue",
"=",
"seqres_1",
"[",
"seqres_res_id",
"]",
"#print(seqres_wt_residue)",
"seqres_mutant_residue",
"=",
"seqres_2",
"[",
"seqres_residue_mapping",
"[",
"seqres_res_id",
"]",
"]",
"# todo: this will probably fail for some cases where there is no corresponding mapping",
"# If there is an associated ATOM record for the wildtype residue, get its residue ID",
"atom_res_id",
"=",
"None",
"atom_chain_res_id",
"=",
"seqres_to_atom_sequence_maps_1",
".",
"get",
"(",
"seqres_res_id",
")",
"try",
":",
"if",
"atom_chain_res_id",
":",
"assert",
"(",
"atom_chain_res_id",
"[",
"0",
"]",
"==",
"chain_1",
")",
"atom_residue",
"=",
"atom_1",
"[",
"atom_chain_res_id",
"]",
"atom_res_id",
"=",
"atom_chain_res_id",
"[",
"1",
":",
"]",
"assert",
"(",
"atom_residue",
".",
"ResidueAA",
"==",
"seqres_wt_residue",
".",
"ResidueAA",
")",
"assert",
"(",
"atom_residue",
".",
"ResidueID",
"==",
"atom_res_id",
")",
"except",
":",
"atom_res_id",
"=",
"None",
"if",
"seqres_wt_residue",
".",
"ResidueAA",
"!=",
"'X'",
":",
"# we do not seem to keep ATOM records for unknown/non-canonicals: see 2BTF chain A -> 2PBD chain A",
"raise",
"# Create two mutations - one for the SEQRES residue and one for the corresponding (if any) ATOM residue",
"# We create both so that the user is informed whether there is a mutation between the structures which is",
"# not captured by the coordinates.",
"# If there are no ATOM coordinates, there is no point creating an ATOM mutation object so we instead use",
"# the None type. This also fits with the approach in the SpiderWeb framework.",
"seqres_mutation",
"=",
"ChainMutation",
"(",
"seqres_wt_residue",
".",
"ResidueAA",
",",
"seqres_res_id",
",",
"seqres_mutant_residue",
".",
"ResidueAA",
",",
"Chain",
"=",
"chain_1",
")",
"atom_mutation",
"=",
"None",
"if",
"atom_res_id",
":",
"atom_mutation",
"=",
"ChainMutation",
"(",
"seqres_wt_residue",
".",
"ResidueAA",
",",
"atom_res_id",
",",
"seqres_mutant_residue",
".",
"ResidueAA",
",",
"Chain",
"=",
"chain_1",
")",
"mutations",
".",
"append",
"(",
"PDBMutationPair",
"(",
"seqres_mutation",
",",
"atom_mutation",
")",
")",
"return",
"mutations"
]
| Returns a list of tuples each containing a SEQRES Mutation object and an ATOM Mutation object representing the
mutations from pdb_id_1, chain_1 to pdb_id_2, chain_2.
SequenceMaps are constructed in this function between the chains based on the alignment.
PDBMutationPair objects are returned as they are hashable and amenable to set construction to eliminate duplicates. | [
"Returns",
"a",
"list",
"of",
"tuples",
"each",
"containing",
"a",
"SEQRES",
"Mutation",
"object",
"and",
"an",
"ATOM",
"Mutation",
"object",
"representing",
"the",
"mutations",
"from",
"pdb_id_1",
"chain_1",
"to",
"pdb_id_2",
"chain_2",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/clustalo.py#L1287-L1373 | train |
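A hedged usage sketch for the method above: the constructor arguments are omitted (they depend on local cache configuration), and the chain pairing is illustrative, reusing the 1KTZ/2PJY example from this module's comments. Because PDBMutationPair is hashable, results from several chain pairs can be deduplicated with a set, which is exactly what the docstring advertises.
aligner = SIFTSChainMutatorSequenceAligner()  # constructor arguments assumed/omitted
unique_pairs = set()
for chain_1, chain_2 in [('A', 'A'), ('B', 'B')]:
    unique_pairs.update(aligner.get_chain_mutations('1KTZ', chain_1, '2PJY', chain_2))
for pair in sorted(unique_pairs, key=str):
    print(pair)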
Kortemme-Lab/klab | klab/rosetta/map_pdb_residues.py | get_mapping_from_db3_file | def get_mapping_from_db3_file( db_path ):
'''
Does the work of reading the Rosetta SQLite3 .db3 file to retrieve the mapping
'''
import sqlite3 # should be moved to the top but we do this here for CentOS 5 support
conn = sqlite3.connect(db_path)
results = conn.cursor().execute('''
SELECT chain_id, pdb_residue_number, insertion_code, residues.struct_id, residues.resNum, residues.name3, residues.res_type
FROM residue_pdb_identification
INNER JOIN residues ON residue_pdb_identification.struct_id=residues.struct_id AND residue_pdb_identification.residue_number=residues.resNum
''')
# Create the mapping from PDB residues to Rosetta residues
rosetta_residue_ids = []
mapping = {}
for r in results:
mapping["%s%s%s" % (r[0], str(r[1]).rjust(4), r[2])] = {'pose_residue_id' : r[4], 'name3' : r[5], 'res_type' : r[6]}
rosetta_residue_ids.append(r[4])
# Ensure that the range of the map is exactly the set of Rosetta residues i.e. the map from (a subset of) the PDB residues to the Rosetta residues is surjective
raw_residue_list = [r for r in conn.cursor().execute('''SELECT resNum, name3 FROM residues ORDER BY resNum''')]
assert(sorted([r[0] for r in raw_residue_list]) == sorted(rosetta_residue_ids))
return mapping | python | def get_mapping_from_db3_file( db_path ):
'''
Does the work of reading the Rosetta SQLite3 .db3 file to retrieve the mapping
'''
import sqlite3 # should be moved to the top but we do this here for CentOS 5 support
conn = sqlite3.connect(db_path)
results = conn.cursor().execute('''
SELECT chain_id, pdb_residue_number, insertion_code, residues.struct_id, residues.resNum, residues.name3, residues.res_type
FROM residue_pdb_identification
INNER JOIN residues ON residue_pdb_identification.struct_id=residues.struct_id AND residue_pdb_identification.residue_number=residues.resNum
''')
# Create the mapping from PDB residues to Rosetta residues
rosetta_residue_ids = []
mapping = {}
for r in results:
mapping["%s%s%s" % (r[0], str(r[1]).rjust(4), r[2])] = {'pose_residue_id' : r[4], 'name3' : r[5], 'res_type' : r[6]}
rosetta_residue_ids.append(r[4])
# Ensure that the range of the map is exactly the set of Rosetta residues i.e. the map from (a subset of) the PDB residues to the Rosetta residues is surjective
raw_residue_list = [r for r in conn.cursor().execute('''SELECT resNum, name3 FROM residues ORDER BY resNum''')]
assert(sorted([r[0] for r in raw_residue_list]) == sorted(rosetta_residue_ids))
return mapping | [
"def",
"get_mapping_from_db3_file",
"(",
"db_path",
")",
":",
"import",
"sqlite3",
"# should be moved to the top but we do this here for CentOS 5 support",
"conn",
"=",
"sqlite3",
".",
"connect",
"(",
"db_path",
")",
"results",
"=",
"conn",
".",
"cursor",
"(",
")",
".",
"execute",
"(",
"'''\n SELECT chain_id, pdb_residue_number, insertion_code, residues.struct_id, residues.resNum, residues.name3, residues.res_type\n FROM residue_pdb_identification\n INNER JOIN residues ON residue_pdb_identification.struct_id=residues.struct_id AND residue_pdb_identification.residue_number=residues.resNum\n '''",
")",
"# Create the mapping from PDB residues to Rosetta residues",
"rosetta_residue_ids",
"=",
"[",
"]",
"mapping",
"=",
"{",
"}",
"for",
"r",
"in",
"results",
":",
"mapping",
"[",
"\"%s%s%s\"",
"%",
"(",
"r",
"[",
"0",
"]",
",",
"str",
"(",
"r",
"[",
"1",
"]",
")",
".",
"rjust",
"(",
"4",
")",
",",
"r",
"[",
"2",
"]",
")",
"]",
"=",
"{",
"'pose_residue_id'",
":",
"r",
"[",
"4",
"]",
",",
"'name3'",
":",
"r",
"[",
"5",
"]",
",",
"'res_type'",
":",
"r",
"[",
"6",
"]",
"}",
"rosetta_residue_ids",
".",
"append",
"(",
"r",
"[",
"4",
"]",
")",
"# Ensure that the the range of the map is exactly the set of Rosetta residues i.e. the map from (a subset of) the PDB residues to the Rosetta residues is surjective",
"raw_residue_list",
"=",
"[",
"r",
"for",
"r",
"in",
"conn",
".",
"cursor",
"(",
")",
".",
"execute",
"(",
"'''SELECT resNum, name3 FROM residues ORDER BY resNum'''",
")",
"]",
"assert",
"(",
"sorted",
"(",
"[",
"r",
"[",
"0",
"]",
"for",
"r",
"in",
"raw_residue_list",
"]",
")",
"==",
"sorted",
"(",
"rosetta_residue_ids",
")",
")",
"return",
"mapping"
]
| Does the work of reading the Rosetta SQLite3 .db3 file to retrieve the mapping | [
"Does",
"the",
"work",
"of",
"reading",
"the",
"Rosetta",
"SQLite3",
".",
"db3",
"file",
"to",
"retrieve",
"the",
"mapping"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/rosetta/map_pdb_residues.py#L128-L152 | train |
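The dictionary keys built above follow the PDB residue convention of chain identifier, residue number right-justified to four characters, and insertion code. A small helper showing the exact key format (the helper name is ours):
def pdb_residue_key(chain_id, residue_number, insertion_code=' '):
    return "%s%s%s" % (chain_id, str(residue_number).rjust(4), insertion_code)

print(repr(pdb_residue_key('A', 42)))         # 'A  42 '
print(repr(pdb_residue_key('B', 1024, 'A')))  # 'B1024A'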
Kortemme-Lab/klab | klab/google/gcalendar.py | GoogleCalendar.add_company_quarter | def add_company_quarter(self, company_name, quarter_name, dt, calendar_id = 'notices'):
'''Adds a company_name quarter event to the calendar. dt should be a date object. Returns True if the event was added.'''
assert(calendar_id in self.configured_calendar_ids.keys())
calendarId = self.configured_calendar_ids[calendar_id]
quarter_name = quarter_name.title()
quarter_numbers = {
'Spring' : 1,
'Summer' : 2,
'Fall' : 3,
'Winter' : 4
}
assert(quarter_name in quarter_numbers.keys())
start_time = datetime(year=dt.year, month=dt.month, day=dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone) + timedelta(days = -1)
end_time = start_time + timedelta(days = 3, seconds = -1)
summary = '%s %s Quarter begins' % (company_name, quarter_name)
# Do not add the quarter multiple times
events = self.get_events(start_time.isoformat(), end_time.isoformat(), ignore_cancelled = True)
for event in events:
if event.summary.find(summary) != -1:
return False
event_body = {
'summary' : summary,
'description' : summary,
'start' : {'date' : dt.isoformat(), 'timeZone' : self.timezone_string},
'end' : {'date' : dt.isoformat(), 'timeZone' : self.timezone_string},
'status' : 'confirmed',
'gadget' : {
'display' : 'icon',
'iconLink' : 'https://guybrush.ucsf.edu/images/Q%d_32.png' % quarter_numbers[quarter_name],
'title' : summary,
},
'extendedProperties' : {
'shared' : {
'event_type' : '%s quarter' % company_name,
'quarter_name' : quarter_name
}
}
}
colortext.warning('\n%s\n' % pprint.pformat(event_body))
created_event = self.service.events().insert(calendarId = self.configured_calendar_ids[calendar_id], body = event_body).execute()
return True | python | def add_company_quarter(self, company_name, quarter_name, dt, calendar_id = 'notices'):
'''Adds a company_name quarter event to the calendar. dt should be a date object. Returns True if the event was added.'''
assert(calendar_id in self.configured_calendar_ids.keys())
calendarId = self.configured_calendar_ids[calendar_id]
quarter_name = quarter_name.title()
quarter_numbers = {
'Spring' : 1,
'Summer' : 2,
'Fall' : 3,
'Winter' : 4
}
assert(quarter_name in quarter_numbers.keys())
start_time = datetime(year=dt.year, month=dt.month, day=dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone) + timedelta(days = -1)
end_time = start_time + timedelta(days = 3, seconds = -1)
summary = '%s %s Quarter begins' % (company_name, quarter_name)
# Do not add the quarter multiple times
events = self.get_events(start_time.isoformat(), end_time.isoformat(), ignore_cancelled = True)
for event in events:
if event.summary.find(summary) != -1:
return False
event_body = {
'summary' : summary,
'description' : summary,
'start' : {'date' : dt.isoformat(), 'timeZone' : self.timezone_string},
'end' : {'date' : dt.isoformat(), 'timeZone' : self.timezone_string},
'status' : 'confirmed',
'gadget' : {
'display' : 'icon',
'iconLink' : 'https://guybrush.ucsf.edu/images/Q%d_32.png' % quarter_numbers[quarter_name],
'title' : summary,
},
'extendedProperties' : {
'shared' : {
'event_type' : '%s quarter' % company_name,
'quarter_name' : quarter_name
}
}
}
colortext.warning('\n%s\n' % pprint.pformat(event_body))
created_event = self.service.events().insert(calendarId = self.configured_calendar_ids[calendar_id], body = event_body).execute()
return True | [
"def",
"add_company_quarter",
"(",
"self",
",",
"company_name",
",",
"quarter_name",
",",
"dt",
",",
"calendar_id",
"=",
"'notices'",
")",
":",
"assert",
"(",
"calendar_id",
"in",
"self",
".",
"configured_calendar_ids",
".",
"keys",
"(",
")",
")",
"calendarId",
"=",
"self",
".",
"configured_calendar_ids",
"[",
"calendar_id",
"]",
"quarter_name",
"=",
"quarter_name",
".",
"title",
"(",
")",
"quarter_numbers",
"=",
"{",
"'Spring'",
":",
"1",
",",
"'Summer'",
":",
"2",
",",
"'Fall'",
":",
"3",
",",
"'Winter'",
":",
"4",
"}",
"assert",
"(",
"quarter_name",
"in",
"quarter_numbers",
".",
"keys",
"(",
")",
")",
"start_time",
"=",
"datetime",
"(",
"year",
"=",
"dt",
".",
"year",
",",
"month",
"=",
"dt",
".",
"month",
",",
"day",
"=",
"dt",
".",
"day",
",",
"hour",
"=",
"0",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0",
",",
"tzinfo",
"=",
"self",
".",
"timezone",
")",
"+",
"timedelta",
"(",
"days",
"=",
"-",
"1",
")",
"end_time",
"=",
"start_time",
"+",
"timedelta",
"(",
"days",
"=",
"3",
",",
"seconds",
"=",
"-",
"1",
")",
"summary",
"=",
"'%s %s Quarter begins'",
"%",
"(",
"company_name",
",",
"quarter_name",
")",
"# Do not add the quarter multiple times",
"events",
"=",
"self",
".",
"get_events",
"(",
"start_time",
".",
"isoformat",
"(",
")",
",",
"end_time",
".",
"isoformat",
"(",
")",
",",
"ignore_cancelled",
"=",
"True",
")",
"for",
"event",
"in",
"events",
":",
"if",
"event",
".",
"summary",
".",
"find",
"(",
"summary",
")",
"!=",
"-",
"1",
":",
"return",
"False",
"event_body",
"=",
"{",
"'summary'",
":",
"summary",
",",
"'description'",
":",
"summary",
",",
"'start'",
":",
"{",
"'date'",
":",
"dt",
".",
"isoformat",
"(",
")",
",",
"'timeZone'",
":",
"self",
".",
"timezone_string",
"}",
",",
"'end'",
":",
"{",
"'date'",
":",
"dt",
".",
"isoformat",
"(",
")",
",",
"'timeZone'",
":",
"self",
".",
"timezone_string",
"}",
",",
"'status'",
":",
"'confirmed'",
",",
"'gadget'",
":",
"{",
"'display'",
":",
"'icon'",
",",
"'iconLink'",
":",
"'https://guybrush.ucsf.edu/images/Q%d_32.png'",
"%",
"quarter_numbers",
"[",
"quarter_name",
"]",
",",
"'title'",
":",
"summary",
",",
"}",
",",
"'extendedProperties'",
":",
"{",
"'shared'",
":",
"{",
"'event_type'",
":",
"'%s quarter'",
"%",
"company_name",
",",
"'quarter_name'",
":",
"quarter_name",
"}",
"}",
"}",
"colortext",
".",
"warning",
"(",
"'\\n%s\\n'",
"%",
"pprint",
".",
"pformat",
"(",
"event_body",
")",
")",
"created_event",
"=",
"self",
".",
"service",
".",
"events",
"(",
")",
".",
"insert",
"(",
"calendarId",
"=",
"self",
".",
"configured_calendar_ids",
"[",
"calendar_id",
"]",
",",
"body",
"=",
"event_body",
")",
".",
"execute",
"(",
")",
"return",
"True"
]
| Adds a company_name quarter event to the calendar. dt should be a date object. Returns True if the event was added. | [
"Adds",
"a",
"company_name",
"quarter",
"event",
"to",
"the",
"calendar",
".",
"dt",
"should",
"be",
"a",
"date",
"object",
".",
"Returns",
"True",
"if",
"the",
"event",
"was",
"added",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/google/gcalendar.py#L393-L438 | train |
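A hedged usage sketch: it assumes a GoogleCalendar instance already constructed with credentials, configured calendar ids, and a timezone; the company name and date are illustrative. Note that quarter_name is title-cased internally, so 'fall' and 'Fall' are equivalent.
from datetime import date

gcal = GoogleCalendar()  # constructor arguments assumed/omitted
if gcal.add_company_quarter('Acme', 'fall', date(2015, 9, 21)):
    print('quarter event created')
else:
    print('quarter event already on the calendar')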
uogbuji/versa | tools/py/driver/sqlite.py | connection.create_space | def create_space(self):
'''Set up a new table space for the first time'''
cur = self._conn.cursor()
cur.executescript(SQL_MODEL)
self._conn.commit()
cur.close()
return | python | def create_space(self):
'''Set up a new table space for the first time'''
cur = self._conn.cursor()
cur.executescript(SQL_MODEL)
self._conn.commit()
cur.close()
return | [
"def",
"create_space",
"(",
"self",
")",
":",
"cur",
"=",
"self",
".",
"_conn",
".",
"cursor",
"(",
")",
"cur",
".",
"executescript",
"(",
"SQL_MODEL",
")",
"self",
".",
"_conn",
".",
"commit",
"(",
")",
"cur",
".",
"close",
"(",
")",
"return"
]
| Set up a new table space for the first time | [
"Set",
"up",
"a",
"new",
"table",
"space",
"for",
"the",
"first",
"time"
]
| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/driver/sqlite.py#L31-L37 | train |
uogbuji/versa | tools/py/driver/sqlite.py | connection.drop_space | def drop_space(self):
'''Dismantle an existing table space'''
cur = self._conn.cursor()
cur.executescript(DROP_SQL_MODEL)
self._conn.commit()
cur.close()
return | python | def drop_space(self):
'''Dismantle an existing table space'''
cur = self._conn.cursor()
cur.executescript(DROP_SQL_MODEL)
self._conn.commit()
cur.close()
return | [
"def",
"drop_space",
"(",
"self",
")",
":",
"cur",
"=",
"self",
".",
"_conn",
".",
"cursor",
"(",
")",
"cur",
".",
"executescript",
"(",
"DROP_SQL_MODEL",
")",
"self",
".",
"_conn",
".",
"commit",
"(",
")",
"cur",
".",
"close",
"(",
")",
"return"
]
| Dismantle an existing table space | [
"Dismantle",
"an",
"existing",
"table",
"space"
]
| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/driver/sqlite.py#L39-L45 | train |
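The two methods above are thin wrappers around executescript. A self-contained round trip against an in-memory database, with trivial stand-ins for the module's real SQL_MODEL and DROP_SQL_MODEL scripts:
import sqlite3

SQL_MODEL = 'CREATE TABLE IF NOT EXISTS relationship (id INTEGER PRIMARY KEY);'  # stand-in DDL
DROP_SQL_MODEL = 'DROP TABLE IF EXISTS relationship;'                            # stand-in DDL

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.executescript(SQL_MODEL)       # what create_space does
conn.commit()
cur.executescript(DROP_SQL_MODEL)  # what drop_space does
conn.commit()
cur.close()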
Ceasar/twosheds | twosheds/cli.py | CommandLineInterface.eval | def eval(self, text):
"""Respond to text entered by the user.
:param text: the user's input
"""
program = Program(text, echo=self.echo, transforms=self.transforms)
tokens = program.gen_tokens()
for sentence in program.gen_sentences(tokens, self.aliases):
if self.echo:
self.terminal.debug(str(sentence))
program.interpret(sentence, self.commands) | python | def eval(self, text):
"""Respond to text entered by the user.
:param text: the user's input
"""
program = Program(text, echo=self.echo, transforms=self.transforms)
tokens = program.gen_tokens()
for sentence in program.gen_sentences(tokens, self.aliases):
if self.echo:
self.terminal.debug(str(sentence))
program.interpret(sentence, self.commands) | [
"def",
"eval",
"(",
"self",
",",
"text",
")",
":",
"program",
"=",
"Program",
"(",
"text",
",",
"echo",
"=",
"self",
".",
"echo",
",",
"transforms",
"=",
"self",
".",
"transforms",
")",
"tokens",
"=",
"program",
".",
"gen_tokens",
"(",
")",
"for",
"sentence",
"in",
"program",
".",
"gen_sentences",
"(",
"tokens",
",",
"self",
".",
"aliases",
")",
":",
"if",
"self",
".",
"echo",
":",
"self",
".",
"terminal",
".",
"debug",
"(",
"str",
"(",
"sentence",
")",
")",
"program",
".",
"interpret",
"(",
"sentence",
",",
"self",
".",
"commands",
")"
]
| Respond to text entered by the user.
:param text: the user's input | [
"Respond",
"to",
"text",
"entered",
"by",
"the",
"user",
"."
]
| 55b0a207e3a06b85e9a9567069b3822a651501a7 | https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/cli.py#L36-L46 | train |
Ceasar/twosheds | twosheds/cli.py | CommandLineInterface.interact | def interact(self):
"""Get a command from the user and respond to it."""
lines = ""
for line in self.read():
lines += line
try:
self.eval(lines)
except ValueError:
pass
except KeyboardInterrupt as e:
raise e
except:
self.terminal.error(traceback.format_exc())
break
else:
break | python | def interact(self):
"""Get a command from the user and respond to it."""
lines = ""
for line in self.read():
lines += line
try:
self.eval(lines)
except ValueError:
pass
except KeyboardInterrupt as e:
raise e
except:
self.terminal.error(traceback.format_exc())
break
else:
break | [
"def",
"interact",
"(",
"self",
")",
":",
"lines",
"=",
"\"\"",
"for",
"line",
"in",
"self",
".",
"read",
"(",
")",
":",
"lines",
"+=",
"line",
"try",
":",
"self",
".",
"eval",
"(",
"lines",
")",
"except",
"ValueError",
":",
"pass",
"except",
"KeyboardInterrupt",
"as",
"e",
":",
"raise",
"e",
"except",
":",
"self",
".",
"terminal",
".",
"error",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"break",
"else",
":",
"break"
]
| Get a command from the user and respond to it. | [
"Get",
"a",
"command",
"from",
"the",
"user",
"and",
"respond",
"to",
"it",
"."
]
| 55b0a207e3a06b85e9a9567069b3822a651501a7 | https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/cli.py#L48-L63 | train |
Ceasar/twosheds | twosheds/cli.py | CommandLineInterface.serve_forever | def serve_forever(self, banner=None):
"""Handle one interaction at a time until shutdown.
:param banner: (optional) the banner to print before the first
interaction. Defaults to ``None``.
"""
if banner:
print(banner)
while True:
try:
self.interact()
except KeyboardInterrupt: # program interrupted by the user
print # do not print on the same line as ^C
pass
except SystemExit: # exit from the interpreter
break | python | def serve_forever(self, banner=None):
"""Handle one interaction at a time until shutdown.
:param banner: (optional) the banner to print before the first
interaction. Defaults to ``None``.
"""
if banner:
print(banner)
while True:
try:
self.interact()
except KeyboardInterrupt: # program interrupted by the user
print # do not print on the same line as ^C
pass
except SystemExit: # exit from the interpreter
break | [
"def",
"serve_forever",
"(",
"self",
",",
"banner",
"=",
"None",
")",
":",
"if",
"banner",
":",
"print",
"(",
"banner",
")",
"while",
"True",
":",
"try",
":",
"self",
".",
"interact",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"# program interrupted by the user",
"print",
"# do not print on the same line as ^C",
"pass",
"except",
"SystemExit",
":",
"# exit from the interpreter",
"break"
]
| Handle one interaction at a time until shutdown.
:param banner: (optional) the banner to print before the first
interaction. Defaults to ``None``. | [
"Handle",
"one",
"interaction",
"at",
"a",
"time",
"until",
"shutdown",
"."
]
| 55b0a207e3a06b85e9a9567069b3822a651501a7 | https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/cli.py#L65-L80 | train |
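Taken together, eval, interact, and serve_forever form a classic read-eval-print loop: serve_forever traps interrupts and exit around interact, which accumulates input lines and hands them to eval. A stripped-down skeleton of the same control flow (error reporting and the multi-line retry are elided):
def repl(read, evaluate, banner=None):
    if banner:
        print(banner)
    while True:
        try:
            for line in read():  # interact: gather input
                evaluate(line)   # eval: run it
        except KeyboardInterrupt:
            print()              # do not print on the same line as ^C
        except SystemExit:       # exit from the interpreter
            break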
TheGhouls/oct | oct/utilities/run.py | process_results | def process_results(output_dir, config):
"""Process results and output them
"""
print('\nanalyzing results...\n')
res = output_results(output_dir, config)
if res:
print('created: %s/results.html\n' % output_dir)
else:
print('results cannot be processed') | python | def process_results(output_dir, config):
"""Process results and output them
"""
print('\nanalyzing results...\n')
res = output_results(output_dir, config)
if res:
print('created: %s/results.html\n' % output_dir)
else:
print('results cannot be processed') | [
"def",
"process_results",
"(",
"output_dir",
",",
"config",
")",
":",
"print",
"(",
"'\\nanalyzing results...\\n'",
")",
"res",
"=",
"output_results",
"(",
"output_dir",
",",
"config",
")",
"if",
"res",
":",
"print",
"(",
"'created: %s/results.html\\n'",
"%",
"output_dir",
")",
"else",
":",
"print",
"(",
"'results cannot be processed'",
")"
]
| Process results and output them | [
"Process",
"results",
"and",
"output",
"them"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/utilities/run.py#L14-L22 | train |
TheGhouls/oct | oct/utilities/run.py | copy_config | def copy_config(project_path, output_dir):
"""Copy current config file to output directory
"""
project_config = os.path.join(project_path, 'config.json')
saved_config = os.path.join(output_dir, 'config.json')
shutil.copy(project_config, saved_config) | python | def copy_config(project_path, output_dir):
"""Copy current config file to output directory
"""
project_config = os.path.join(project_path, 'config.json')
saved_config = os.path.join(output_dir, 'config.json')
shutil.copy(project_config, saved_config) | [
"def",
"copy_config",
"(",
"project_path",
",",
"output_dir",
")",
":",
"project_config",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"'config.json'",
")",
"saved_config",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'config.json'",
")",
"shutil",
".",
"copy",
"(",
"project_config",
",",
"saved_config",
")"
]
| Copy current config file to output directory | [
"Copy",
"current",
"config",
"file",
"to",
"output",
"directory"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/utilities/run.py#L25-L30 | train |
TheGhouls/oct | oct/utilities/run.py | start_hq | def start_hq(output_dir, config, topic, is_master=True, **kwargs):
"""Start a HQ
"""
HightQuarter = get_hq_class(config.get('hq_class'))
hq = HightQuarter(output_dir, config, topic, **kwargs)
hq.setup()
if is_master:
hq.wait_turrets(config.get("min_turrets", 1))
hq.run()
hq.tear_down() | python | def start_hq(output_dir, config, topic, is_master=True, **kwargs):
"""Start a HQ
"""
HightQuarter = get_hq_class(config.get('hq_class'))
hq = HightQuarter(output_dir, config, topic, **kwargs)
hq.setup()
if is_master:
hq.wait_turrets(config.get("min_turrets", 1))
hq.run()
hq.tear_down() | [
"def",
"start_hq",
"(",
"output_dir",
",",
"config",
",",
"topic",
",",
"is_master",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"HightQuarter",
"=",
"get_hq_class",
"(",
"config",
".",
"get",
"(",
"'hq_class'",
")",
")",
"hq",
"=",
"HightQuarter",
"(",
"output_dir",
",",
"config",
",",
"topic",
",",
"*",
"*",
"kwargs",
")",
"hq",
".",
"setup",
"(",
")",
"if",
"is_master",
":",
"hq",
".",
"wait_turrets",
"(",
"config",
".",
"get",
"(",
"\"min_turrets\"",
",",
"1",
")",
")",
"hq",
".",
"run",
"(",
")",
"hq",
".",
"tear_down",
"(",
")"
]
| Start a HQ | [
"Start",
"a",
"HQ"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/utilities/run.py#L33-L42 | train |
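get_hq_class resolves the configured hq_class value to a concrete HightQuarter implementation; its source is not shown here, but such lookups are commonly a dotted-path import. A generic sketch of that pattern (this is an assumption about the mechanism, not oct's actual implementation):
import importlib

def load_class(dotted_path):
    module_name, _, class_name = dotted_path.rpartition('.')
    return getattr(importlib.import_module(module_name), class_name)

print(load_class('collections.OrderedDict'))  # <class 'collections.OrderedDict'>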
TheGhouls/oct | oct/utilities/run.py | generate_output_path | def generate_output_path(args, project_path):
"""Generate default output directory
"""
milisec = datetime.now().microsecond
dirname = 'results_{}_{}'.format(time.strftime('%Y.%m.%d_%H.%M.%S', time.localtime()), str(milisec))
return os.path.join(project_path, 'results', dirname) | python | def generate_output_path(args, project_path):
"""Generate default output directory
"""
milisec = datetime.now().microsecond
dirname = 'results_{}_{}'.format(time.strftime('%Y.%m.%d_%H.%M.%S', time.localtime()), str(milisec))
return os.path.join(project_path, 'results', dirname) | [
"def",
"generate_output_path",
"(",
"args",
",",
"project_path",
")",
":",
"milisec",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"microsecond",
"dirname",
"=",
"'results_{}_{}'",
".",
"format",
"(",
"time",
".",
"strftime",
"(",
"'%Y.%m.%d_%H.%M.%S'",
",",
"time",
".",
"localtime",
"(",
")",
")",
",",
"str",
"(",
"milisec",
")",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"'results'",
",",
"dirname",
")"
]
| Generate default output directory | [
"Generate",
"default",
"output",
"directory"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/utilities/run.py#L45-L50 | train |
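The helper above yields timestamped, collision-resistant directory names such as results/results_2015.03.01_14.02.33_123456 (example values only). A condensed equivalent of the name construction:
import os
import time
from datetime import datetime

def results_dirname():
    milisec = datetime.now().microsecond
    return 'results_{}_{}'.format(
        time.strftime('%Y.%m.%d_%H.%M.%S', time.localtime()), str(milisec))

print(os.path.join('myproject', 'results', results_dirname()))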
TheGhouls/oct | oct/utilities/run.py | run | def run(args):
"""Start an oct project
:param Namespace args: the command-line arguments
"""
kwargs = vars(args)
if 'func' in kwargs:
del kwargs['func']
project_path = kwargs.pop('project_path')
config = configure(project_path, kwargs.get('config_file'))
output_dir = kwargs.pop('output_dir', None) or generate_output_path(args, project_path)
stats_handler.init_stats(output_dir, config)
topic = args.publisher_channel or uuid.uuid4().hex
print("External publishing topic is %s" % topic)
start_hq(output_dir, config, topic, **kwargs)
if not args.no_results:
process_results(output_dir, config)
copy_config(project_path, output_dir)
print('done.\n') | python | def run(args):
"""Start an oct project
:param Namespace args: the command-line arguments
"""
kwargs = vars(args)
if 'func' in kwargs:
del kwargs['func']
project_path = kwargs.pop('project_path')
config = configure(project_path, kwargs.get('config_file'))
output_dir = kwargs.pop('output_dir', None) or generate_output_path(args, project_path)
stats_handler.init_stats(output_dir, config)
topic = args.publisher_channel or uuid.uuid4().hex
print("External publishing topic is %s" % topic)
start_hq(output_dir, config, topic, **kwargs)
if not args.no_results:
process_results(output_dir, config)
copy_config(project_path, output_dir)
print('done.\n') | [
"def",
"run",
"(",
"args",
")",
":",
"kwargs",
"=",
"vars",
"(",
"args",
")",
"if",
"'func'",
"in",
"kwargs",
":",
"del",
"kwargs",
"[",
"'func'",
"]",
"project_path",
"=",
"kwargs",
".",
"pop",
"(",
"'project_path'",
")",
"config",
"=",
"configure",
"(",
"project_path",
",",
"kwargs",
".",
"get",
"(",
"'config_file'",
")",
")",
"output_dir",
"=",
"kwargs",
".",
"pop",
"(",
"'output_dir'",
",",
"None",
")",
"or",
"generate_output_path",
"(",
"args",
",",
"project_path",
")",
"stats_handler",
".",
"init_stats",
"(",
"output_dir",
",",
"config",
")",
"topic",
"=",
"args",
".",
"publisher_channel",
"or",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
"print",
"(",
"\"External publishing topic is %s\"",
"%",
"topic",
")",
"start_hq",
"(",
"output_dir",
",",
"config",
",",
"topic",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"args",
".",
"no_results",
":",
"process_results",
"(",
"output_dir",
",",
"config",
")",
"copy_config",
"(",
"project_path",
",",
"output_dir",
")",
"print",
"(",
"'done.\\n'",
")"
]
| Start an oct project
:param Namespace args: the command-line arguments | [
"Start",
"an",
"oct",
"project"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/utilities/run.py#L53-L79 | train |
projectshift/shift-boiler | boiler/user/views_profile.py | guest_access | def guest_access(func):
"""
Guest access decorator
Checks if public profiles option is enabled in config and checks
access to profile pages based on that.
"""
def decorated(*_, **kwargs):
public_profiles = current_app.config['USER_PUBLIC_PROFILES']
if not public_profiles:
if not current_user.is_authenticated:
abort(401)
elif current_user.id != kwargs['id']:
abort(403)
return func(**kwargs)
return decorated | python | def guest_access(func):
"""
Guest access decorator
Checks if public profiles option is enabled in config and checks
access to profile pages based on that.
"""
def decorated(*_, **kwargs):
public_profiles = current_app.config['USER_PUBLIC_PROFILES']
if not public_profiles:
if not current_user.is_authenticated:
abort(401)
elif current_user.id != kwargs['id']:
abort(403)
return func(**kwargs)
return decorated | [
"def",
"guest_access",
"(",
"func",
")",
":",
"def",
"decorated",
"(",
"*",
"_",
",",
"*",
"*",
"kwargs",
")",
":",
"public_profiles",
"=",
"current_app",
".",
"config",
"[",
"'USER_PUBLIC_PROFILES'",
"]",
"if",
"not",
"public_profiles",
":",
"if",
"not",
"current_user",
".",
"is_authenticated",
":",
"abort",
"(",
"401",
")",
"elif",
"current_user",
".",
"id",
"!=",
"kwargs",
"[",
"'id'",
"]",
":",
"abort",
"(",
"403",
")",
"return",
"func",
"(",
"*",
"*",
"kwargs",
")",
"return",
"decorated"
]
| Guest access decorator
Checks if public profiles option is enabled in config and checks
access to profile pages based on that. | [
"Guest",
"access",
"decorator",
"Checks",
"if",
"public",
"profiles",
"option",
"is",
"enabled",
"in",
"config",
"and",
"checks",
"access",
"to",
"profile",
"pages",
"based",
"on",
"that",
"."
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/views_profile.py#L24-L39 | train |
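One caveat with the decorator above: the inner decorated function discards positional arguments and does not carry over the wrapped view's __name__ or docstring, which can confuse Flask's endpoint naming when the decorator is applied to more than one view. A hedged variant of the same shape using functools.wraps (the access checks themselves are elided):
from functools import wraps

def profile_access(func):
    @wraps(func)  # preserve func.__name__ / __doc__ for Flask's endpoint naming
    def decorated(*_, **kwargs):
        # ... same public-profile and ownership checks as above ...
        return func(**kwargs)
    return decorated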
projectshift/shift-boiler | boiler/user/views_profile.py | only_owner | def only_owner(func):
"""
Only owner decorator
Restricts access to view only to profile owner
"""
def decorated(*_, **kwargs):
id = kwargs['id']
if not current_user.is_authenticated:
abort(401)
elif current_user.id != id:
abort(403)
return func(**kwargs)
return decorated | python | def only_owner(func):
"""
Only owner decorator
    Restricts access to view only to profile owner
"""
def decorated(*_, **kwargs):
id = kwargs['id']
if not current_user.is_authenticated:
abort(401)
elif current_user.id != id:
abort(403)
return func(**kwargs)
return decorated | [
"def",
"only_owner",
"(",
"func",
")",
":",
"def",
"decorated",
"(",
"*",
"_",
",",
"*",
"*",
"kwargs",
")",
":",
"id",
"=",
"kwargs",
"[",
"'id'",
"]",
"if",
"not",
"current_user",
".",
"is_authenticated",
":",
"abort",
"(",
"401",
")",
"elif",
"current_user",
".",
"id",
"!=",
"id",
":",
"abort",
"(",
"403",
")",
"return",
"func",
"(",
"*",
"*",
"kwargs",
")",
"return",
"decorated"
]
| Only owner decorator
Restricts access to view only to profile owner | [
"Only",
"owner",
"decorator",
"Restricts",
"access",
"to",
"view",
"ony",
"to",
"profile",
"owner"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/views_profile.py#L42-L55 | train |
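A minimal usage sketch for the guest_access decorator above. The route, app setup, and view body are hypothetical, and a configured Flask-Login context is assumed alongside the decorator from the source module:

from flask import Flask, jsonify

app = Flask(__name__)
app.config['USER_PUBLIC_PROFILES'] = False  # restrict profiles to their owners

@app.route('/profile/<int:id>')
@guest_access  # assumed importable from boiler.user.views_profile
def profile(id):
    # Reached only when public profiles are enabled, or when the
    # authenticated user's id matches the requested id.
    return jsonify(profile_id=id)
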
CodersOfTheNight/oshino | oshino/config.py | load | def load(config_file):
"""
Processes and loads config file.
"""
with open(config_file, "r") as f:
def env_get():
return dict(os.environ)
tmpl = Template(f.read())
return Config(yaml.load(tmpl.render(**env_get()))) | python | def load(config_file):
"""
Processes and loads config file.
"""
with open(config_file, "r") as f:
def env_get():
return dict(os.environ)
tmpl = Template(f.read())
return Config(yaml.load(tmpl.render(**env_get()))) | [
"def",
"load",
"(",
"config_file",
")",
":",
"with",
"open",
"(",
"config_file",
",",
"\"r\"",
")",
"as",
"f",
":",
"def",
"env_get",
"(",
")",
":",
"return",
"dict",
"(",
"os",
".",
"environ",
")",
"tmpl",
"=",
"Template",
"(",
"f",
".",
"read",
"(",
")",
")",
"return",
"Config",
"(",
"yaml",
".",
"load",
"(",
"tmpl",
".",
"render",
"(",
"*",
"*",
"env_get",
"(",
")",
")",
")",
")"
]
| Processes and loads config file. | [
"Processes",
"and",
"loads",
"config",
"file",
"."
]
| 00f7e151e3ce1f3a7f43b353b695c4dba83c7f28 | https://github.com/CodersOfTheNight/oshino/blob/00f7e151e3ce1f3a7f43b353b695c4dba83c7f28/oshino/config.py#L180-L189 | train |
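A sketch of the environment interpolation that load() above performs: the config file is rendered with Jinja2 against os.environ before YAML parsing, so placeholders expand to environment values. The file name, key, and variable are illustrative:

import os
import tempfile

os.environ.setdefault('LOG_DIR', '/var/log')

# Hypothetical config content; {{ LOG_DIR }} is filled in by the
# Jinja2 render step inside load() before yaml.load runs.
with tempfile.NamedTemporaryFile('w', suffix='.yml', delete=False) as f:
    f.write('log_path: "{{ LOG_DIR }}/oshino.log"\n')

config = load(f.name)  # oshino Config wrapping {'log_path': '/var/log/oshino.log'}
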
uw-it-aca/uw-restclients-sws | uw_sws/term.py | get_term_by_year_and_quarter | def get_term_by_year_and_quarter(year, quarter):
"""
Returns a uw_sws.models.Term object,
for the passed year and quarter.
"""
url = "{}/{},{}.json".format(
term_res_url_prefix, year, quarter.lower())
return _json_to_term_model(get_resource(url)) | python | def get_term_by_year_and_quarter(year, quarter):
"""
Returns a uw_sws.models.Term object,
for the passed year and quarter.
"""
url = "{}/{},{}.json".format(
term_res_url_prefix, year, quarter.lower())
return _json_to_term_model(get_resource(url)) | [
"def",
"get_term_by_year_and_quarter",
"(",
"year",
",",
"quarter",
")",
":",
"url",
"=",
"\"{}/{},{}.json\"",
".",
"format",
"(",
"term_res_url_prefix",
",",
"year",
",",
"quarter",
".",
"lower",
"(",
")",
")",
"return",
"_json_to_term_model",
"(",
"get_resource",
"(",
"url",
")",
")"
]
| Returns a uw_sws.models.Term object,
for the passed year and quarter. | [
"Returns",
"a",
"uw_sws",
".",
"models",
".",
"Term",
"object",
"for",
"the",
"passed",
"year",
"and",
"quarter",
"."
]
| 4d36776dcca36855fc15c1b8fe7650ae045194cf | https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/term.py#L15-L22 | train |
uw-it-aca/uw-restclients-sws | uw_sws/term.py | get_current_term | def get_current_term():
"""
Returns a uw_sws.models.Term object,
for the current term.
"""
url = "{}/current.json".format(term_res_url_prefix)
term = _json_to_term_model(get_resource(url))
# A term doesn't become "current" until 2 days before the start of
# classes. That's too late to be useful, so if we're after the last
# day of grade submission window, use the next term resource.
if datetime.now() > term.grade_submission_deadline:
return get_next_term()
return term | python | def get_current_term():
"""
Returns a uw_sws.models.Term object,
for the current term.
"""
url = "{}/current.json".format(term_res_url_prefix)
term = _json_to_term_model(get_resource(url))
# A term doesn't become "current" until 2 days before the start of
# classes. That's too late to be useful, so if we're after the last
# day of grade submission window, use the next term resource.
if datetime.now() > term.grade_submission_deadline:
return get_next_term()
return term | [
"def",
"get_current_term",
"(",
")",
":",
"url",
"=",
"\"{}/current.json\"",
".",
"format",
"(",
"term_res_url_prefix",
")",
"term",
"=",
"_json_to_term_model",
"(",
"get_resource",
"(",
"url",
")",
")",
"# A term doesn't become \"current\" until 2 days before the start of",
"# classes. That's too late to be useful, so if we're after the last",
"# day of grade submission window, use the next term resource.",
"if",
"datetime",
".",
"now",
"(",
")",
">",
"term",
".",
"grade_submission_deadline",
":",
"return",
"get_next_term",
"(",
")",
"return",
"term"
]
| Returns a uw_sws.models.Term object,
for the current term. | [
"Returns",
"a",
"uw_sws",
".",
"models",
".",
"Term",
"object",
"for",
"the",
"current",
"term",
"."
]
| 4d36776dcca36855fc15c1b8fe7650ae045194cf | https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/term.py#L25-L39 | train |
uw-it-aca/uw-restclients-sws | uw_sws/term.py | get_term_before | def get_term_before(aterm):
"""
Returns a uw_sws.models.Term object,
for the term before the term given.
"""
prev_year = aterm.year
prev_quarter = QUARTER_SEQ[QUARTER_SEQ.index(aterm.quarter) - 1]
if prev_quarter == "autumn":
prev_year -= 1
return get_term_by_year_and_quarter(prev_year, prev_quarter) | python | def get_term_before(aterm):
"""
Returns a uw_sws.models.Term object,
for the term before the term given.
"""
prev_year = aterm.year
prev_quarter = QUARTER_SEQ[QUARTER_SEQ.index(aterm.quarter) - 1]
if prev_quarter == "autumn":
prev_year -= 1
return get_term_by_year_and_quarter(prev_year, prev_quarter) | [
"def",
"get_term_before",
"(",
"aterm",
")",
":",
"prev_year",
"=",
"aterm",
".",
"year",
"prev_quarter",
"=",
"QUARTER_SEQ",
"[",
"QUARTER_SEQ",
".",
"index",
"(",
"aterm",
".",
"quarter",
")",
"-",
"1",
"]",
"if",
"prev_quarter",
"==",
"\"autumn\"",
":",
"prev_year",
"-=",
"1",
"return",
"get_term_by_year_and_quarter",
"(",
"prev_year",
",",
"prev_quarter",
")"
]
| Returns a uw_sws.models.Term object,
for the term before the term given. | [
"Returns",
"a",
"uw_sws",
".",
"models",
".",
"Term",
"object",
"for",
"the",
"term",
"before",
"the",
"term",
"given",
"."
]
| 4d36776dcca36855fc15c1b8fe7650ae045194cf | https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/term.py#L60-L71 | train |
uw-it-aca/uw-restclients-sws | uw_sws/term.py | get_term_after | def get_term_after(aterm):
"""
Returns a uw_sws.models.Term object,
for the term after the term given.
"""
next_year = aterm.year
if aterm.quarter == "autumn":
next_quarter = QUARTER_SEQ[0]
else:
next_quarter = QUARTER_SEQ[QUARTER_SEQ.index(aterm.quarter) + 1]
if next_quarter == "winter":
next_year += 1
return get_term_by_year_and_quarter(next_year, next_quarter) | python | def get_term_after(aterm):
"""
Returns a uw_sws.models.Term object,
for the term after the term given.
"""
next_year = aterm.year
if aterm.quarter == "autumn":
next_quarter = QUARTER_SEQ[0]
else:
next_quarter = QUARTER_SEQ[QUARTER_SEQ.index(aterm.quarter) + 1]
if next_quarter == "winter":
next_year += 1
return get_term_by_year_and_quarter(next_year, next_quarter) | [
"def",
"get_term_after",
"(",
"aterm",
")",
":",
"next_year",
"=",
"aterm",
".",
"year",
"if",
"aterm",
".",
"quarter",
"==",
"\"autumn\"",
":",
"next_quarter",
"=",
"QUARTER_SEQ",
"[",
"0",
"]",
"else",
":",
"next_quarter",
"=",
"QUARTER_SEQ",
"[",
"QUARTER_SEQ",
".",
"index",
"(",
"aterm",
".",
"quarter",
")",
"+",
"1",
"]",
"if",
"next_quarter",
"==",
"\"winter\"",
":",
"next_year",
"+=",
"1",
"return",
"get_term_by_year_and_quarter",
"(",
"next_year",
",",
"next_quarter",
")"
]
| Returns a uw_sws.models.Term object,
for the term after the term given. | [
"Returns",
"a",
"uw_sws",
".",
"models",
".",
"Term",
"object",
"for",
"the",
"term",
"after",
"the",
"term",
"given",
"."
]
| 4d36776dcca36855fc15c1b8fe7650ae045194cf | https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/term.py#L74-L88 | train |
uw-it-aca/uw-restclients-sws | uw_sws/term.py | get_term_by_date | def get_term_by_date(date):
"""
Returns a term for the datetime.date object given.
"""
year = date.year
term = None
for quarter in ('autumn', 'summer', 'spring', 'winter'):
term = get_term_by_year_and_quarter(year, quarter)
if date >= term.first_day_quarter:
break
# If we're in a year, before the start of winter quarter, we need to go
# to the previous year's autumn term:
if date < term.first_day_quarter:
term = get_term_by_year_and_quarter(year - 1, 'autumn')
# Autumn quarter should always last through the end of the year,
# with winter of the next year starting in January. But this makes sure
# we catch it if not.
term_after = get_term_after(term)
if term_after.first_day_quarter > date:
return term
else:
return term_after
pass | python | def get_term_by_date(date):
"""
Returns a term for the datetime.date object given.
"""
year = date.year
term = None
for quarter in ('autumn', 'summer', 'spring', 'winter'):
term = get_term_by_year_and_quarter(year, quarter)
if date >= term.first_day_quarter:
break
# If we're in a year, before the start of winter quarter, we need to go
# to the previous year's autumn term:
if date < term.first_day_quarter:
term = get_term_by_year_and_quarter(year - 1, 'autumn')
# Autumn quarter should always last through the end of the year,
# with winter of the next year starting in January. But this makes sure
# we catch it if not.
term_after = get_term_after(term)
if term_after.first_day_quarter > date:
return term
else:
return term_after
pass | [
"def",
"get_term_by_date",
"(",
"date",
")",
":",
"year",
"=",
"date",
".",
"year",
"term",
"=",
"None",
"for",
"quarter",
"in",
"(",
"'autumn'",
",",
"'summer'",
",",
"'spring'",
",",
"'winter'",
")",
":",
"term",
"=",
"get_term_by_year_and_quarter",
"(",
"year",
",",
"quarter",
")",
"if",
"date",
">=",
"term",
".",
"first_day_quarter",
":",
"break",
"# If we're in a year, before the start of winter quarter, we need to go",
"# to the previous year's autumn term:",
"if",
"date",
"<",
"term",
".",
"first_day_quarter",
":",
"term",
"=",
"get_term_by_year_and_quarter",
"(",
"year",
"-",
"1",
",",
"'autumn'",
")",
"# Autumn quarter should always last through the end of the year,",
"# with winter of the next year starting in January. But this makes sure",
"# we catch it if not.",
"term_after",
"=",
"get_term_after",
"(",
"term",
")",
"if",
"term_after",
".",
"first_day_quarter",
">",
"date",
":",
"return",
"term",
"else",
":",
"return",
"term_after",
"pass"
]
| Returns a term for the datetime.date object given. | [
"Returns",
"a",
"term",
"for",
"the",
"datetime",
".",
"date",
"object",
"given",
"."
]
| 4d36776dcca36855fc15c1b8fe7650ae045194cf | https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/term.py#L91-L118 | train |
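A sketch of the year-boundary behaviour of get_term_by_date() above. The actual quarter dates come from the SWS term resources, so the resolved terms in the comments are illustrative only:

from datetime import date

# Early January normally precedes winter quarter's first day of classes,
# so the lookup falls back to the previous year's autumn term.
term = get_term_by_date(date(2019, 1, 2))    # -> autumn 2018 (illustrative)

# A mid-October date resolves directly to that year's autumn term.
term = get_term_by_date(date(2019, 10, 15))  # -> autumn 2019 (illustrative)
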
projectshift/shift-boiler | boiler/feature/logging.py | logging_feature | def logging_feature(app):
"""
Add logging
Accepts flask application and registers logging functionality within it
"""
# this is important because otherwise only log warn, err and crit
app.logger.setLevel(logging.INFO)
# enable loggers
email_exceptions = app.config.get('LOGGING_EMAIL_EXCEPTIONS_TO_ADMINS')
if email_exceptions and not app.debug and not app.testing:
# config.debug=False
mail_handler = mail_logger(app)
app.logger.addHandler(mail_handler)
if not app.testing:
file_handler = file_logger(app)
app.logger.addHandler(file_handler) | python | def logging_feature(app):
"""
Add logging
Accepts flask application and registers logging functionality within it
"""
# this is important because otherwise only log warn, err and crit
app.logger.setLevel(logging.INFO)
# enable loggers
email_exceptions = app.config.get('LOGGING_EMAIL_EXCEPTIONS_TO_ADMINS')
if email_exceptions and not app.debug and not app.testing:
# config.debug=False
mail_handler = mail_logger(app)
app.logger.addHandler(mail_handler)
if not app.testing:
file_handler = file_logger(app)
app.logger.addHandler(file_handler) | [
"def",
"logging_feature",
"(",
"app",
")",
":",
"# this is important because otherwise only log warn, err and crit",
"app",
".",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"# enable loggers",
"email_exceptions",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'LOGGING_EMAIL_EXCEPTIONS_TO_ADMINS'",
")",
"if",
"email_exceptions",
"and",
"not",
"app",
".",
"debug",
"and",
"not",
"app",
".",
"testing",
":",
"# config.debug=False",
"mail_handler",
"=",
"mail_logger",
"(",
"app",
")",
"app",
".",
"logger",
".",
"addHandler",
"(",
"mail_handler",
")",
"if",
"not",
"app",
".",
"testing",
":",
"file_handler",
"=",
"file_logger",
"(",
"app",
")",
"app",
".",
"logger",
".",
"addHandler",
"(",
"file_handler",
")"
]
| Add logging
Accepts flask application and registers logging functionality within it | [
"Add",
"logging",
"Accepts",
"flask",
"application",
"and",
"registers",
"logging",
"functionality",
"within",
"it"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/feature/logging.py#L6-L24 | train |
jreese/aioslack | aioslack/core.py | Slack.rtm | async def rtm(self) -> AsyncIterator[Event]:
"""Connect to the realtime event API and start yielding events."""
response = cast(RTMStart, await self.api("rtm.start"))
self.me = Auto.generate(response.self_, "Me", recursive=False)
self.team = Auto.generate(response.team, "Team", recursive=False)
self.channels.fill(Channel.build(item) for item in response.channels)
self.users.fill(User.build(item) for item in response.users)
self.groups.fill(Group.build(item) for item in response.groups)
log.debug(
f"received {len(self.users)} users, {len(self.channels)} channels "
f"and {len(self.groups)} groups from rtm.start"
)
async with self.session.ws_connect(response["url"]) as ws:
async for msg in ws:
event: Event = Event.generate(msg.json(), recursive=False)
if event.type == "goodbye":
break
yield event | python | async def rtm(self) -> AsyncIterator[Event]:
"""Connect to the realtime event API and start yielding events."""
response = cast(RTMStart, await self.api("rtm.start"))
self.me = Auto.generate(response.self_, "Me", recursive=False)
self.team = Auto.generate(response.team, "Team", recursive=False)
self.channels.fill(Channel.build(item) for item in response.channels)
self.users.fill(User.build(item) for item in response.users)
self.groups.fill(Group.build(item) for item in response.groups)
log.debug(
f"received {len(self.users)} users, {len(self.channels)} channels "
f"and {len(self.groups)} groups from rtm.start"
)
async with self.session.ws_connect(response["url"]) as ws:
async for msg in ws:
event: Event = Event.generate(msg.json(), recursive=False)
if event.type == "goodbye":
break
yield event | [
"async",
"def",
"rtm",
"(",
"self",
")",
"->",
"AsyncIterator",
"[",
"Event",
"]",
":",
"response",
"=",
"cast",
"(",
"RTMStart",
",",
"await",
"self",
".",
"api",
"(",
"\"rtm.start\"",
")",
")",
"self",
".",
"me",
"=",
"Auto",
".",
"generate",
"(",
"response",
".",
"self_",
",",
"\"Me\"",
",",
"recursive",
"=",
"False",
")",
"self",
".",
"team",
"=",
"Auto",
".",
"generate",
"(",
"response",
".",
"team",
",",
"\"Team\"",
",",
"recursive",
"=",
"False",
")",
"self",
".",
"channels",
".",
"fill",
"(",
"Channel",
".",
"build",
"(",
"item",
")",
"for",
"item",
"in",
"response",
".",
"channels",
")",
"self",
".",
"users",
".",
"fill",
"(",
"User",
".",
"build",
"(",
"item",
")",
"for",
"item",
"in",
"response",
".",
"users",
")",
"self",
".",
"groups",
".",
"fill",
"(",
"Group",
".",
"build",
"(",
"item",
")",
"for",
"item",
"in",
"response",
".",
"groups",
")",
"log",
".",
"debug",
"(",
"f\"received {len(self.users)} users, {len(self.channels)} channels \"",
"f\"and {len(self.groups)} groups from rtm.start\"",
")",
"async",
"with",
"self",
".",
"session",
".",
"ws_connect",
"(",
"response",
"[",
"\"url\"",
"]",
")",
"as",
"ws",
":",
"async",
"for",
"msg",
"in",
"ws",
":",
"event",
":",
"Event",
"=",
"Event",
".",
"generate",
"(",
"msg",
".",
"json",
"(",
")",
",",
"recursive",
"=",
"False",
")",
"if",
"event",
".",
"type",
"==",
"\"goodbye\"",
":",
"break",
"yield",
"event"
]
| Connect to the realtime event API and start yielding events. | [
"Connect",
"to",
"the",
"realtime",
"event",
"API",
"and",
"start",
"yielding",
"events",
"."
]
| 5e705f557dde9e81903d84ffb2896ec0a074ad5c | https://github.com/jreese/aioslack/blob/5e705f557dde9e81903d84ffb2896ec0a074ad5c/aioslack/core.py#L79-L101 | train |
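A minimal consumption sketch for the rtm() async generator above. Only the method body appears in this record, so the Slack constructor arguments are an assumption:

import asyncio

async def main():
    slack = Slack(token='xoxb-...')  # hypothetical constructor signature
    async for event in slack.rtm():
        if event.type == 'message':
            print(event)
    # Iteration stops on its own when a "goodbye" event arrives.

asyncio.run(main())
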
eventbrite/rebar | src/rebar/group.py | FormGroup._apply | def _apply(self, method_name, *args, **kwargs):
"""Call ``method_name`` with args and kwargs on each member.
Returns a sequence of return values.
"""
return [
getattr(member, method_name)(*args, **kwargs)
for member in self.forms
] | python | def _apply(self, method_name, *args, **kwargs):
"""Call ``method_name`` with args and kwargs on each member.
Returns a sequence of return values.
"""
return [
getattr(member, method_name)(*args, **kwargs)
for member in self.forms
] | [
"def",
"_apply",
"(",
"self",
",",
"method_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"[",
"getattr",
"(",
"member",
",",
"method_name",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"member",
"in",
"self",
".",
"forms",
"]"
]
| Call ``method_name`` with args and kwargs on each member.
Returns a sequence of return values. | [
"Call",
"method_name",
"with",
"args",
"and",
"kwargs",
"on",
"each",
"member",
"."
]
| 32f8914a2c5529519009d21c85f0d47cc6601901 | https://github.com/eventbrite/rebar/blob/32f8914a2c5529519009d21c85f0d47cc6601901/src/rebar/group.py#L115-L125 | train |
eventbrite/rebar | src/rebar/group.py | FormGroup.html_id | def html_id(self, field_name, form=None):
"""Return the html ID for the given field_name."""
if form is None:
form = self
return form.auto_id % (form.add_prefix(field_name),) | python | def html_id(self, field_name, form=None):
"""Return the html ID for the given field_name."""
if form is None:
form = self
return form.auto_id % (form.add_prefix(field_name),) | [
"def",
"html_id",
"(",
"self",
",",
"field_name",
",",
"form",
"=",
"None",
")",
":",
"if",
"form",
"is",
"None",
":",
"form",
"=",
"self",
"return",
"form",
".",
"auto_id",
"%",
"(",
"form",
".",
"add_prefix",
"(",
"field_name",
")",
",",
")"
]
| Return the html ID for the given field_name. | [
"Return",
"the",
"html",
"ID",
"for",
"the",
"given",
"field_name",
"."
]
| 32f8914a2c5529519009d21c85f0d47cc6601901 | https://github.com/eventbrite/rebar/blob/32f8914a2c5529519009d21c85f0d47cc6601901/src/rebar/group.py#L136-L142 | train |
eventbrite/rebar | src/rebar/group.py | FormGroup.save | def save(self):
"""Save the changes to the instance and any related objects."""
# first call save with commit=False for all Forms
for form in self._forms:
if isinstance(form, BaseForm):
form.save(commit=False)
# call save on the instance
self.instance.save()
# call any post-commit hooks that have been stashed on Forms
for form in self.forms:
if isinstance(form, BaseForm):
if hasattr(form, 'save_m2m'):
form.save_m2m()
if hasattr(form, 'save_related'):
form.save_related()
# call save on any formsets
for form in self._forms:
if isinstance(form, BaseFormSet):
form.save(commit=True)
return self.instance | python | def save(self):
"""Save the changes to the instance and any related objects."""
# first call save with commit=False for all Forms
for form in self._forms:
if isinstance(form, BaseForm):
form.save(commit=False)
# call save on the instance
self.instance.save()
# call any post-commit hooks that have been stashed on Forms
for form in self.forms:
if isinstance(form, BaseForm):
if hasattr(form, 'save_m2m'):
form.save_m2m()
if hasattr(form, 'save_related'):
form.save_related()
# call save on any formsets
for form in self._forms:
if isinstance(form, BaseFormSet):
form.save(commit=True)
return self.instance | [
"def",
"save",
"(",
"self",
")",
":",
"# first call save with commit=False for all Forms",
"for",
"form",
"in",
"self",
".",
"_forms",
":",
"if",
"isinstance",
"(",
"form",
",",
"BaseForm",
")",
":",
"form",
".",
"save",
"(",
"commit",
"=",
"False",
")",
"# call save on the instance",
"self",
".",
"instance",
".",
"save",
"(",
")",
"# call any post-commit hooks that have been stashed on Forms",
"for",
"form",
"in",
"self",
".",
"forms",
":",
"if",
"isinstance",
"(",
"form",
",",
"BaseForm",
")",
":",
"if",
"hasattr",
"(",
"form",
",",
"'save_m2m'",
")",
":",
"form",
".",
"save_m2m",
"(",
")",
"if",
"hasattr",
"(",
"form",
",",
"'save_related'",
")",
":",
"form",
".",
"save_related",
"(",
")",
"# call save on any formsets",
"for",
"form",
"in",
"self",
".",
"_forms",
":",
"if",
"isinstance",
"(",
"form",
",",
"BaseFormSet",
")",
":",
"form",
".",
"save",
"(",
"commit",
"=",
"True",
")",
"return",
"self",
".",
"instance"
]
| Save the changes to the instance and any related objects. | [
"Save",
"the",
"changes",
"to",
"the",
"instance",
"and",
"any",
"related",
"objects",
"."
]
| 32f8914a2c5529519009d21c85f0d47cc6601901 | https://github.com/eventbrite/rebar/blob/32f8914a2c5529519009d21c85f0d47cc6601901/src/rebar/group.py#L208-L232 | train |
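A sketch of the two-phase commit performed by FormGroup.save() above: member forms are saved with commit=False, the shared instance is saved once, the save_m2m()/save_related() hooks run, and formsets are committed last. The group subclass, request, and instance below are hypothetical:

group = ContactFormGroup(data=request.POST, instance=contact)  # hypothetical subclass
if group.is_valid():
    # Runs form.save(commit=False) per member, then instance.save(),
    # then the m2m/related hooks, then formset.save(commit=True).
    contact = group.save()
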
brunato/lograptor | lograptor/report.py | ReportData.make_csv | def make_csv(self):
"""
Get the text representation of a report element as csv.
"""
import csv
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import StringIO
out = StringIO()
writer = csv.writer(out, delimiter='|', lineterminator='\n', quoting=csv.QUOTE_MINIMAL)
if self.function == 'total':
writer.writerows(self.results)
elif self.function == 'top':
rows = [['Value', self.headers.strip('"')]]
if self.results[0] is not None:
for res in self.results:
if res is not None:
rows.append(tuple([res[0], ','.join(res[1])]))
writer.writerows(rows)
elif self.function == 'table':
rows = [[header.strip('"') for header in re.split('\s*,\s*', self.headers)]]
for res in sorted(self.results, key=lambda x: x[0]):
row = list(res[:-1])
lastcol = get_fmt_results(res[-1], limit=10)
if lastcol[-1][0] == '[' and lastcol[-1][-1] == ']':
row.append(u'{0} {1}'.format(u', '.join(lastcol[:-1]), lastcol[-1]))
else:
row.append(u', '.join(lastcol))
rows.append(row)
writer.writerows(rows)
self.csv = out.getvalue() | python | def make_csv(self):
"""
Get the text representation of a report element as csv.
"""
import csv
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import StringIO
out = StringIO()
writer = csv.writer(out, delimiter='|', lineterminator='\n', quoting=csv.QUOTE_MINIMAL)
if self.function == 'total':
writer.writerows(self.results)
elif self.function == 'top':
rows = [['Value', self.headers.strip('"')]]
if self.results[0] is not None:
for res in self.results:
if res is not None:
rows.append(tuple([res[0], ','.join(res[1])]))
writer.writerows(rows)
elif self.function == 'table':
rows = [[header.strip('"') for header in re.split('\s*,\s*', self.headers)]]
for res in sorted(self.results, key=lambda x: x[0]):
row = list(res[:-1])
lastcol = get_fmt_results(res[-1], limit=10)
if lastcol[-1][0] == '[' and lastcol[-1][-1] == ']':
row.append(u'{0} {1}'.format(u', '.join(lastcol[:-1]), lastcol[-1]))
else:
row.append(u', '.join(lastcol))
rows.append(row)
writer.writerows(rows)
self.csv = out.getvalue() | [
"def",
"make_csv",
"(",
"self",
")",
":",
"import",
"csv",
"try",
":",
"from",
"StringIO",
"import",
"StringIO",
"# Python 2.7",
"except",
"ImportError",
":",
"from",
"io",
"import",
"StringIO",
"out",
"=",
"StringIO",
"(",
")",
"writer",
"=",
"csv",
".",
"writer",
"(",
"out",
",",
"delimiter",
"=",
"'|'",
",",
"lineterminator",
"=",
"'\\n'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
")",
"if",
"self",
".",
"function",
"==",
"'total'",
":",
"writer",
".",
"writerows",
"(",
"self",
".",
"results",
")",
"elif",
"self",
".",
"function",
"==",
"'top'",
":",
"rows",
"=",
"[",
"[",
"'Value'",
",",
"self",
".",
"headers",
".",
"strip",
"(",
"'\"'",
")",
"]",
"]",
"if",
"self",
".",
"results",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"for",
"res",
"in",
"self",
".",
"results",
":",
"if",
"res",
"is",
"not",
"None",
":",
"rows",
".",
"append",
"(",
"tuple",
"(",
"[",
"res",
"[",
"0",
"]",
",",
"','",
".",
"join",
"(",
"res",
"[",
"1",
"]",
")",
"]",
")",
")",
"writer",
".",
"writerows",
"(",
"rows",
")",
"elif",
"self",
".",
"function",
"==",
"'table'",
":",
"rows",
"=",
"[",
"[",
"header",
".",
"strip",
"(",
"'\"'",
")",
"for",
"header",
"in",
"re",
".",
"split",
"(",
"'\\s*,\\s*'",
",",
"self",
".",
"headers",
")",
"]",
"]",
"for",
"res",
"in",
"sorted",
"(",
"self",
".",
"results",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
":",
"row",
"=",
"list",
"(",
"res",
"[",
":",
"-",
"1",
"]",
")",
"lastcol",
"=",
"get_fmt_results",
"(",
"res",
"[",
"-",
"1",
"]",
",",
"limit",
"=",
"10",
")",
"if",
"lastcol",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"'['",
"and",
"lastcol",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"==",
"']'",
":",
"row",
".",
"append",
"(",
"u'{0} {1}'",
".",
"format",
"(",
"u', '",
".",
"join",
"(",
"lastcol",
"[",
":",
"-",
"1",
"]",
")",
",",
"lastcol",
"[",
"-",
"1",
"]",
")",
")",
"else",
":",
"row",
".",
"append",
"(",
"u', '",
".",
"join",
"(",
"lastcol",
")",
")",
"rows",
".",
"append",
"(",
"row",
")",
"writer",
".",
"writerows",
"(",
"rows",
")",
"self",
".",
"csv",
"=",
"out",
".",
"getvalue",
"(",
")"
]
| Get the text representation of a report element as csv. | [
"Get",
"the",
"text",
"representation",
"of",
"a",
"report",
"element",
"as",
"csv",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L360-L397 | train |
brunato/lograptor | lograptor/report.py | Subreport.make | def make(self, apps):
"""
Make subreport items from results.
"""
for (appname, app) in sorted(apps.items(), key=lambda x: (x[1].priority, x[0])):
logger.info('Getting report results from %r', appname)
for report_data in app.report_data:
if report_data.subreport != self.name:
continue
if report_data.function == 'total':
for opt in report_data:
match = report_data.parse_report_data(opt)
cond = match.group('condition')
valfld = match.group('valfld')
unit = match.group('unit')
itemtitle = match.group('fields').strip('"')
total = report_data.rules[opt].total_events(cond, valfld)
if total == 0:
continue
if unit is not None:
total, unit = get_value_unit(total, unit, 'T')
total = '{0} {1}'.format(total, unit)
else:
total = str(total)
report_data.results.append(tuple([total, itemtitle]))
elif report_data.function == 'top':
k = int(report_data.topnum)
for opt in report_data:
match = report_data.parse_report_data(opt)
valfld = match.group('valfld')
field = match.group('fields')
usemax = match.group('add2res') is None
toplist = report_data.rules[opt].top_events(k, valfld, usemax, field)
report_data.results.extend(toplist)
elif report_data.function == 'table':
cols = len(re.split('\s*,\s*', report_data.headers))
for opt in report_data:
match = report_data.parse_report_data(opt)
cond = match.group('condition')
fields = re.split('\s*,\s*', match.group('fields'))
tablelist = report_data.rules[opt].list_events(cond, cols, fields)
report_data.results.extend(tablelist)
if report_data.results:
self.report_data.append(report_data)
# Sort and rewrite results as strings with units
for report_data in self.report_data:
if report_data.function == 'top':
# Sort values
report_data.results = sorted(report_data.results, key=lambda x: x[0], reverse=True)
# Get the unit if any and convert numeric results to strings
unit = None
for opt in report_data:
match = report_data.parse_report_data(opt)
unit = match.group('unit')
if unit is not None:
break
for res in report_data.results:
if unit is not None:
v, u = get_value_unit(res[0], unit, 'T')
res[0] = '{0} {1}'.format(v, u)
else:
res[0] = str(res[0]) | python | def make(self, apps):
"""
Make subreport items from results.
"""
for (appname, app) in sorted(apps.items(), key=lambda x: (x[1].priority, x[0])):
logger.info('Getting report results from %r', appname)
for report_data in app.report_data:
if report_data.subreport != self.name:
continue
if report_data.function == 'total':
for opt in report_data:
match = report_data.parse_report_data(opt)
cond = match.group('condition')
valfld = match.group('valfld')
unit = match.group('unit')
itemtitle = match.group('fields').strip('"')
total = report_data.rules[opt].total_events(cond, valfld)
if total == 0:
continue
if unit is not None:
total, unit = get_value_unit(total, unit, 'T')
total = '{0} {1}'.format(total, unit)
else:
total = str(total)
report_data.results.append(tuple([total, itemtitle]))
elif report_data.function == 'top':
k = int(report_data.topnum)
for opt in report_data:
match = report_data.parse_report_data(opt)
valfld = match.group('valfld')
field = match.group('fields')
usemax = match.group('add2res') is None
toplist = report_data.rules[opt].top_events(k, valfld, usemax, field)
report_data.results.extend(toplist)
elif report_data.function == 'table':
cols = len(re.split('\s*,\s*', report_data.headers))
for opt in report_data:
match = report_data.parse_report_data(opt)
cond = match.group('condition')
fields = re.split('\s*,\s*', match.group('fields'))
tablelist = report_data.rules[opt].list_events(cond, cols, fields)
report_data.results.extend(tablelist)
if report_data.results:
self.report_data.append(report_data)
# Sort and rewrite results as strings with units
for report_data in self.report_data:
if report_data.function == 'top':
# Sort values
report_data.results = sorted(report_data.results, key=lambda x: x[0], reverse=True)
# Get the unit if any and convert numeric results to strings
unit = None
for opt in report_data:
match = report_data.parse_report_data(opt)
unit = match.group('unit')
if unit is not None:
break
for res in report_data.results:
if unit is not None:
v, u = get_value_unit(res[0], unit, 'T')
res[0] = '{0} {1}'.format(v, u)
else:
res[0] = str(res[0]) | [
"def",
"make",
"(",
"self",
",",
"apps",
")",
":",
"for",
"(",
"appname",
",",
"app",
")",
"in",
"sorted",
"(",
"apps",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
"[",
"1",
"]",
".",
"priority",
",",
"x",
"[",
"0",
"]",
")",
")",
":",
"logger",
".",
"info",
"(",
"'Getting report results from %r'",
",",
"appname",
")",
"for",
"report_data",
"in",
"app",
".",
"report_data",
":",
"if",
"report_data",
".",
"subreport",
"!=",
"self",
".",
"name",
":",
"continue",
"if",
"report_data",
".",
"function",
"==",
"'total'",
":",
"for",
"opt",
"in",
"report_data",
":",
"match",
"=",
"report_data",
".",
"parse_report_data",
"(",
"opt",
")",
"cond",
"=",
"match",
".",
"group",
"(",
"'condition'",
")",
"valfld",
"=",
"match",
".",
"group",
"(",
"'valfld'",
")",
"unit",
"=",
"match",
".",
"group",
"(",
"'unit'",
")",
"itemtitle",
"=",
"match",
".",
"group",
"(",
"'fields'",
")",
".",
"strip",
"(",
"'\"'",
")",
"total",
"=",
"report_data",
".",
"rules",
"[",
"opt",
"]",
".",
"total_events",
"(",
"cond",
",",
"valfld",
")",
"if",
"total",
"==",
"0",
":",
"continue",
"if",
"unit",
"is",
"not",
"None",
":",
"total",
",",
"unit",
"=",
"get_value_unit",
"(",
"total",
",",
"unit",
",",
"'T'",
")",
"total",
"=",
"'{0} {1}'",
".",
"format",
"(",
"total",
",",
"unit",
")",
"else",
":",
"total",
"=",
"str",
"(",
"total",
")",
"report_data",
".",
"results",
".",
"append",
"(",
"tuple",
"(",
"[",
"total",
",",
"itemtitle",
"]",
")",
")",
"elif",
"report_data",
".",
"function",
"==",
"'top'",
":",
"k",
"=",
"int",
"(",
"report_data",
".",
"topnum",
")",
"for",
"opt",
"in",
"report_data",
":",
"match",
"=",
"report_data",
".",
"parse_report_data",
"(",
"opt",
")",
"valfld",
"=",
"match",
".",
"group",
"(",
"'valfld'",
")",
"field",
"=",
"match",
".",
"group",
"(",
"'fields'",
")",
"usemax",
"=",
"match",
".",
"group",
"(",
"'add2res'",
")",
"is",
"None",
"toplist",
"=",
"report_data",
".",
"rules",
"[",
"opt",
"]",
".",
"top_events",
"(",
"k",
",",
"valfld",
",",
"usemax",
",",
"field",
")",
"report_data",
".",
"results",
".",
"extend",
"(",
"toplist",
")",
"elif",
"report_data",
".",
"function",
"==",
"'table'",
":",
"cols",
"=",
"len",
"(",
"re",
".",
"split",
"(",
"'\\s*,\\s*'",
",",
"report_data",
".",
"headers",
")",
")",
"for",
"opt",
"in",
"report_data",
":",
"match",
"=",
"report_data",
".",
"parse_report_data",
"(",
"opt",
")",
"cond",
"=",
"match",
".",
"group",
"(",
"'condition'",
")",
"fields",
"=",
"re",
".",
"split",
"(",
"'\\s*,\\s*'",
",",
"match",
".",
"group",
"(",
"'fields'",
")",
")",
"tablelist",
"=",
"report_data",
".",
"rules",
"[",
"opt",
"]",
".",
"list_events",
"(",
"cond",
",",
"cols",
",",
"fields",
")",
"report_data",
".",
"results",
".",
"extend",
"(",
"tablelist",
")",
"if",
"report_data",
".",
"results",
":",
"self",
".",
"report_data",
".",
"append",
"(",
"report_data",
")",
"# Sort and rewrite results as strings with units ",
"for",
"report_data",
"in",
"self",
".",
"report_data",
":",
"if",
"report_data",
".",
"function",
"==",
"'top'",
":",
"# Sort values",
"report_data",
".",
"results",
"=",
"sorted",
"(",
"report_data",
".",
"results",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
",",
"reverse",
"=",
"True",
")",
"# Get the unit if any and convert numeric results to strings",
"unit",
"=",
"None",
"for",
"opt",
"in",
"report_data",
":",
"match",
"=",
"report_data",
".",
"parse_report_data",
"(",
"opt",
")",
"unit",
"=",
"match",
".",
"group",
"(",
"'unit'",
")",
"if",
"unit",
"is",
"not",
"None",
":",
"break",
"for",
"res",
"in",
"report_data",
".",
"results",
":",
"if",
"unit",
"is",
"not",
"None",
":",
"v",
",",
"u",
"=",
"get_value_unit",
"(",
"res",
"[",
"0",
"]",
",",
"unit",
",",
"'T'",
")",
"res",
"[",
"0",
"]",
"=",
"'{0} {1}'",
".",
"format",
"(",
"v",
",",
"u",
")",
"else",
":",
"res",
"[",
"0",
"]",
"=",
"str",
"(",
"res",
"[",
"0",
"]",
")"
]
| Make subreport items from results. | [
"Make",
"subreport",
"items",
"from",
"results",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L420-L493 | train |
brunato/lograptor | lograptor/report.py | Subreport.make_format | def make_format(self, fmt, width):
"""
Make subreport text in a specified format
"""
if not self.report_data:
return
for data_item in self.report_data:
if data_item.results:
if fmt is None or fmt == 'text':
data_item.make_text(width)
elif fmt == 'html':
data_item.make_html()
elif fmt == 'csv':
data_item.make_csv() | python | def make_format(self, fmt, width):
"""
Make subreport text in a specified format
"""
if not self.report_data:
return
for data_item in self.report_data:
if data_item.results:
if fmt is None or fmt == 'text':
data_item.make_text(width)
elif fmt == 'html':
data_item.make_html()
elif fmt == 'csv':
data_item.make_csv() | [
"def",
"make_format",
"(",
"self",
",",
"fmt",
",",
"width",
")",
":",
"if",
"not",
"self",
".",
"report_data",
":",
"return",
"for",
"data_item",
"in",
"self",
".",
"report_data",
":",
"if",
"data_item",
".",
"results",
":",
"if",
"fmt",
"is",
"None",
"or",
"fmt",
"==",
"'text'",
":",
"data_item",
".",
"make_text",
"(",
"width",
")",
"elif",
"fmt",
"==",
"'html'",
":",
"data_item",
".",
"make_html",
"(",
")",
"elif",
"fmt",
"==",
"'csv'",
":",
"data_item",
".",
"make_csv",
"(",
")"
]
| Make subreport text in a specified format | [
"Make",
"subreport",
"text",
"in",
"a",
"specified",
"format"
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L495-L509 | train |
brunato/lograptor | lograptor/report.py | Subreport.compact_tables | def compact_tables(self):
"""
Compact report items of type "table" with same results type. Report items of type "tables" in the
        same subreport are merged into one. The data are ordered by 1st column.
"""
items_to_del = set()
for i in range(len(self.report_data)):
if i in items_to_del:
continue
if self.report_data[i].function[0:5] == 'table':
for j in range(i+1, len(self.report_data)):
if self.report_data[j].function[0:5] == 'table':
if self.report_data[i] == self.report_data[j]:
logger.debug('Merge of 2 identical report tables: {0}'
.format(self.report_data[i].title))
items_to_del.add(j)
self.report_data[i].results.extend(self.report_data[j].results)
if items_to_del:
for i in reversed(sorted(items_to_del, key=lambda x: x)):
self.report_data.pop(i) | python | def compact_tables(self):
"""
Compact report items of type "table" with same results type. Report items of type "tables" in the
        same subreport are merged into one. The data are ordered by 1st column.
"""
items_to_del = set()
for i in range(len(self.report_data)):
if i in items_to_del:
continue
if self.report_data[i].function[0:5] == 'table':
for j in range(i+1, len(self.report_data)):
if self.report_data[j].function[0:5] == 'table':
if self.report_data[i] == self.report_data[j]:
logger.debug('Merge of 2 identical report tables: {0}'
.format(self.report_data[i].title))
items_to_del.add(j)
self.report_data[i].results.extend(self.report_data[j].results)
if items_to_del:
for i in reversed(sorted(items_to_del, key=lambda x: x)):
self.report_data.pop(i) | [
"def",
"compact_tables",
"(",
"self",
")",
":",
"items_to_del",
"=",
"set",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"report_data",
")",
")",
":",
"if",
"i",
"in",
"items_to_del",
":",
"continue",
"if",
"self",
".",
"report_data",
"[",
"i",
"]",
".",
"function",
"[",
"0",
":",
"5",
"]",
"==",
"'table'",
":",
"for",
"j",
"in",
"range",
"(",
"i",
"+",
"1",
",",
"len",
"(",
"self",
".",
"report_data",
")",
")",
":",
"if",
"self",
".",
"report_data",
"[",
"j",
"]",
".",
"function",
"[",
"0",
":",
"5",
"]",
"==",
"'table'",
":",
"if",
"self",
".",
"report_data",
"[",
"i",
"]",
"==",
"self",
".",
"report_data",
"[",
"j",
"]",
":",
"logger",
".",
"debug",
"(",
"'Merge of 2 identical report tables: {0}'",
".",
"format",
"(",
"self",
".",
"report_data",
"[",
"i",
"]",
".",
"title",
")",
")",
"items_to_del",
".",
"add",
"(",
"j",
")",
"self",
".",
"report_data",
"[",
"i",
"]",
".",
"results",
".",
"extend",
"(",
"self",
".",
"report_data",
"[",
"j",
"]",
".",
"results",
")",
"if",
"items_to_del",
":",
"for",
"i",
"in",
"reversed",
"(",
"sorted",
"(",
"items_to_del",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
")",
")",
":",
"self",
".",
"report_data",
".",
"pop",
"(",
"i",
")"
]
| Compact report items of type "table" with same results type. Report items of type "tables" in the
same subreport are merged into one. The data are ordered by 1st column. | [
"Compact",
"report",
"items",
"of",
"type",
"table",
"with",
"same",
"results",
"type",
".",
"Report",
"items",
"of",
"type",
"tables",
"in",
"the",
"same",
"subreport",
"is",
"merged",
"into",
"one",
".",
"The",
"data",
"are",
"ordered",
"by",
"1st",
"column",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L511-L530 | train |
brunato/lograptor | lograptor/report.py | Report.make | def make(self, apps):
"""
Create the report from application results
"""
for subreport in self.subreports:
logger.debug('Make subreport "{0}"'.format(subreport.name))
subreport.make(apps)
for subreport in self.subreports:
subreport.compact_tables() | python | def make(self, apps):
"""
Create the report from application results
"""
for subreport in self.subreports:
logger.debug('Make subreport "{0}"'.format(subreport.name))
subreport.make(apps)
for subreport in self.subreports:
subreport.compact_tables() | [
"def",
"make",
"(",
"self",
",",
"apps",
")",
":",
"for",
"subreport",
"in",
"self",
".",
"subreports",
":",
"logger",
".",
"debug",
"(",
"'Make subreport \"{0}\"'",
".",
"format",
"(",
"subreport",
".",
"name",
")",
")",
"subreport",
".",
"make",
"(",
"apps",
")",
"for",
"subreport",
"in",
"self",
".",
"subreports",
":",
"subreport",
".",
"compact_tables",
"(",
")"
]
| Create the report from application results | [
"Create",
"the",
"report",
"from",
"application",
"results"
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L566-L575 | train |
brunato/lograptor | lograptor/report.py | Report.get_report_parts | def get_report_parts(self, apps, formats):
"""
Make report item texts in a specified format.
"""
for fmt in formats:
width = 100 if fmt is not None else tui.get_terminal_size()[0]
for sr in self.subreports:
sr.make_format(fmt, width)
logger.debug('Build a map for arguments and run\'s statistics ...')
value_mapping = {
'title': self.title,
'patterns': ', '.join([repr(pattern) for pattern in self.args.patterns]) or None,
'pattern_files': ', '.join(self.args.pattern_files) or None,
'hosts': ', '.join(self.args.hosts) or None,
'apps': u', '.join([
u'%s(%d)' % (app.name, app.matches) for app in apps.values() if app.matches > 0
]),
'version': __version__
}
filters = []
for flt in self.args.filters:
filters.append(' AND '.join(['%s=%r' % (k, v.pattern) for k, v in flt.items()]))
if filters:
value_mapping['filters'] = ' OR '.join(['(%s)' % item for item in filters])
else:
value_mapping['filters'] = filters[0] if filters else None
value_mapping.update(self.stats)
report = []
for fmt in formats:
if fmt == 'text':
logger.info('appends a text page report')
report.append(self.make_text_page(value_mapping))
elif fmt == 'html':
logger.info('appends a html page report')
report.append(self.make_html_page(value_mapping))
elif fmt == 'csv':
logger.info('extends with a list of csv subreports')
report.extend(self.make_csv_tables())
return report | python | def get_report_parts(self, apps, formats):
"""
Make report item texts in a specified format.
"""
for fmt in formats:
width = 100 if fmt is not None else tui.get_terminal_size()[0]
for sr in self.subreports:
sr.make_format(fmt, width)
logger.debug('Build a map for arguments and run\'s statistics ...')
value_mapping = {
'title': self.title,
'patterns': ', '.join([repr(pattern) for pattern in self.args.patterns]) or None,
'pattern_files': ', '.join(self.args.pattern_files) or None,
'hosts': ', '.join(self.args.hosts) or None,
'apps': u', '.join([
u'%s(%d)' % (app.name, app.matches) for app in apps.values() if app.matches > 0
]),
'version': __version__
}
filters = []
for flt in self.args.filters:
filters.append(' AND '.join(['%s=%r' % (k, v.pattern) for k, v in flt.items()]))
if filters:
value_mapping['filters'] = ' OR '.join(['(%s)' % item for item in filters])
else:
value_mapping['filters'] = filters[0] if filters else None
value_mapping.update(self.stats)
report = []
for fmt in formats:
if fmt == 'text':
logger.info('appends a text page report')
report.append(self.make_text_page(value_mapping))
elif fmt == 'html':
logger.info('appends a html page report')
report.append(self.make_html_page(value_mapping))
elif fmt == 'csv':
logger.info('extends with a list of csv subreports')
report.extend(self.make_csv_tables())
return report | [
"def",
"get_report_parts",
"(",
"self",
",",
"apps",
",",
"formats",
")",
":",
"for",
"fmt",
"in",
"formats",
":",
"width",
"=",
"100",
"if",
"fmt",
"is",
"not",
"None",
"else",
"tui",
".",
"get_terminal_size",
"(",
")",
"[",
"0",
"]",
"for",
"sr",
"in",
"self",
".",
"subreports",
":",
"sr",
".",
"make_format",
"(",
"fmt",
",",
"width",
")",
"logger",
".",
"debug",
"(",
"'Build a map for arguments and run\\'s statistics ...'",
")",
"value_mapping",
"=",
"{",
"'title'",
":",
"self",
".",
"title",
",",
"'patterns'",
":",
"', '",
".",
"join",
"(",
"[",
"repr",
"(",
"pattern",
")",
"for",
"pattern",
"in",
"self",
".",
"args",
".",
"patterns",
"]",
")",
"or",
"None",
",",
"'pattern_files'",
":",
"', '",
".",
"join",
"(",
"self",
".",
"args",
".",
"pattern_files",
")",
"or",
"None",
",",
"'hosts'",
":",
"', '",
".",
"join",
"(",
"self",
".",
"args",
".",
"hosts",
")",
"or",
"None",
",",
"'apps'",
":",
"u', '",
".",
"join",
"(",
"[",
"u'%s(%d)'",
"%",
"(",
"app",
".",
"name",
",",
"app",
".",
"matches",
")",
"for",
"app",
"in",
"apps",
".",
"values",
"(",
")",
"if",
"app",
".",
"matches",
">",
"0",
"]",
")",
",",
"'version'",
":",
"__version__",
"}",
"filters",
"=",
"[",
"]",
"for",
"flt",
"in",
"self",
".",
"args",
".",
"filters",
":",
"filters",
".",
"append",
"(",
"' AND '",
".",
"join",
"(",
"[",
"'%s=%r'",
"%",
"(",
"k",
",",
"v",
".",
"pattern",
")",
"for",
"k",
",",
"v",
"in",
"flt",
".",
"items",
"(",
")",
"]",
")",
")",
"if",
"filters",
":",
"value_mapping",
"[",
"'filters'",
"]",
"=",
"' OR '",
".",
"join",
"(",
"[",
"'(%s)'",
"%",
"item",
"for",
"item",
"in",
"filters",
"]",
")",
"else",
":",
"value_mapping",
"[",
"'filters'",
"]",
"=",
"filters",
"[",
"0",
"]",
"if",
"filters",
"else",
"None",
"value_mapping",
".",
"update",
"(",
"self",
".",
"stats",
")",
"report",
"=",
"[",
"]",
"for",
"fmt",
"in",
"formats",
":",
"if",
"fmt",
"==",
"'text'",
":",
"logger",
".",
"info",
"(",
"'appends a text page report'",
")",
"report",
".",
"append",
"(",
"self",
".",
"make_text_page",
"(",
"value_mapping",
")",
")",
"elif",
"fmt",
"==",
"'html'",
":",
"logger",
".",
"info",
"(",
"'appends a html page report'",
")",
"report",
".",
"append",
"(",
"self",
".",
"make_html_page",
"(",
"value_mapping",
")",
")",
"elif",
"fmt",
"==",
"'csv'",
":",
"logger",
".",
"info",
"(",
"'extends with a list of csv subreports'",
")",
"report",
".",
"extend",
"(",
"self",
".",
"make_csv_tables",
"(",
")",
")",
"return",
"report"
]
| Make report item texts in a specified format. | [
"Make",
"report",
"item",
"texts",
"in",
"a",
"specified",
"format",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L580-L622 | train |
brunato/lograptor | lograptor/report.py | Report.set_stats | def set_stats(self, run_stats):
"""
Set run statistics for the report.
"""
self.stats = run_stats.copy()
self.stats['files'] = ', '.join(self.stats['files'])
self.stats['tot_files'] = len(run_stats['files'])
self.stats['extra_tags'] = ', '.join(self.stats['extra_tags']) | python | def set_stats(self, run_stats):
"""
Set run statistics for the report.
"""
self.stats = run_stats.copy()
self.stats['files'] = ', '.join(self.stats['files'])
self.stats['tot_files'] = len(run_stats['files'])
self.stats['extra_tags'] = ', '.join(self.stats['extra_tags']) | [
"def",
"set_stats",
"(",
"self",
",",
"run_stats",
")",
":",
"self",
".",
"stats",
"=",
"run_stats",
".",
"copy",
"(",
")",
"self",
".",
"stats",
"[",
"'files'",
"]",
"=",
"', '",
".",
"join",
"(",
"self",
".",
"stats",
"[",
"'files'",
"]",
")",
"self",
".",
"stats",
"[",
"'tot_files'",
"]",
"=",
"len",
"(",
"run_stats",
"[",
"'files'",
"]",
")",
"self",
".",
"stats",
"[",
"'extra_tags'",
"]",
"=",
"', '",
".",
"join",
"(",
"self",
".",
"stats",
"[",
"'extra_tags'",
"]",
")"
]
| Set run statistics for the report. | [
"Set",
"run",
"statistics",
"for",
"the",
"report",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L630-L637 | train |
brunato/lograptor | lograptor/report.py | Report.make_html_page | def make_html_page(self, valumap):
"""
Builds the report as html page, using the template page from file.
"""
logger.info('Making an html report using template %r.', self.html_template)
fh = open(self.html_template)
template = fh.read()
fh.close()
parts = []
for sr in self.subreports:
report_data = [item.html for item in sr.report_data if item.html]
if report_data:
parts.append('\n<h2>{1}</h2>\n'.format(sr.title, sr.reptext))
parts.extend(report_data)
parts.append('\n<hr/>')
valumap['subreports'] = '\n'.join(parts) # or "\n<<NO SUBREPORT RELATED EVENTS>>\n"
html_page = Template(template).safe_substitute(valumap)
return TextPart(fmt='html', text=html_page, ext='html') | python | def make_html_page(self, valumap):
"""
Builds the report as html page, using the template page from file.
"""
logger.info('Making an html report using template %r.', self.html_template)
fh = open(self.html_template)
template = fh.read()
fh.close()
parts = []
for sr in self.subreports:
report_data = [item.html for item in sr.report_data if item.html]
if report_data:
parts.append('\n<h2>{1}</h2>\n'.format(sr.title, sr.reptext))
parts.extend(report_data)
parts.append('\n<hr/>')
valumap['subreports'] = '\n'.join(parts) # or "\n<<NO SUBREPORT RELATED EVENTS>>\n"
html_page = Template(template).safe_substitute(valumap)
return TextPart(fmt='html', text=html_page, ext='html') | [
"def",
"make_html_page",
"(",
"self",
",",
"valumap",
")",
":",
"logger",
".",
"info",
"(",
"'Making an html report using template %r.'",
",",
"self",
".",
"html_template",
")",
"fh",
"=",
"open",
"(",
"self",
".",
"html_template",
")",
"template",
"=",
"fh",
".",
"read",
"(",
")",
"fh",
".",
"close",
"(",
")",
"parts",
"=",
"[",
"]",
"for",
"sr",
"in",
"self",
".",
"subreports",
":",
"report_data",
"=",
"[",
"item",
".",
"html",
"for",
"item",
"in",
"sr",
".",
"report_data",
"if",
"item",
".",
"html",
"]",
"if",
"report_data",
":",
"parts",
".",
"append",
"(",
"'\\n<h2>{1}</h2>\\n'",
".",
"format",
"(",
"sr",
".",
"title",
",",
"sr",
".",
"reptext",
")",
")",
"parts",
".",
"extend",
"(",
"report_data",
")",
"parts",
".",
"append",
"(",
"'\\n<hr/>'",
")",
"valumap",
"[",
"'subreports'",
"]",
"=",
"'\\n'",
".",
"join",
"(",
"parts",
")",
"# or \"\\n<<NO SUBREPORT RELATED EVENTS>>\\n\"",
"html_page",
"=",
"Template",
"(",
"template",
")",
".",
"safe_substitute",
"(",
"valumap",
")",
"return",
"TextPart",
"(",
"fmt",
"=",
"'html'",
",",
"text",
"=",
"html_page",
",",
"ext",
"=",
"'html'",
")"
]
| Builds the report as html page, using the template page from file. | [
"Builds",
"the",
"report",
"as",
"html",
"page",
"using",
"the",
"template",
"page",
"from",
"file",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L639-L658 | train |
brunato/lograptor | lograptor/report.py | Report.make_text_page | def make_text_page(self, valumap):
"""
Builds the report as text page, using the template page from file.
"""
logger.info('Making a text report page using template %r.', self.text_template)
fh = open(self.text_template)
template = fh.read()
fh.close()
parts = []
for sr in self.subreports:
report_data = [item.text for item in sr.report_data if item.text]
if report_data:
parts.append('\n{1}\n***** {0} *****\n{1}'.format(sr.title, '*' * (len(sr.title)+12)))
parts.extend(report_data)
valumap['subreports'] = '\n'.join(parts) # "\n<<NO SUBREPORT RELATED EVENTS>>\n"
text_page = Template(template).safe_substitute(valumap)
return TextPart(fmt='text', text=text_page, ext='txt') | python | def make_text_page(self, valumap):
"""
Builds the report as text page, using the template page from file.
"""
logger.info('Making a text report page using template %r.', self.text_template)
fh = open(self.text_template)
template = fh.read()
fh.close()
parts = []
for sr in self.subreports:
report_data = [item.text for item in sr.report_data if item.text]
if report_data:
parts.append('\n{1}\n***** {0} *****\n{1}'.format(sr.title, '*' * (len(sr.title)+12)))
parts.extend(report_data)
valumap['subreports'] = '\n'.join(parts) # "\n<<NO SUBREPORT RELATED EVENTS>>\n"
text_page = Template(template).safe_substitute(valumap)
return TextPart(fmt='text', text=text_page, ext='txt') | [
"def",
"make_text_page",
"(",
"self",
",",
"valumap",
")",
":",
"logger",
".",
"info",
"(",
"'Making a text report page using template %r.'",
",",
"self",
".",
"text_template",
")",
"fh",
"=",
"open",
"(",
"self",
".",
"text_template",
")",
"template",
"=",
"fh",
".",
"read",
"(",
")",
"fh",
".",
"close",
"(",
")",
"parts",
"=",
"[",
"]",
"for",
"sr",
"in",
"self",
".",
"subreports",
":",
"report_data",
"=",
"[",
"item",
".",
"text",
"for",
"item",
"in",
"sr",
".",
"report_data",
"if",
"item",
".",
"text",
"]",
"if",
"report_data",
":",
"parts",
".",
"append",
"(",
"'\\n{1}\\n***** {0} *****\\n{1}'",
".",
"format",
"(",
"sr",
".",
"title",
",",
"'*'",
"*",
"(",
"len",
"(",
"sr",
".",
"title",
")",
"+",
"12",
")",
")",
")",
"parts",
".",
"extend",
"(",
"report_data",
")",
"valumap",
"[",
"'subreports'",
"]",
"=",
"'\\n'",
".",
"join",
"(",
"parts",
")",
"# \"\\n<<NO SUBREPORT RELATED EVENTS>>\\n\"",
"text_page",
"=",
"Template",
"(",
"template",
")",
".",
"safe_substitute",
"(",
"valumap",
")",
"return",
"TextPart",
"(",
"fmt",
"=",
"'text'",
",",
"text",
"=",
"text_page",
",",
"ext",
"=",
"'txt'",
")"
]
| Builds the report as text page, using the template page from file. | [
"Builds",
"the",
"report",
"as",
"text",
"page",
"using",
"the",
"template",
"page",
"from",
"file",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L660-L678 | train |
brunato/lograptor | lograptor/report.py | Report.make_csv_tables | def make_csv_tables(self):
"""
Builds the report as a list of csv tables with titles.
"""
logger.info('Generate csv report tables')
report_parts = []
for sr in self.subreports:
for data_item in sr.report_data:
report_parts.append(TextPart(fmt='csv', text=data_item.csv, ext='csv'))
return report_parts | python | def make_csv_tables(self):
"""
Builds the report as a list of csv tables with titles.
"""
logger.info('Generate csv report tables')
report_parts = []
for sr in self.subreports:
for data_item in sr.report_data:
report_parts.append(TextPart(fmt='csv', text=data_item.csv, ext='csv'))
return report_parts | [
"def",
"make_csv_tables",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'Generate csv report tables'",
")",
"report_parts",
"=",
"[",
"]",
"for",
"sr",
"in",
"self",
".",
"subreports",
":",
"for",
"data_item",
"in",
"sr",
".",
"report_data",
":",
"report_parts",
".",
"append",
"(",
"TextPart",
"(",
"fmt",
"=",
"'csv'",
",",
"text",
"=",
"data_item",
".",
"csv",
",",
"ext",
"=",
"'csv'",
")",
")",
"return",
"report_parts"
]
| Builds the report as a list of csv tables with titles. | [
"Builds",
"the",
"report",
"as",
"a",
"list",
"of",
"csv",
"tables",
"with",
"titles",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L680-L689 | train |
projectshift/shift-boiler | boiler/collections/paginated_collection.py | PaginatedCollection.fetch_items | def fetch_items(self):
"""
Fetch items
Performs a query to retrieve items based on current query and
pagination settings.
"""
offset = self.per_page * (self.page - 1)
items = self._query.limit(self.per_page).offset(offset).all()
return items | python | def fetch_items(self):
"""
Fetch items
Performs a query to retrieve items based on current query and
pagination settings.
"""
offset = self.per_page * (self.page - 1)
items = self._query.limit(self.per_page).offset(offset).all()
return items | [
"def",
"fetch_items",
"(",
"self",
")",
":",
"offset",
"=",
"self",
".",
"per_page",
"*",
"(",
"self",
".",
"page",
"-",
"1",
")",
"items",
"=",
"self",
".",
"_query",
".",
"limit",
"(",
"self",
".",
"per_page",
")",
".",
"offset",
"(",
"offset",
")",
".",
"all",
"(",
")",
"return",
"items"
]
| Fetch items
Performs a query to retrieve items based on current query and
pagination settings. | [
"Fetch",
"items",
"Performs",
"a",
"query",
"to",
"retrieve",
"items",
"based",
"on",
"current",
"query",
"and",
"pagination",
"settings",
"."
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/collections/paginated_collection.py#L66-L74 | train |
projectshift/shift-boiler | boiler/collections/paginated_collection.py | PaginatedCollection.next_page | def next_page(self):
"""
Next page
Uses query object to fetch next slice of items unless on last page in
which case does nothing
"""
if self.is_last_page():
return False
self.page += 1
self.items = self.fetch_items()
return True | python | def next_page(self):
"""
Next page
Uses query object to fetch next slice of items unless on last page in
which case does nothing
"""
if self.is_last_page():
return False
self.page += 1
self.items = self.fetch_items()
return True | [
"def",
"next_page",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_last_page",
"(",
")",
":",
"return",
"False",
"self",
".",
"page",
"+=",
"1",
"self",
".",
"items",
"=",
"self",
".",
"fetch_items",
"(",
")",
"return",
"True"
]
| Next page
Uses query object to fetch next slice of items unless on last page in
which case does nothing | [
"Next",
"page",
"Uses",
"query",
"object",
"to",
"fetch",
"next",
"slice",
"of",
"items",
"unless",
"on",
"last",
"page",
"in",
"which",
"case",
"does",
"nothing"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/collections/paginated_collection.py#L96-L107 | train |
projectshift/shift-boiler | boiler/collections/paginated_collection.py | PaginatedCollection.previous_page | def previous_page(self):
"""
Previous page
Uses query object to fetch previous slice of items unless on first
page in which case does nothing
"""
if self.is_first_page():
return False
self.page -= 1
self.items = self.fetch_items()
return True | python | def previous_page(self):
"""
Previous page
Uses query object to fetch previous slice of items unless on first
page in which case does nothing
"""
if self.is_first_page():
return False
self.page -= 1
self.items = self.fetch_items()
return True | [
"def",
"previous_page",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_first_page",
"(",
")",
":",
"return",
"False",
"self",
".",
"page",
"-=",
"1",
"self",
".",
"items",
"=",
"self",
".",
"fetch_items",
"(",
")",
"return",
"True"
]
| Previous page
Uses query object to fetch previous slice of items unless on first
page in which case does nothing | [
"Previous",
"page",
"Uses",
"query",
"object",
"to",
"fetch",
"previous",
"slice",
"of",
"items",
"unless",
"on",
"first",
"page",
"in",
"which",
"case",
"does",
"nothing"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/collections/paginated_collection.py#L109-L120 | train |
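A hedged usage sketch for the two methods above: because next_page() returns False once the last page is reached, a caller can stream every item without tracking page counts itself. The `collection` argument is assumed to be a PaginatedCollection built elsewhere from a query.

# Hypothetical usage: walk all pages by repeatedly calling next_page().
def iterate_all_items(collection):
    while True:
        for item in collection.items:
            yield item
        if not collection.next_page():
            # next_page() returned False: we were on the last page
            break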
brunato/lograptor | setup.py | my_bdist_rpm._make_spec_file | def _make_spec_file(self):
"""
Customize spec file inserting %config section
"""
spec_file = setuptools.command.bdist_rpm.bdist_rpm._make_spec_file(self)
spec_file.append('%config(noreplace) /etc/lograptor/lograptor.conf')
spec_file.append('%config(noreplace) /etc/lograptor/report_template.*')
spec_file.append('%config(noreplace) /etc/lograptor/conf.d/*.conf')
return spec_file | python | def _make_spec_file(self):
"""
Customize spec file inserting %config section
"""
spec_file = setuptools.command.bdist_rpm.bdist_rpm._make_spec_file(self)
spec_file.append('%config(noreplace) /etc/lograptor/lograptor.conf')
spec_file.append('%config(noreplace) /etc/lograptor/report_template.*')
spec_file.append('%config(noreplace) /etc/lograptor/conf.d/*.conf')
return spec_file | [
"def",
"_make_spec_file",
"(",
"self",
")",
":",
"spec_file",
"=",
"setuptools",
".",
"command",
".",
"bdist_rpm",
".",
"bdist_rpm",
".",
"_make_spec_file",
"(",
"self",
")",
"spec_file",
".",
"append",
"(",
"'%config(noreplace) /etc/lograptor/lograptor.conf'",
")",
"spec_file",
".",
"append",
"(",
"'%config(noreplace) /etc/lograptor/report_template.*'",
")",
"spec_file",
".",
"append",
"(",
"'%config(noreplace) /etc/lograptor/conf.d/*.conf'",
")",
"return",
"spec_file"
]
| Customize spec file inserting %config section | [
"Customize",
"spec",
"file",
"inserting",
"%config",
"section"
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/setup.py#L125-L133 | train |
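The record above customizes the generated RPM spec so configuration files survive package upgrades. A minimal sketch of the same pattern for an arbitrary setup.py follows; the /etc/myapp path is an example, not lograptor's actual layout.

# Subclass bdist_rpm and append %config(noreplace) entries so RPM
# upgrades keep locally edited configuration files instead of
# overwriting them.
import setuptools.command.bdist_rpm

class bdist_rpm_with_config(setuptools.command.bdist_rpm.bdist_rpm):
    def _make_spec_file(self):
        spec_file = super()._make_spec_file()
        spec_file.append('%config(noreplace) /etc/myapp/myapp.conf')
        return spec_file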
projectshift/shift-boiler | boiler/user/event_handlers.py | user_save_event | def user_save_event(user):
""" Handle persist event for user entities """
msg = 'User ({}){} updated/saved'.format(user.id, user.email)
current_app.logger.info(msg) | python | def user_save_event(user):
""" Handle persist event for user entities """
msg = 'User ({}){} updated/saved'.format(user.id, user.email)
current_app.logger.info(msg) | [
"def",
"user_save_event",
"(",
"user",
")",
":",
"msg",
"=",
"'User ({}){} updated/saved'",
".",
"format",
"(",
"user",
".",
"id",
",",
"user",
".",
"email",
")",
"current_app",
".",
"logger",
".",
"info",
"(",
"msg",
")"
]
| Handle persist event for user entities | [
"Handle",
"persist",
"event",
"for",
"user",
"entities"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/event_handlers.py#L14-L17 | train |
projectshift/shift-boiler | boiler/user/event_handlers.py | user_got_role_event | def user_got_role_event(user, role):
""" User got new role """
msg = 'User ({}){} got new role [{}]'
current_app.logger.info(msg.format(user.id, user.email, role.handle)) | python | def user_got_role_event(user, role):
""" User got new role """
msg = 'User ({}){} got new role [{}]'
current_app.logger.info(msg.format(user.id, user.email, role.handle)) | [
"def",
"user_got_role_event",
"(",
"user",
",",
"role",
")",
":",
"msg",
"=",
"'User ({}){} got new role [{}]'",
"current_app",
".",
"logger",
".",
"info",
"(",
"msg",
".",
"format",
"(",
"user",
".",
"id",
",",
"user",
".",
"email",
",",
"role",
".",
"handle",
")",
")"
]
| User got new role | [
"User",
"got",
"new",
"role"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/event_handlers.py#L107-L110 | train |
projectshift/shift-boiler | boiler/user/models.py | User.generate_hash | def generate_hash(self, length=30):
""" Generate random string of given length """
import random, string
chars = string.ascii_letters + string.digits
ran = random.SystemRandom().choice
hash = ''.join(ran(chars) for i in range(length))
return hash | python | def generate_hash(self, length=30):
""" Generate random string of given length """
import random, string
chars = string.ascii_letters + string.digits
ran = random.SystemRandom().choice
hash = ''.join(ran(chars) for i in range(length))
return hash | [
"def",
"generate_hash",
"(",
"self",
",",
"length",
"=",
"30",
")",
":",
"import",
"random",
",",
"string",
"chars",
"=",
"string",
".",
"ascii_letters",
"+",
"string",
".",
"digits",
"ran",
"=",
"random",
".",
"SystemRandom",
"(",
")",
".",
"choice",
"hash",
"=",
"''",
".",
"join",
"(",
"ran",
"(",
"chars",
")",
"for",
"i",
"in",
"range",
"(",
"length",
")",
")",
"return",
"hash"
]
| Generate random string of given length | [
"Generate",
"random",
"string",
"of",
"given",
"length"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L163-L169 | train |
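A standalone version of the token generation above: random.SystemRandom draws from the operating system's CSPRNG, so the result is suitable for confirmation links. On Python 3.6+, secrets.token_urlsafe is the more conventional choice for the same job.

import random
import string

def generate_hash(length=30):
    # alphanumeric alphabet, one OS-random choice per output character
    chars = string.ascii_letters + string.digits
    ran = random.SystemRandom().choice
    return ''.join(ran(chars) for _ in range(length))

token = generate_hash(50)
assert len(token) == 50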
projectshift/shift-boiler | boiler/user/models.py | User.gravatar | def gravatar(self, size):
""" Get url to gravatar """
hash = md5(self.email.encode('utf-8')).hexdigest()
url = 'http://www.gravatar.com/avatar/{}?d=mm&s={}'
return url.format(hash, size) | python | def gravatar(self, size):
""" Get url to gravatar """
hash = md5(self.email.encode('utf-8')).hexdigest()
url = 'http://www.gravatar.com/avatar/{}?d=mm&s={}'
return url.format(hash, size) | [
"def",
"gravatar",
"(",
"self",
",",
"size",
")",
":",
"hash",
"=",
"md5",
"(",
"self",
".",
"email",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"url",
"=",
"'http://www.gravatar.com/avatar/{}?d=mm&s={}'",
"return",
"url",
".",
"format",
"(",
"hash",
",",
"size",
")"
]
| Get url to gravatar | [
"Get",
"url",
"to",
"gravatar"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L171-L175 | train |
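Gravatar identifies avatars by the MD5 hex digest of the email address; d=mm falls back to the "mystery man" image and s sets the pixel size. The sketch below additionally strips and lowercases the input, which the Gravatar docs recommend (the User model above already lowercases emails in its setter), and note that https is generally preferred over the http URL in the record.

from hashlib import md5

def gravatar_url(email, size):
    digest = md5(email.strip().lower().encode('utf-8')).hexdigest()
    return 'http://www.gravatar.com/avatar/{}?d=mm&s={}'.format(digest, size)

print(gravatar_url('user@example.com', 80))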
projectshift/shift-boiler | boiler/user/models.py | User.is_locked | def is_locked(self):
"""
Is locked?
Checks locking and possibly unlocks upon timeout if account was
previously locked.
"""
now = datetime.datetime.utcnow()
if self.locked_until and self.locked_until >= now:
return True
elif self.locked_until and self.locked_until < now:
self.unlock_account()
return False
else:
return False | python | def is_locked(self):
"""
Is locked?
Checks locking and possibly unlocks upon timeout if account was
previously locked.
"""
now = datetime.datetime.utcnow()
if self.locked_until and self.locked_until >= now:
return True
elif self.locked_until and self.locked_until < now:
self.unlock_account()
return False
else:
return False | [
"def",
"is_locked",
"(",
"self",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"self",
".",
"locked_until",
"and",
"self",
".",
"locked_until",
">=",
"now",
":",
"return",
"True",
"elif",
"self",
".",
"locked_until",
"and",
"self",
".",
"locked_until",
"<",
"now",
":",
"self",
".",
"unlock_account",
"(",
")",
"return",
"False",
"else",
":",
"return",
"False"
]
| Is locked?
Checks locking and possibly unlocks upon timeout if account was
previously locked. | [
"Is",
"locked?",
"Checks",
"locking",
"and",
"possibly",
"unlocks",
"upon",
"timeout",
"if",
"account",
"was",
"previously",
"locked",
"."
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L200-L213 | train |
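The lock check above in isolation: a lock is simply a UTC timestamp, and a timestamp in the past means the lock has expired. The record's version also clears the expired lock on first read (lazy unlock); that clearing step is omitted in this simplified sketch.

import datetime

def check_lock(locked_until):
    now = datetime.datetime.utcnow()
    if locked_until and locked_until >= now:
        return True   # still locked
    return False      # never locked, or lock expired

assert check_lock(None) is False
assert check_lock(datetime.datetime.utcnow()
                  + datetime.timedelta(minutes=5)) is True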
projectshift/shift-boiler | boiler/user/models.py | User.lock_account | def lock_account(self, minutes=30):
""" Lock user account for a period """
period = datetime.timedelta(minutes=minutes)
self.locked_until = datetime.datetime.utcnow() + period | python | def lock_account(self, minutes=30):
""" Lock user account for a period """
period = datetime.timedelta(minutes=minutes)
self.locked_until = datetime.datetime.utcnow() + period | [
"def",
"lock_account",
"(",
"self",
",",
"minutes",
"=",
"30",
")",
":",
"period",
"=",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"minutes",
")",
"self",
".",
"locked_until",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"+",
"period"
]
| Lock user account for a period | [
"Lock",
"user",
"account",
"for",
"a",
"period"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L215-L218 | train |
projectshift/shift-boiler | boiler/user/models.py | User.increment_failed_logins | def increment_failed_logins(self):
""" Increment failed logins counter"""
if not self.failed_logins:
self.failed_logins = 1
elif not self.failed_login_limit_reached():
self.failed_logins += 1
else:
self.reset_login_counter()
self.lock_account(30) | python | def increment_failed_logins(self):
""" Increment failed logins counter"""
if not self.failed_logins:
self.failed_logins = 1
elif not self.failed_login_limit_reached():
self.failed_logins += 1
else:
self.reset_login_counter()
self.lock_account(30) | [
"def",
"increment_failed_logins",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"failed_logins",
":",
"self",
".",
"failed_logins",
"=",
"1",
"elif",
"not",
"self",
".",
"failed_login_limit_reached",
"(",
")",
":",
"self",
".",
"failed_logins",
"+=",
"1",
"else",
":",
"self",
".",
"reset_login_counter",
"(",
")",
"self",
".",
"lock_account",
"(",
"30",
")"
]
| Increment failed logins counter | [
"Increment",
"failed",
"logins",
"counter"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L224-L232 | train |
projectshift/shift-boiler | boiler/user/models.py | User.failed_login_limit_reached | def failed_login_limit_reached(self):
""" A boolean method to check for failed login limit being reached"""
login_limit = 10
if self.failed_logins and self.failed_logins >= login_limit:
return True
else:
return False | python | def failed_login_limit_reached(self):
""" A boolean method to check for failed login limit being reached"""
login_limit = 10
if self.failed_logins and self.failed_logins >= login_limit:
return True
else:
return False | [
"def",
"failed_login_limit_reached",
"(",
"self",
")",
":",
"login_limit",
"=",
"10",
"if",
"self",
".",
"failed_logins",
"and",
"self",
".",
"failed_logins",
">=",
"login_limit",
":",
"return",
"True",
"else",
":",
"return",
"False"
]
| A boolean method to check for failed login limit being reached | [
"A",
"boolean",
"method",
"to",
"check",
"for",
"failed",
"login",
"limit",
"being",
"reached"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L238-L244 | train |
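A condensed sketch of the throttling flow that increment_failed_logins() and failed_login_limit_reached() implement together: count failures up to a limit, then reset the counter and lock the account for a cool-down period. The Throttle class below is illustrative, not boiler's API.

import datetime

LIMIT = 10

class Throttle:
    failed_logins = 0
    locked_until = None

    def record_failure(self):
        if self.failed_logins < LIMIT:
            self.failed_logins += 1
        else:
            # limit reached: start over and lock for 30 minutes
            self.failed_logins = 0
            self.locked_until = (datetime.datetime.utcnow()
                                 + datetime.timedelta(minutes=30))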
projectshift/shift-boiler | boiler/user/models.py | User.email_secure | def email_secure(self):
""" Obfuscated email used for display """
email = self._email
if not email: return ''
address, host = email.split('@')
if len(address) <= 2: return ('*' * len(address)) + '@' + host
import re
host = '@' + host
    obfuscated = re.sub(r'[a-zA-Z0-9]', '*', address[1:-1])
return address[:1] + obfuscated + address[-1:] + host | python | def email_secure(self):
""" Obfuscated email used for display """
email = self._email
if not email: return ''
address, host = email.split('@')
if len(address) <= 2: return ('*' * len(address)) + '@' + host
import re
host = '@' + host
    obfuscated = re.sub(r'[a-zA-Z0-9]', '*', address[1:-1])
return address[:1] + obfuscated + address[-1:] + host | [
"def",
"email_secure",
"(",
"self",
")",
":",
"email",
"=",
"self",
".",
"_email",
"if",
"not",
"email",
":",
"return",
"''",
"address",
",",
"host",
"=",
"email",
".",
"split",
"(",
"'@'",
")",
"if",
"len",
"(",
"address",
")",
"<=",
"2",
":",
"return",
"(",
"'*'",
"*",
"len",
"(",
"address",
")",
")",
"+",
"'@'",
"+",
"host",
"import",
"re",
"host",
"=",
"'@'",
"+",
"host",
"obfuscated",
"=",
"re",
".",
"sub",
"(",
"r'[a-zA-z0-9]'",
",",
"'*'",
",",
"address",
"[",
"1",
":",
"-",
"1",
"]",
")",
"return",
"address",
"[",
":",
"1",
"]",
"+",
"obfuscated",
"+",
"address",
"[",
"-",
"1",
":",
"]",
"+",
"host"
]
| Obfuscated email used for display | [
"Obfuscated",
"email",
"used",
"for",
"display"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L256-L266 | train |
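The obfuscation rule above, stated standalone: keep the first and last character of the local part, star out the alphanumerics in between, and leave the host visible; locals of two characters or fewer are starred entirely. (The character class is written [a-zA-Z0-9]; the mixed-case [a-zA-z0-9] range in the original was a typo that also matched some punctuation.)

import re

def obfuscate(email):
    address, host = email.split('@')
    if len(address) <= 2:
        return '*' * len(address) + '@' + host
    middle = re.sub(r'[a-zA-Z0-9]', '*', address[1:-1])
    return address[0] + middle + address[-1] + '@' + host

assert obfuscate('johndoe@example.com') == 'j*****e@example.com'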
projectshift/shift-boiler | boiler/user/models.py | User.email | def email(self, email):
""" Set email and generate confirmation """
if email == self.email:
return
email = email.lower()
if self._email is None:
self._email = email
self.require_email_confirmation()
else:
self.email_new = email
self.require_email_confirmation() | python | def email(self, email):
""" Set email and generate confirmation """
if email == self.email:
return
email = email.lower()
if self._email is None:
self._email = email
self.require_email_confirmation()
else:
self.email_new = email
self.require_email_confirmation() | [
"def",
"email",
"(",
"self",
",",
"email",
")",
":",
"if",
"email",
"==",
"self",
".",
"email",
":",
"return",
"email",
"=",
"email",
".",
"lower",
"(",
")",
"if",
"self",
".",
"_email",
"is",
"None",
":",
"self",
".",
"_email",
"=",
"email",
"self",
".",
"require_email_confirmation",
"(",
")",
"else",
":",
"self",
".",
"email_new",
"=",
"email",
"self",
".",
"require_email_confirmation",
"(",
")"
]
| Set email and generate confirmation | [
"Set",
"email",
"and",
"generate",
"confirmation"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L269-L280 | train |
projectshift/shift-boiler | boiler/user/models.py | User.require_email_confirmation | def require_email_confirmation(self):
""" Mark email as unconfirmed"""
self.email_confirmed = False
self.email_link = self.generate_hash(50)
now = datetime.datetime.utcnow()
self.email_link_expires = now + datetime.timedelta(hours=24) | python | def require_email_confirmation(self):
""" Mark email as unconfirmed"""
self.email_confirmed = False
self.email_link = self.generate_hash(50)
now = datetime.datetime.utcnow()
self.email_link_expires = now + datetime.timedelta(hours=24) | [
"def",
"require_email_confirmation",
"(",
"self",
")",
":",
"self",
".",
"email_confirmed",
"=",
"False",
"self",
".",
"email_link",
"=",
"self",
".",
"generate_hash",
"(",
"50",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"self",
".",
"email_link_expires",
"=",
"now",
"+",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"24",
")"
]
| Mark email as unconfirmed | [
"Mark",
"email",
"as",
"unconfirmed"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L282-L287 | train |
projectshift/shift-boiler | boiler/user/models.py | User.cancel_email_change | def cancel_email_change(self):
""" Cancel email change for new users and roll back data """
if not self.email_new:
return
self.email_new = None
self.email_confirmed = True
self.email_link = None
self.email_new = None
self.email_link_expires = None | python | def cancel_email_change(self):
""" Cancel email change for new users and roll back data """
if not self.email_new:
return
self.email_new = None
self.email_confirmed = True
self.email_link = None
self.email_new = None
self.email_link_expires = None | [
"def",
"cancel_email_change",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"email_new",
":",
"return",
"self",
".",
"email_new",
"=",
"None",
"self",
".",
"email_confirmed",
"=",
"True",
"self",
".",
"email_link",
"=",
"None",
"self",
".",
"email_new",
"=",
"None",
"self",
".",
"email_link_expires",
"=",
"None"
]
| Cancel email change for new users and roll back data | [
"Cancel",
"email",
"change",
"for",
"new",
"users",
"and",
"roll",
"back",
"data"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L299-L308 | train |
projectshift/shift-boiler | boiler/user/models.py | User.email_link_expired | def email_link_expired(self, now=None):
""" Check if email link expired """
if not now: now = datetime.datetime.utcnow()
return self.email_link_expires < now | python | def email_link_expired(self, now=None):
""" Check if email link expired """
if not now: now = datetime.datetime.utcnow()
return self.email_link_expires < now | [
"def",
"email_link_expired",
"(",
"self",
",",
"now",
"=",
"None",
")",
":",
"if",
"not",
"now",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"return",
"self",
".",
"email_link_expires",
"<",
"now"
]
| Check if email link expired | [
"Check",
"if",
"email",
"link",
"expired"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L310-L313 | train |
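The expiring-link pattern shared by the email-confirmation and password-reset flows above: store a random token alongside a UTC deadline, then compare the deadline against utcnow() when the link is used. A minimal sketch, using secrets instead of the model's generate_hash so it stands alone:

import datetime
import secrets

def make_link(ttl_hours=24):
    token = secrets.token_urlsafe(37)  # ~50 URL-safe characters
    expires = datetime.datetime.utcnow() + datetime.timedelta(hours=ttl_hours)
    return token, expires

def link_expired(expires, now=None):
    now = now or datetime.datetime.utcnow()
    return expires < now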
projectshift/shift-boiler | boiler/user/models.py | User.password | def password(self, password):
""" Encode a string and set as password """
from boiler.user.util.passlib import passlib_context
password = str(password)
encrypted = passlib_context.encrypt(password)
self._password = encrypted | python | def password(self, password):
""" Encode a string and set as password """
from boiler.user.util.passlib import passlib_context
password = str(password)
encrypted = passlib_context.encrypt(password)
self._password = encrypted | [
"def",
"password",
"(",
"self",
",",
"password",
")",
":",
"from",
"boiler",
".",
"user",
".",
"util",
".",
"passlib",
"import",
"passlib_context",
"password",
"=",
"str",
"(",
"password",
")",
"encrypted",
"=",
"passlib_context",
".",
"encrypt",
"(",
"password",
")",
"self",
".",
"_password",
"=",
"encrypted"
]
| Encode a string and set as password | [
"Encode",
"a",
"string",
"and",
"set",
"as",
"password"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L325-L330 | train |
projectshift/shift-boiler | boiler/user/models.py | User.verify_password | def verify_password(self, password):
""" Verify a given string for being valid password """
if self.password is None:
return False
from boiler.user.util.passlib import passlib_context
return passlib_context.verify(str(password), self.password) | python | def verify_password(self, password):
""" Verify a given string for being valid password """
if self.password is None:
return False
from boiler.user.util.passlib import passlib_context
return passlib_context.verify(str(password), self.password) | [
"def",
"verify_password",
"(",
"self",
",",
"password",
")",
":",
"if",
"self",
".",
"password",
"is",
"None",
":",
"return",
"False",
"from",
"boiler",
".",
"user",
".",
"util",
".",
"passlib",
"import",
"passlib_context",
"return",
"passlib_context",
".",
"verify",
"(",
"str",
"(",
"password",
")",
",",
"self",
".",
"password",
")"
]
| Verify a given string for being valid password | [
"Verify",
"a",
"given",
"string",
"for",
"being",
"valid",
"password"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L332-L338 | train |
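A hash/verify round trip with passlib, which the password setter and verify_password above delegate to. The CryptContext below is configured locally for the example; boiler's own passlib_context may use different schemes. Note that passlib 1.7 renamed encrypt() (used in the record) to hash(); verify() is unchanged.

from passlib.context import CryptContext

ctx = CryptContext(schemes=['bcrypt'])
hashed = ctx.hash('s3cret')        # salted, one-way hash
assert ctx.verify('s3cret', hashed)
assert not ctx.verify('wrong', hashed)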
projectshift/shift-boiler | boiler/user/models.py | User.generate_password_link | def generate_password_link(self):
""" Generates a link to reset password """
self.password_link = self.generate_hash(50)
now = datetime.datetime.utcnow()
self.password_link_expires = now + datetime.timedelta(hours=24) | python | def generate_password_link(self):
""" Generates a link to reset password """
self.password_link = self.generate_hash(50)
now = datetime.datetime.utcnow()
self.password_link_expires = now + datetime.timedelta(hours=24) | [
"def",
"generate_password_link",
"(",
"self",
")",
":",
"self",
".",
"password_link",
"=",
"self",
".",
"generate_hash",
"(",
"50",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"self",
".",
"password_link_expires",
"=",
"now",
"+",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"24",
")"
]
| Generates a link to reset password | [
"Generates",
"a",
"link",
"to",
"reset",
"password"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L340-L344 | train |