Code | Summary
---|---
Please provide a description of the function:def _get_mappedids(entry, graph):
model = Model(graph)
omim_num = str(entry['mimNumber'])
omim_curie = 'OMIM:' + omim_num
orpha_mappings = []
if 'externalLinks' in entry:
links = entry['externalLinks']
if 'orphanetDiseases' in links:
# triple semi-colon delimited list of
# double semi-colon delimited orphanet ID/disease pairs
# 2970;;566;;Prune belly syndrome
items = links['orphanetDiseases'].strip().split(';;;')
for item in items:
orphdis = item.strip().split(';;')
orpha_num = orphdis[0].strip()
orpha_label = orphdis[2].strip()
orpha_curie = 'ORPHA:' + orpha_num
orpha_mappings.append(orpha_curie)
model.addClassToGraph(orpha_curie, orpha_label)
model.addXref(omim_curie, orpha_curie)
if 'umlsIDs' in links:
umls_mappings = links['umlsIDs'].split(',')
for umls in umls_mappings:
umls_curie = 'UMLS:' + umls
model.addClassToGraph(umls_curie, None)
model.addXref(omim_curie, umls_curie) | [
"\n Extract the Orphanet and UMLS ids as equivalences from the entry\n :param entry:\n :return:\n "
]
|
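A quick, self-contained illustration of the delimiter scheme described in the comments above, using a made-up two-entry string (the second entry is hypothetical):

raw = "2970;;566;;Prune belly syndrome;;;93404;;768;;Another disease"
for item in raw.strip().split(';;;'):
    orpha_num, _internal_id, orpha_label = (tok.strip() for tok in item.split(';;'))
    print('ORPHA:' + orpha_num, '->', orpha_label)
# ORPHA:2970 -> Prune belly syndrome
# ORPHA:93404 -> Another disease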
Please provide a description of the function:def _get_alt_labels(self, titles):
labels = []
# "alternativeTitles": "
# ACROCEPHALOSYNDACTYLY, TYPE V; ACS5;;\nACS V;;\nNOACK SYNDROME",
# "includedTitles":
# "CRANIOFACIAL-SKELETAL-DERMATOLOGIC DYSPLASIA, INCLUDED"
for title in titles.split(';;'):
# remove ', included', if present
label = re.sub(r',\s*INCLUDED', '', title.strip(), flags=re.IGNORECASE)
label = self._cleanup_label(label)
labels.append(label)
return labels | [
"\n From a string of delimited titles, make an array.\n This assumes that the titles are double-semicolon (';;') delimited.\n This will additionally pass each through the _cleanup_label method to\n convert the screaming ALL CAPS to something more pleasant to read.\n :param titles:\n :return: an array of cleaned-up labels\n "
]
|
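A self-contained sketch of the ', INCLUDED' stripping; note that re.sub takes the IGNORECASE flag via flags=, not as the positional count argument (the class's _cleanup_label step is omitted here):

import re

titles = "ACROCEPHALOSYNDACTYLY, TYPE V; ACS5;;ACS V;;NOACK SYNDROME, INCLUDED"
labels = [
    re.sub(r',\s*INCLUDED', '', title.strip(), flags=re.IGNORECASE)
    for title in titles.split(';;')
]
print(labels)
# ['ACROCEPHALOSYNDACTYLY, TYPE V; ACS5', 'ACS V', 'NOACK SYNDROME']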
Please provide a description of the function:def _get_pubs(self, entry, graph):
ref_to_pmid = {}
entry_num = entry['mimNumber']
if 'referenceList' in entry:
reflist = entry['referenceList']
for rlst in reflist:
if 'pubmedID' in rlst['reference']:
pub_id = 'PMID:' + str(rlst['reference']['pubmedID'])
ref = Reference(
graph, pub_id, self.globaltt['journal article'])
else:
# make blank node for internal reference
pub_id = '_:OMIM' + str(entry_num) + 'ref' + str(
rlst['reference']['referenceNumber'])
ref = Reference(graph, pub_id)
title = author_list = source = citation = None
if 'title' in rlst['reference']:
title = rlst['reference']['title']
ref.setTitle(title)
if 'authors' in rlst['reference']:
author_list = rlst['reference']['authors']
ref.setAuthorList(author_list)
citation = re.split(r'\.\,', author_list)[0] + ' et al'
if 'source' in rlst['reference']:
source = rlst['reference']['source']
citation = '; '.join(
[tok for tok in [citation, title, source] if tok is not None])
ref.setShortCitation(citation)
ref.addRefToGraph()
ref_to_pmid[rlst['reference']['referenceNumber']] = pub_id
# add is_about for the pub
omim_id = 'OMIM:' + str(entry_num)
graph.addTriple(omim_id, self.globaltt['mentions'], pub_id)
return ref_to_pmid | [
"\n Extract mentioned publications from the reference list\n :param entry:\n :return:\n "
]
|
Please provide a description of the function:def _get_omimtype(entry, globaltt):
# An asterisk (*) before an entry number indicates a gene.
# A number symbol (#) before an entry number indicates
# that it is a descriptive entry, usually of a phenotype,
# and does not represent a unique locus.
# The reason for the use of the number symbol
# is given in the first paragraph of the entry.
# Discussion of any gene(s) related to the phenotype resides in
# another entry(ies) as described in the first paragraph.
#
# A plus sign (+) before an entry number indicates that the
# entry contains the description of a gene of
# known sequence and a phenotype.
#
# A percent sign (%) before an entry number indicates that the
# entry describes a confirmed mendelian phenotype or phenotypic locus
# for which the underlying molecular basis is not known.
#
# No symbol before an entry number generally indicates a
# description of a phenotype for which the mendelian basis,
# although suspected, has not been clearly established
# or that the separateness of this phenotype
# from that in another entry is unclear.
#
# A caret (^) before an entry number means the
# entry no longer exists because it was removed from the database
# or moved to another entry as indicated.
prefix = None
type_id = None
if 'prefix' in entry:
prefix = entry['prefix']
if prefix == '*':
# gene, may not have a known sequence or a phenotype
# note that some genes are also phenotypes,
# even in this class, like 102480
# examples: 102560,102480,100678,102750
type_id = globaltt['gene']
elif prefix == '#':
# phenotype/disease -- indicate that here?
# examples: 104200,105400,114480,115300,121900
# type_id = globaltt['Phenotype'] # 'UPHENO_0001001' # species agnostic
# type_id = globaltt['human phenotypic abnormality']
pass
elif prefix == '+':
# gene of known sequence and has a phenotype
# examples: 107670,110600,126453
type_id = globaltt['gene'] # doublecheck this
elif prefix == '%':
# this is a disease (with a known locus).
# examples include: 102150,104000,107200,100070
type_id = globaltt['heritable_phenotypic_marker']
elif prefix == '':
# this is probably just a phenotype
pass
return type_id | [
"\n (note: there is anlaternative using mimTitle in omia)\n\n\n Here, we look at the omim 'prefix' to help to type the entry.\n For now, we only classify omim entries as genes;\n the rest we leave alone.\n :param entry:\n :return:\n "
]
|
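The prefix semantics above reduce to a small lookup; a hedged sketch with plain strings standing in for the globaltt terms:

OMIM_PREFIX_TYPE = {
    '*': 'gene',                          # gene; phenotype or sequence may be unknown
    '+': 'gene',                          # gene of known sequence plus a phenotype
    '%': 'heritable_phenotypic_marker',   # confirmed mendelian phenotype, molecular basis unknown
    '#': None,                            # descriptive phenotype entry, deliberately left untyped
    '': None,                             # suspected mendelian phenotype, left untyped
}

def omim_type_sketch(entry):
    return OMIM_PREFIX_TYPE.get(entry.get('prefix', ''))

assert omim_type_sketch({'prefix': '*'}) == 'gene'
assert omim_type_sketch({'prefix': '#'}) is None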
Please provide a description of the function:def calc_root(key: bytes, value: bytes, branch: Sequence[Hash32]) -> Hash32:
validate_is_bytes(key)
validate_is_bytes(value)
validate_length(branch, len(key)*8)
path = to_int(key)
target_bit = 1
# traverse the path in leaf->root order
# branch is in root->leaf order (key is in MSB to LSB order)
node_hash = keccak(value)
for sibling_node in reversed(branch):
if path & target_bit:
node_hash = keccak(sibling_node + node_hash)
else:
node_hash = keccak(node_hash + sibling_node)
target_bit <<= 1
return node_hash | [
"\n Obtain the merkle root of a given key/value/branch set.\n Can be used to validate a merkle proof or compute it's value from data.\n\n :param key: the keypath to decide the ordering of the sibling nodes in the branch\n :param value: the value (or leaf) that starts the merkle proof computation\n :param branch: the sequence of sibling nodes used to recursively perform the computation\n\n :return: the root hash of the merkle proof computation\n\n .. doctest::\n\n >>> key = b'\\x02' # Keypath\n >>> value = b'' # Value (or leaf)\n >>> branch = tuple([b'\\x00'] * 8) # Any list of hashes\n >>> calc_root(key, value, branch)\n b'.+4IKt[\\xd2\\x14\\xe4).\\xf5\\xc6\\n\\x11=\\x01\\xe89\\xa1Z\\x07#\\xfd~(;\\xfb\\xb8\\x8a\\x0e'\n\n "
]
|
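The same fold restated as a self-contained sketch, with hashlib.sha256 standing in for keccak (assumption: any 32-byte hash illustrates the sibling-ordering logic); comparing a recomputed root against a known one is exactly how a proof is verified:

import hashlib

def sha(data):
    return hashlib.sha256(data).digest()

def calc_root_sketch(key, value, branch):
    path = int.from_bytes(key, 'big')
    node_hash = sha(value)               # the leaf hash starts the fold
    target_bit = 1
    for sibling in reversed(branch):     # branch is root->leaf; walk leaf->root
        if path & target_bit:
            node_hash = sha(sibling + node_hash)
        else:
            node_hash = sha(node_hash + sibling)
        target_bit <<= 1
    return node_hash

branch = tuple([b'\x00' * 32] * 8)
root = calc_root_sketch(b'\x02', b'', branch)
assert calc_root_sketch(b'\x02', b'', branch) == root    # proof verifies
assert calc_root_sketch(b'\x02', b'x', branch) != root   # tampered value fails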
Please provide a description of the function:def update(self, key: bytes, value: bytes, node_updates: Sequence[Hash32]):
validate_is_bytes(key)
validate_length(key, self._key_size)
# Path diff is the logical XOR of the updated key and this account
path_diff = (to_int(self.key) ^ to_int(key))
# Same key (diff of 0), update the tracked value
if path_diff == 0:
self._value = value
# No need to update branch
else:
# Find the first mismatched bit between keypaths. This is
# where the branch point occurs, and we should update the
# sibling node in the source branch at the branch point.
# NOTE: Keys are in MSB->LSB (root->leaf) order.
# Node lists are in root->leaf order.
# Be sure to convert between them effectively.
for bit in reversed(range(self._branch_size)):
if path_diff & (1 << bit) > 0:
branch_point = (self._branch_size - 1) - bit
break
# NOTE: node_updates only has to be as long as necessary
# to obtain the update. This allows an optimization
# of pruning updates to the maximum possible depth
# that would be required to update, which may be
# significantly smaller than the tree depth.
if len(node_updates) <= branch_point:
raise ValidationError("Updated node list is not deep enough")
# Update sibling node in the branch where our key differs from the update
self._branch[branch_point] = node_updates[branch_point] | [
"\n Merge an update for another key with the one we are tracking internally.\n\n :param key: keypath of the update we are processing\n :param value: value of the update we are processing\n :param node_updates: sequence of sibling nodes (in root->leaf order)\n must be at least as large as the first diverging\n key in the keypath\n\n "
]
|
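The branch-point search is just "index of the first differing bit, counted from the MSB"; a sketch for 8-bit keys:

branch_size = 8
key_a, key_b = 0b10110000, 0b10100000
path_diff = key_a ^ key_b               # 0b00010000
for bit in reversed(range(branch_size)):
    if path_diff & (1 << bit):
        branch_point = (branch_size - 1) - bit
        break
assert branch_point == 3                # keys first diverge at bit 3 from the MSB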
Please provide a description of the function:def _get(self, key: bytes) -> Tuple[bytes, Tuple[Hash32]]:
validate_is_bytes(key)
validate_length(key, self._key_size)
branch = []
target_bit = 1 << (self.depth - 1)
path = to_int(key)
node_hash = self.root_hash
# Append the sibling node to the branch
# Iterate on the parent
for _ in range(self.depth):
node = self.db[node_hash]
left, right = node[:32], node[32:]
if path & target_bit:
branch.append(left)
node_hash = right
else:
branch.append(right)
node_hash = left
target_bit >>= 1
# Value is the last hash in the chain
# NOTE: Didn't do exception here for testing purposes
return self.db[node_hash], tuple(branch) | [
"\n Returns db value and branch in root->leaf order\n "
]
|
Please provide a description of the function:def set(self, key: bytes, value: bytes) -> Tuple[Hash32]:
validate_is_bytes(key)
validate_length(key, self._key_size)
validate_is_bytes(value)
path = to_int(key)
node = value
_, branch = self._get(key)
proof_update = [] # Keep track of proof updates
target_bit = 1
# branch is in root->leaf order, so flip
for sibling_node in reversed(branch):
# Set
node_hash = keccak(node)
proof_update.append(node_hash)
self.db[node_hash] = node
# Update
if (path & target_bit):
node = sibling_node + node_hash
else:
node = node_hash + sibling_node
target_bit <<= 1
# Finally, update root hash
self.root_hash = keccak(node)
self.db[self.root_hash] = node
# updates need to be in root->leaf order, so flip back
return tuple(reversed(proof_update)) | [
"\n Returns all updated hashes in root->leaf order\n "
]
|
Please provide a description of the function:def delete(self, key: bytes) -> Tuple[Hash32]:
validate_is_bytes(key)
validate_length(key, self._key_size)
return self.set(key, self._default) | [
"\n Equals to setting the value to None\n Returns all updated hashes in root->leaf order\n "
]
|
Please provide a description of the function:def next_batch(self, n=1):
if len(self.queue) == 0:
return []
batch = list(reversed(self.queue[-n:]))
self.queue = self.queue[:-n]
return batch | [
"Return the next requests that should be dispatched."
]
|
Please provide a description of the function:def schedule(self, node_key, parent, depth, leaf_callback, is_raw=False):
if node_key in self._existing_nodes:
self.logger.debug("Node %s already exists in db" % encode_hex(node_key))
return
if node_key in self.db:
self._existing_nodes.add(node_key)
self.logger.debug("Node %s already exists in db" % encode_hex(node_key))
return
if parent is not None:
parent.dependencies += 1
existing = self.requests.get(node_key)
if existing is not None:
self.logger.debug(
"Already requesting %s, will just update parents list" % node_key)
existing.parents.append(parent)
return
request = SyncRequest(node_key, parent, depth, leaf_callback, is_raw)
# Requests get added to both self.queue and self.requests; the former is used to keep
# track which requests should be sent next, and the latter is used to avoid scheduling a
# request for a given node multiple times.
self.logger.debug("Scheduling retrieval of %s" % encode_hex(request.node_key))
self.requests[request.node_key] = request
bisect.insort(self.queue, request) | [
"Schedule a request for the node with the given key."
]
|
Please provide a description of the function:def get_children(self, request):
node = decode_node(request.data)
return _get_children(node, request.depth) | [
"Return all children of the node retrieved by the given request.\n\n :rtype: A two-tuple with one list containing the children that reference other nodes and\n another containing the leaf children.\n "
]
|
Please provide a description of the function:def process(self, results):
for node_key, data in results:
request = self.requests.get(node_key)
if request is None:
# This may happen if we resend a request for a node after waiting too long,
# and then eventually get two responses with it.
self.logger.info(
"No SyncRequest found for %s, maybe we got more than one response for it"
% encode_hex(node_key))
continue
if request.data is not None:
raise SyncRequestAlreadyProcessed("%s has been processed already" % request)
request.data = data
if request.is_raw:
self.commit(request)
continue
references, leaves = self.get_children(request)
for depth, ref in references:
self.schedule(ref, request, depth, request.leaf_callback)
if request.leaf_callback is not None:
for leaf in leaves:
request.leaf_callback(leaf, request)
if request.dependencies == 0:
self.commit(request) | [
"Process request results.\n\n :param results: A list of two-tuples containing the node's key and data.\n "
]
|
Please provide a description of the function:def check_if_branch_exist(db, root_hash, key_prefix):
validate_is_bytes(key_prefix)
return _check_if_branch_exist(db, root_hash, encode_to_bin(key_prefix)) | [
"\n Given a key prefix, return whether this prefix is\n the prefix of an existing key in the trie.\n "
]
|
Please provide a description of the function:def get_branch(db, root_hash, key):
validate_is_bytes(key)
return tuple(_get_branch(db, root_hash, encode_to_bin(key))) | [
"\n Get a long-format Merkle branch\n "
]
|
Please provide a description of the function:def get_witness_for_key_prefix(db, node_hash, key):
validate_is_bytes(key)
return tuple(_get_witness_for_key_prefix(db, node_hash, encode_to_bin(key))) | [
"\n Get all witness given a keypath prefix.\n Include\n\n 1. witness along the keypath and\n 2. witness in the subtrie of the last node in keypath\n "
]
|
Please provide a description of the function:def parse_node(node):
if node is None or node == b'':
raise InvalidNode("Blank node is not a valid node type in Binary Trie")
elif node[0] == BRANCH_TYPE:
if len(node) != 65:
raise InvalidNode("Invalid branch node, both child node should be 32 bytes long each")
# Output: node type, left child, right child
return BRANCH_TYPE, node[1:33], node[33:]
elif node[0] == KV_TYPE:
if len(node) <= 33:
raise InvalidNode("Invalid kv node, short of key path or child node hash")
# Output: node type, keypath: child
return KV_TYPE, decode_to_bin_keypath(node[1:-32]), node[-32:]
elif node[0] == LEAF_TYPE:
if len(node) == 1:
raise InvalidNode("Invalid leaf node, can not contain empty value")
# Output: node type, None, value
return LEAF_TYPE, None, node[1:]
else:
raise InvalidNode("Unable to parse node") | [
"\n Input: a serialized node\n "
]
|
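The serialized layouts the parser expects, sketched with assumed one-byte type tags (the real tag constants are defined elsewhere in the library):

KV_TYPE, BRANCH_TYPE, LEAF_TYPE = 0, 1, 2   # assumption: illustrative tag values

branch_node = bytes([BRANCH_TYPE]) + b'\x11' * 32 + b'\x22' * 32   # 1 + 32 + 32 = 65 bytes
leaf_node = bytes([LEAF_TYPE]) + b'some value'                     # tag + raw value
# a KV node is: tag + bin-encoded keypath + 32-byte child hash
assert len(branch_node) == 65 and branch_node[1:33] == b'\x11' * 32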
Please provide a description of the function:def encode_kv_node(keypath, child_node_hash):
if keypath is None or keypath == b'':
raise ValidationError("Key path can not be empty")
validate_is_bytes(keypath)
validate_is_bytes(child_node_hash)
validate_length(child_node_hash, 32)
return KV_TYPE_PREFIX + encode_from_bin_keypath(keypath) + child_node_hash | [
"\n Serializes a key/value node\n "
]
|
Please provide a description of the function:def encode_branch_node(left_child_node_hash, right_child_node_hash):
validate_is_bytes(left_child_node_hash)
validate_length(left_child_node_hash, 32)
validate_is_bytes(right_child_node_hash)
validate_length(right_child_node_hash, 32)
return BRANCH_TYPE_PREFIX + left_child_node_hash + right_child_node_hash | [
"\n Serializes a branch node\n "
]
|
Please provide a description of the function:def encode_leaf_node(value):
validate_is_bytes(value)
if value is None or value == b'':
raise ValidationError("Value of leaf node can not be empty")
return LEAF_TYPE_PREFIX + value | [
"\n Serializes a leaf node\n "
]
|
Please provide a description of the function:def batch_commit(self, *, do_deletes=False):
'''
Batch writes and commit them at the end of the context
'''
try:
yield
except Exception as exc:
raise exc
else:
for key, value in self.cache.items():
if value is not DELETED:
self.wrapped_db[key] = value
elif do_deletes:
self.wrapped_db.pop(key, None)
# if do_deletes is False, ignore deletes to underlying db
finally:
self.cache = {} | []
|
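A minimal self-contained version of the pattern (assumptions: the source wraps batch_commit with contextlib.contextmanager, and DELETED is a module-level sentinel):

from contextlib import contextmanager

DELETED = object()   # sentinel marking keys deleted in the cache

class CachingDB:
    def __init__(self, wrapped_db):
        self.wrapped_db = wrapped_db
        self.cache = {}

    def __setitem__(self, key, value):
        self.cache[key] = value

    def __delitem__(self, key):
        self.cache[key] = DELETED

    @contextmanager
    def batch_commit(self, *, do_deletes=False):
        try:
            yield
        except Exception:
            raise                        # discard pending writes on error
        else:
            for key, value in self.cache.items():
                if value is not DELETED:
                    self.wrapped_db[key] = value
                elif do_deletes:
                    self.wrapped_db.pop(key, None)
        finally:
            self.cache = {}

backing = {b'a': b'0'}
db = CachingDB(backing)
with db.batch_commit(do_deletes=True):
    db[b'b'] = b'1'
    del db[b'a']
assert backing == {b'b': b'1'}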
Please provide a description of the function:def _prune_node(self, node):
if self.is_pruning:
# node is mutable, so capture the key for later pruning now
prune_key, node_body = self._node_to_db_mapping(node)
should_prune = (node_body is not None)
else:
should_prune = False
yield
# Prune only if no exception is raised
if should_prune:
del self.db[prune_key] | [
"\n Prune the given node if context exits cleanly.\n "
]
|
Please provide a description of the function:def _normalize_branch_node(self, node):
iter_node = iter(node)
# consume the iterator twice: truthy only if at least two entries are non-blank
if any(iter_node) and any(iter_node):
return node
if node[16]:
return [compute_leaf_key([]), node[16]]
sub_node_idx, sub_node_hash = next(
(idx, v)
for idx, v
in enumerate(node[:16])
if v
)
sub_node = self.get_node(sub_node_hash)
sub_node_type = get_node_type(sub_node)
if sub_node_type in {NODE_TYPE_LEAF, NODE_TYPE_EXTENSION}:
with self._prune_node(sub_node):
new_subnode_key = encode_nibbles(tuple(itertools.chain(
[sub_node_idx],
decode_nibbles(sub_node[0]),
)))
return [new_subnode_key, sub_node[1]]
elif sub_node_type == NODE_TYPE_BRANCH:
subnode_hash = self._persist_node(sub_node)
return [encode_nibbles([sub_node_idx]), subnode_hash]
else:
raise Exception("Invariant: this code block should be unreachable") | [
"\n A branch node which is left with only a single non-blank item should be\n turned into either a leaf or extension node.\n "
]
|
Please provide a description of the function:def _delete_branch_node(self, node, trie_key):
if not trie_key:
node[-1] = BLANK_NODE
return self._normalize_branch_node(node)
node_to_delete = self.get_node(node[trie_key[0]])
sub_node = self._delete(node_to_delete, trie_key[1:])
encoded_sub_node = self._persist_node(sub_node)
if encoded_sub_node == node[trie_key[0]]:
return node
node[trie_key[0]] = encoded_sub_node
if encoded_sub_node == BLANK_NODE:
return self._normalize_branch_node(node)
return node | [
"\n Delete a key from inside or underneath a branch node\n "
]
|
Please provide a description of the function:def get(self, key):
validate_is_bytes(key)
return self._get(self.root_hash, encode_to_bin(key)) | [
"\n Fetches the value with a given keypath from the given node.\n\n Key will be encoded into binary array format first.\n "
]
|
Please provide a description of the function:def _get(self, node_hash, keypath):
# Empty trie
if node_hash == BLANK_HASH:
return None
nodetype, left_child, right_child = parse_node(self.db[node_hash])
# Key-value node descend
if nodetype == LEAF_TYPE:
if keypath:
return None
return right_child
elif nodetype == KV_TYPE:
# Keypath too short
if not keypath:
return None
if keypath[:len(left_child)] == left_child:
return self._get(right_child, keypath[len(left_child):])
else:
return None
# Branch node descend
elif nodetype == BRANCH_TYPE:
# Keypath too short
if not keypath:
return None
if keypath[:1] == BYTE_0:
return self._get(left_child, keypath[1:])
else:
return self._get(right_child, keypath[1:]) | [
"\n Note: keypath should be in binary array format, i.e., encoded by encode_to_bin()\n "
]
|
Please provide a description of the function:def set(self, key, value):
validate_is_bytes(key)
validate_is_bytes(value)
self.root_hash = self._set(self.root_hash, encode_to_bin(key), value) | [
"\n Sets the value at the given keypath from the given node\n\n Key will be encoded into binary array format first.\n "
]
|
Please provide a description of the function:def _set(self, node_hash, keypath, value, if_delete_subtrie=False):
# Empty trie
if node_hash == BLANK_HASH:
if value:
return self._hash_and_save(
encode_kv_node(keypath, self._hash_and_save(encode_leaf_node(value)))
)
else:
return BLANK_HASH
nodetype, left_child, right_child = parse_node(self.db[node_hash])
# Node is a leaf node
if nodetype == LEAF_TYPE:
# Keypath must match, there should be no remaining keypath
if keypath:
raise NodeOverrideError(
"Fail to set the value because the prefix of its key"
" is the same as an existing key")
if if_delete_subtrie:
return BLANK_HASH
return self._hash_and_save(encode_leaf_node(value)) if value else BLANK_HASH
# node is a key-value node
elif nodetype == KV_TYPE:
# Keypath too short
if not keypath:
if if_delete_subtrie:
return BLANK_HASH
else:
raise NodeOverrideError(
"Fail to set the value because its key"
" is the prefix of another existing key")
return self._set_kv_node(
keypath,
node_hash,
nodetype,
left_child,
right_child,
value,
if_delete_subtrie
)
# node is a branch node
elif nodetype == BRANCH_TYPE:
# Keypath too short
if not keypath:
if if_delete_subtrie:
return BLANK_HASH
else:
raise NodeOverrideError(
"Fail to set the value because its key"
" is the prefix of another existing key")
return self._set_branch_node(
keypath,
nodetype,
left_child,
right_child,
value,
if_delete_subtrie
)
raise Exception("Invariant: This shouldn't ever happen") | [
"\n If if_delete_subtrie is set to True, what it will do is that it take in a keypath\n and traverse til the end of keypath, then delete the whole subtrie of that node.\n\n Note: keypath should be in binary array format, i.e., encoded by encode_to_bin()\n "
]
|
Please provide a description of the function:def delete(self, key):
validate_is_bytes(key)
self.root_hash = self._set(self.root_hash, encode_to_bin(key), b'') | [
"\n Equals to setting the value to None\n "
]
|
Please provide a description of the function:def delete_subtrie(self, key):
validate_is_bytes(key)
self.root_hash = self._set(
self.root_hash,
encode_to_bin(key),
value=b'',
if_delete_subtrie=True,
) | [
"\n Given a key prefix, delete the whole subtrie that starts with the key prefix.\n\n Key will be encoded into binary array format first.\n\n It will call `_set` with `if_delete_subtrie` set to True.\n "
]
|
Please provide a description of the function:def _hash_and_save(self, node):
validate_is_bin_node(node)
node_hash = keccak(node)
self.db[node_hash] = node
return node_hash | [
"\n Saves a node into the database and returns its hash\n "
]
|
Please provide a description of the function:def decode_from_bin(input_bin):
for chunk in partition_all(8, input_bin):
yield sum(
2**exp * bit
for exp, bit
in enumerate(reversed(chunk))
) | [
"\n 0100000101010111010000110100100101001001 -> ASCII\n "
]
|
Please provide a description of the function:def encode_to_bin(value):
for char in value:
for exp in EXP:
if char & exp:
yield True
else:
yield False | [
"\n ASCII -> 0100000101010111010000110100100101001001\n "
]
|
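A round trip through the two helpers' logic using plain ints (assumptions: EXP is the descending powers of two, and partition_all in the source comes from toolz):

EXP = (128, 64, 32, 16, 8, 4, 2, 1)

def to_bits(value):
    return [1 if char & exp else 0 for char in value for exp in EXP]

def from_bits(bits):
    return bytes(
        sum(bit << i for i, bit in enumerate(reversed(bits[n:n + 8])))
        for n in range(0, len(bits), 8)
    )

bits = to_bits(b'AW')
assert bits[:8] == [0, 1, 0, 0, 0, 0, 0, 1]   # 'A' == 0b01000001
assert from_bits(bits) == b'AW'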
Please provide a description of the function:def encode_from_bin_keypath(input_bin):
padded_bin = bytes((4 - len(input_bin)) % 4) + input_bin
prefix = TWO_BITS[len(input_bin) % 4]
if len(padded_bin) % 8 == 4:
return decode_from_bin(PREFIX_00 + prefix + padded_bin)
else:
return decode_from_bin(PREFIX_100000 + prefix + padded_bin) | [
"\n Encodes a sequence of 0s and 1s into tightly packed bytes\n Used in encoding key path of a KV-NODE\n "
]
|
Please provide a description of the function:def decode_to_bin_keypath(path):
path = encode_to_bin(path)
if path[0] == 1:
path = path[4:]
assert path[0:2] == PREFIX_00
padded_len = TWO_BITS.index(path[2:4])
return path[4+((4 - padded_len) % 4):] | [
"\n Decodes bytes into a sequence of 0s and 1s\n Used in decoding key path of a KV-NODE\n "
]
|
Please provide a description of the function:def encode_nibbles(nibbles):
if is_nibbles_terminated(nibbles):
flag = HP_FLAG_2
else:
flag = HP_FLAG_0
raw_nibbles = remove_nibbles_terminator(nibbles)
is_odd = len(raw_nibbles) % 2
if is_odd:
flagged_nibbles = tuple(itertools.chain(
(flag + 1,),
raw_nibbles,
))
else:
flagged_nibbles = tuple(itertools.chain(
(flag, 0),
raw_nibbles,
))
prefixed_value = nibbles_to_bytes(flagged_nibbles)
return prefixed_value | [
"\n The Hex Prefix function\n "
]
|
Please provide a description of the function:def decode_nibbles(value):
nibbles_with_flag = bytes_to_nibbles(value)
flag = nibbles_with_flag[0]
needs_terminator = flag in {HP_FLAG_2, HP_FLAG_2 + 1}
is_odd_length = flag in {HP_FLAG_0 + 1, HP_FLAG_2 + 1}
if is_odd_length:
raw_nibbles = nibbles_with_flag[1:]
else:
raw_nibbles = nibbles_with_flag[2:]
if needs_terminator:
nibbles = add_nibbles_terminator(raw_nibbles)
else:
nibbles = raw_nibbles
return nibbles | [
"\n The inverse of the Hex Prefix function\n "
]
|
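Hex-prefix encoding in miniature, matching the flag scheme above (flag 0 for an extension path, 2 for a terminated/leaf path, +1 when the nibble count is odd); the expected outputs follow the classic yellow-paper examples:

def hp_encode(nibbles, terminated):
    flag = 2 if terminated else 0
    if len(nibbles) % 2:                 # odd: flag+1 absorbs the spare nibble slot
        flagged = [flag + 1] + list(nibbles)
    else:                                # even: pad with a zero nibble
        flagged = [flag, 0] + list(nibbles)
    return bytes(16 * flagged[i] + flagged[i + 1] for i in range(0, len(flagged), 2))

assert hp_encode([1, 2, 3], terminated=False) == b'\x11\x23'
assert hp_encode([0, 1], terminated=True) == b'\x20\x01'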
Please provide a description of the function:def get_video_json(video):
return json.dumps({
'id': video.id,
'edit_link': reverse('wagtailvideos:edit', args=(video.id,)),
'title': video.title,
'preview': {
'url': video.thumbnail.url if video.thumbnail else '',
}
}) | [
"\n helper function: given an image, return the json to pass back to the\n image chooser panel\n "
]
|
Please provide a description of the function:def get_local_file(file):
try:
with open(file.path):
yield file.path
except NotImplementedError:
_, ext = os.path.splitext(file.name)
with NamedTemporaryFile(prefix='wagtailvideo-', suffix=ext) as tmp:
try:
file.open('rb')
for chunk in file.chunks():
tmp.write(chunk)
finally:
file.close()
tmp.flush()
yield tmp.name | [
"\n Get a local version of the file, downloading it from the remote storage if\n required. The returned value should be used as a context manager to\n ensure any temporary files are cleaned up afterwards.\n "
]
|
Please provide a description of the function:def rustcall(func, *args):
lib.semaphore_err_clear()
rv = func(*args)
err = lib.semaphore_err_get_last_code()
if not err:
return rv
msg = lib.semaphore_err_get_last_message()
cls = exceptions_by_code.get(err, SemaphoreError)
exc = cls(decode_str(msg))
backtrace = decode_str(lib.semaphore_err_get_backtrace())
if backtrace:
exc.rust_info = backtrace
raise exc | [
"Calls rust method and does some error handling."
]
|
Please provide a description of the function:def decode_str(s, free=False):
try:
if s.len == 0:
return u""
return ffi.unpack(s.data, s.len).decode("utf-8", "replace")
finally:
if free:
lib.semaphore_str_free(ffi.addressof(s)) | [
"Decodes a SymbolicStr"
]
|
Please provide a description of the function:def encode_str(s, mutable=False):
rv = ffi.new("SemaphoreStr *")
if isinstance(s, text_type):
s = s.encode("utf-8")
if mutable:
s = bytearray(s)
rv.data = ffi.from_buffer(s)
rv.len = len(s)
# we have to hold a weak reference here to ensure our string does not
# get collected before the string is used.
attached_refs[rv] = s
return rv | [
"Encodes a SemaphoreStr"
]
|
Please provide a description of the function:def decode_uuid(value):
return uuid.UUID(bytes=bytes(bytearray(ffi.unpack(value.data, 16)))) | [
"Decodes the given uuid value."
]
|
Please provide a description of the function:def has_cargo_fmt():
try:
c = subprocess.Popen(
["cargo", "fmt", "--", "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return c.wait() == 0
except OSError:
return False | [
"Runs a quick check to see if cargo fmt is installed."
]
|
Please provide a description of the function:def get_modified_files():
c = subprocess.Popen(
["git", "diff-index", "--cached", "--name-only", "HEAD"], stdout=subprocess.PIPE
)
return c.communicate()[0].splitlines() | [
"Returns a list of all modified files."
]
|
Please provide a description of the function:def _get_next_chunk(fp, previously_read_position, chunk_size):
seek_position, read_size = _get_what_to_read_next(fp, previously_read_position, chunk_size)
fp.seek(seek_position)
read_content = fp.read(read_size)
read_position = seek_position
return read_content, read_position | [
"Return next chunk of data that we would from the file pointer.\n\n Args:\n fp: file-like object\n previously_read_position: file pointer position that we have read from\n chunk_size: desired read chunk_size\n\n Returns:\n (bytestring, int): data that has been read in, the file pointer position where the data has been read from\n "
]
|
Please provide a description of the function:def _get_what_to_read_next(fp, previously_read_position, chunk_size):
seek_position = max(previously_read_position - chunk_size, 0)
read_size = chunk_size
# examples: say, our new_lines are potentially "\r\n", "\n", "\r"
# find a reading point where it is not "\n", rewind further if necessary
# if we have "\r\n" and we read in "\n",
# the next iteration would treat "\r" as a different new line.
# Q: why don't I just check if it is b"\n", but use a function ?
# A: so that we can potentially expand this into generic sets of separators, later on.
while seek_position > 0:
fp.seek(seek_position)
if _is_partially_read_new_line(fp.read(1)):
seek_position -= 1
read_size += 1 # as we rewind further, let's make sure we read more to compensate
else:
break
# take care of special case when we are back to the beginning of the file
read_size = min(previously_read_position - seek_position, read_size)
return seek_position, read_size | [
"Return information on which file pointer position to read from and how many bytes.\n\n Args:\n fp\n past_read_positon (int): The file pointer position that has been read previously\n chunk_size(int): ideal io chunk_size\n\n Returns:\n (int, int): The next seek position, how many bytes to read next\n "
]
|
Please provide a description of the function:def _remove_trailing_new_line(l):
# replace only 1 instance of newline
# match the longest newline sequence first (hence the reverse=True); we want to match "\r\n" rather than "\n" if we can
for n in sorted(new_lines_bytes, key=lambda x: len(x), reverse=True):
if l.endswith(n):
remove_new_line = slice(None, -len(n))
return l[remove_new_line]
return l | [
"Remove a single instance of new line at the end of l if it exists.\n\n Returns:\n bytestring\n "
]
|
Please provide a description of the function:def _find_furthest_new_line(read_buffer):
new_line_positions = [read_buffer.rfind(n) for n in new_lines_bytes]
return max(new_line_positions) | [
"Return -1 if read_buffer does not contain new line otherwise the position of the rightmost newline.\n\n Args:\n read_buffer (bytestring)\n\n Returns:\n int: The right most position of new line character in read_buffer if found, else -1\n "
]
|
Please provide a description of the function:def add_to_buffer(self, content, read_position):
self.read_position = read_position
if self.read_buffer is None:
self.read_buffer = content
else:
self.read_buffer = content + self.read_buffer | [
"Add additional bytes content as read from the read_position.\n\n Args:\n content (bytes): data to be added to buffer working BufferWorkSpac.\n read_position (int): where in the file pointer the data was read from.\n "
]
|
Please provide a description of the function:def yieldable(self):
if self.read_buffer is None:
return False
t = _remove_trailing_new_line(self.read_buffer)
n = _find_furthest_new_line(t)
if n >= 0:
return True
# we have read in entire file and have some unprocessed lines
if self.read_position == 0 and self.read_buffer is not None:
return True
return False | [
"Return True if there is a line that the buffer can return, False otherwise."
]
|
Please provide a description of the function:def return_line(self):
assert(self.yieldable())
t = _remove_trailing_new_line(self.read_buffer)
i = _find_furthest_new_line(t)
if i >= 0:
l = i + 1
after_new_line = slice(l, None)
up_to_include_new_line = slice(0, l)
r = t[after_new_line]
self.read_buffer = t[up_to_include_new_line]
else: # the case where we have read in entire file and at the "last" line
r = t
self.read_buffer = None
return r | [
"Return a new line if it is available.\n\n Precondition: self.yieldable() must be True\n "
]
|
Please provide a description of the function:def read_until_yieldable(self):
while not self.yieldable():
read_content, read_position = _get_next_chunk(self.fp, self.read_position, self.chunk_size)
self.add_to_buffer(read_content, read_position) | [
"Read in additional chunks until it is yieldable."
]
|
Please provide a description of the function:def readline(self):
try:
r = next(self.iterator) + os.linesep
return r
except StopIteration:
return "" | [
"Return a line content (with a trailing newline) if there are content. Return '' otherwise."
]
|
Please provide a description of the function:def next(self):
# Using binary mode, because some encodings such as "utf-8" use variable number of
# bytes to encode different Unicode points.
# Without using binary mode, we would probably need to understand each encoding more
# and do the seek operations to find the proper boundary before issuing read
if self.closed:
raise StopIteration
if self.__buf.has_returned_every_line():
self.close()
raise StopIteration
self.__buf.read_until_yieldable()
r = self.__buf.return_line()
return r.decode(self.encoding) | [
"Returns unicode string from the last line until the beginning of file.\n\n Gets exhausted if::\n\n * already reached the beginning of the file on previous iteration\n * the file got closed\n\n When it gets exhausted, it closes the file handler.\n "
]
|
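If the separator set is restricted to b'\n' alone, the whole backward reader collapses into a short generator; a sketch under that assumption (the library's rewind-and-compensate logic exists precisely because it also handles '\r' and '\r\n'):

import io

def lines_backwards(fp, chunk_size=4):
    fp.seek(0, io.SEEK_END)
    pos = fp.tell()
    buf = b""
    stripped = False
    while pos > 0:
        read_from = max(pos - chunk_size, 0)
        fp.seek(read_from)
        buf = fp.read(pos - read_from) + buf
        pos = read_from
        if not stripped:                 # drop the file's single trailing newline
            if buf.endswith(b"\n"):
                buf = buf[:-1]
            stripped = True
        while True:
            i = buf.rfind(b"\n")
            if i == -1:
                break                    # the leading part may still be a partial line
            yield buf[i + 1:]
            buf = buf[:i]
    if buf:
        yield buf                        # the first line of the file

fp = io.BytesIO(b"one\ntwo\nthree\n")
assert list(lines_backwards(fp)) == [b"three", b"two", b"one"]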
Please provide a description of the function:def save_chat_message(*args, **kwargs):
data = kwargs.get('data')
if data.get('message') and data.get('channel'):
cm = ChatMessage.objects.create(
sender=data.get('sender'),
content=data.get('message'),
channel=data.get('channel')
)
t = loader.get_template('message.html')
# now send broadcast a message back to anyone listening on the channel
hey_joe.send({'html': t.render({'message': cm})}, cm.channel) | [
"\n kwargs will always include:\n\n 'data': \n # will always be exactly what your client sent on the socket\n # in this case...\n {u'message': u'hi', u'sender': u'anonymous', u'channel': u'homepage'},\n\n 'dispatcher': \n # the dispatcher that will allow for broadcasting a response\n <hendrix.contrib.concurrency.messaging.MessageDispatcher object at 0x10ddb1c10>,\n\n "
]
|
Please provide a description of the function:def home(request, chat_channel_name=None):
if not chat_channel_name:
chat_channel_name = 'homepage'
context = {
'address': chat_channel_name,
'history': [],
}
if ChatMessage.objects.filter(channel=chat_channel_name).exists():
context['history'] = ChatMessage.objects.filter(
channel=chat_channel_name)
# TODO add https
websocket_prefix = "ws"
websocket_port = 9000
context['websocket_prefix'] = websocket_prefix
context['websocket_port'] = websocket_port
return render(request, 'chat.html', context) | [
"\n if we have a chat_channel_name kwarg,\n have the response include that channel name\n so the javascript knows to subscribe to that\n channel...\n "
]
|
Please provide a description of the function:def hendrixLauncher(action, options, with_tiempo=False):
if options['key'] and options['cert'] and options['cache']:
from hendrix.deploy import hybrid
HendrixDeploy = hybrid.HendrixDeployHybrid
elif options['key'] and options['cert']:
from hendrix.deploy import tls
HendrixDeploy = tls.HendrixDeployTLS
elif options['cache']:
HendrixDeploy = cache.HendrixDeployCache
else:
HendrixDeploy = base.HendrixDeploy
if with_tiempo:
deploy = HendrixDeploy(action='start', options=options)
deploy.run()
else:
deploy = HendrixDeploy(action, options)
deploy.run() | [
"\n Decides which version of HendrixDeploy to use and then\n launches it.\n "
]
|
Please provide a description of the function:def logReload(options):
event_handler = Reload(options)
observer = Observer()
observer.schedule(event_handler, path='.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
pid = os.getpid()
chalk.eraser()
chalk.green('\nHendrix successfully closed.')
os.kill(pid, 15)
observer.join()
exit('\n') | [
"\n encompasses all the logic for reloading observer.\n "
]
|
Please provide a description of the function:def launch(*args, **options):
action = args[0]
if options['reload']:
logReload(options)
else:
assignDeploymentInstance(action, options) | [
"\n launch acts on the user specified action and options by executing\n Hedrix.run\n "
]
|
Please provide a description of the function:def findSettingsModule():
"Find the settings module dot path within django's manage.py file"
try:
with open('manage.py', 'r') as manage:
manage_contents = manage.read()
search = re.search(
r"([\"\'](?P<module>[a-z\.]+)[\"\'])", manage_contents
)
if search: # django version < 1.7
settings_mod = search.group("module")
else:
# in 1.7, manage.py settings declaration looks like:
# os.environ.setdefault(
# "DJANGO_SETTINGS_MODULE", "example_app.settings"
# )
search = re.search(
"\".*?\"(,\\s)??\"(?P<module>.*?)\"\\)$",
manage_contents, re.I | re.S | re.M
)
settings_mod = search.group("module")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_mod)
except IOError as e:
msg = (
str(e) + '\nPlease ensure that you are in the same directory '
'as django\'s "manage.py" file.'
)
raise IOError(chalk.red(msg), None, sys.exc_info()[2])
except AttributeError:
settings_mod = ''
return settings_mod | []
|
Please provide a description of the function:def subprocessLaunch():
if not redis_available:
raise RedisException("can't launch this subprocess without tiempo/redis.")
try:
action = 'start'
options = REDIS.get('worker_args')
assignDeploymentInstance(action='start', options=options)
except Exception:
chalk.red('\n Encountered an unhandled exception while trying to %s hendrix.\n' % action, pipe=chalk.stderr)
raise | [
"\n This function is called by the hxw script.\n It takes no arguments, and returns an instance of HendrixDeploy\n "
]
|
Please provide a description of the function:def main(args=None):
"The function to execute when running hx"
if args is None:
args = sys.argv[1:]
options, args = HendrixOptionParser.parse_args(args)
options = vars(options)
try:
action = args[0]
except IndexError:
HendrixOptionParser.print_help()
return
exposeProject(options)
options = djangoVsWsgi(options)
options = devFriendly(options)
redirect = noiseControl(options)
try:
launch(*args, **options)
except Exception:
chalk.red('\n Encountered an unhandled exception while trying to %s hendrix.\n' % action, pipe=chalk.stderr)
raise | []
|
Please provide a description of the function:def handleHeader(self, key, value):
"extends handleHeader to save headers to a local response object"
key_lower = key.lower()
if key_lower == 'location':
value = self.modLocationPort(value)
self._response.headers[key_lower] = value
if key_lower != 'cache-control':
# This causes us to not pass on the 'cache-control' parameter
# to the browser
# TODO: we should have a means of giving the user the option to
# configure how they want to manage browser-side cache control
proxy.ProxyClient.handleHeader(self, key, value) | []
|
Please provide a description of the function:def handleStatus(self, version, code, message):
"extends handleStatus to instantiate a local response object"
proxy.ProxyClient.handleStatus(self, version, code, message)
# client.Response is currently just a container for needed data
self._response = client.Response(version, code, message, {}, None) | []
|
Please provide a description of the function:def modLocationPort(self, location):
components = urlparse.urlparse(location)
reverse_proxy_port = self.father.getHost().port
reverse_proxy_host = self.father.getHost().host
# returns an ordered dict of urlparse.ParseResult components
_components = components._asdict()
_components['netloc'] = '%s:%d' % (
reverse_proxy_host, reverse_proxy_port
)
return urlparse.urlunparse(_components.values()) | [
"\n Ensures that the location port is a the given port value\n Used in `handleHeader`\n "
]
|
Please provide a description of the function:def handleResponseEnd(self):
try:
if not self._finished:
reactor.callInThread(
self.resource.cacheContent,
self.father,
self._response,
self.buffer
)
proxy.ProxyClient.handleResponseEnd(self)
except RuntimeError:
# because we don't care if the user hits
# refresh before the request is done
pass | [
"\n Extends handleResponseEnd to not care about the user closing/refreshing\n their browser before the response is finished. Also calls cacheContent\n in a thread that we don't care when it finishes.\n "
]
|
Please provide a description of the function:def handleResponsePart(self, buffer):
self.father.write(buffer)
self.buffer.write(buffer) | [
"\n Sends the content to the browser and keeps a local copy of it.\n buffer is just a str of the content to be shown, father is the intial\n request.\n "
]
|
Please provide a description of the function:def getChild(self, path, request):
return CacheProxyResource(
self.host, self.port, self.path + '/' + urlquote(path, safe=""),
self.reactor
) | [
"\n This is necessary because the parent class would call\n proxy.ReverseProxyResource instead of CacheProxyResource\n "
]
|
Please provide a description of the function:def getChildWithDefault(self, path, request):
cached_resource = self.getCachedResource(request)
if cached_resource:
reactor.callInThread(
responseInColor,
request,
'200 OK',
cached_resource,
'Cached',
'underscore'
)
return cached_resource
# original logic
if path in self.children:
return self.children[path]
return self.getChild(path, request) | [
"\n Retrieve a static or dynamically generated child resource from me.\n "
]
|
Please provide a description of the function:def render(self, request):
# set up and evaluate a connection to the target server
if self.port == 80:
host = self.host
else:
host = "%s:%d" % (self.host, self.port)
request.requestHeaders.addRawHeader('host', host)
request.content.seek(0, 0)
qs = urlparse.urlparse(request.uri)[4]
if qs:
rest = self.path + '?' + qs
else:
rest = self.path
global_self = self.getGlobalSelf()
clientFactory = self.proxyClientFactoryClass(
request.method, rest, request.clientproto,
request.getAllHeaders(), request.content.read(), request,
global_self # this is new
)
self.reactor.connectTCP(self.host, self.port, clientFactory)
return NOT_DONE_YET | [
"\n Render a request by forwarding it to the proxied server.\n "
]
|
Please provide a description of the function:def getGlobalSelf(self):
transports = self.reactor.getReaders()
for transport in transports:
try:
resource = transport.factory.resource
if isinstance(resource, self.__class__) and resource.port == self.port:
return resource
except AttributeError:
pass
return | [
"\n This searches the reactor for the original instance of\n CacheProxyResource. This is necessary because with each call of\n getChild a new instance of CacheProxyResource is created.\n "
]
|
Please provide a description of the function:def dataReceived(self, data):
try:
address = self.guid
data = json.loads(data)
threads.deferToThread(send_signal, self.dispatcher, data)
if 'hx_subscribe' in data:
return self.dispatcher.subscribe(self.transport, data)
if 'address' in data:
address = data['address']
else:
address = self.guid
self.dispatcher.send(address, data)
except Exception as e:
# report the failure back to the sender before propagating it
self.dispatcher.send(
self.guid,
{'message': data, 'error': str(e)}
)
raise | [
"\n Takes \"data\" which we assume is json encoded\n If data has a subject_id attribute, we pass that to the dispatcher\n as the subject_id so it will get carried through into any\n return communications and be identifiable to the client\n\n falls back to just passing the message along...\n\n "
]
|
Please provide a description of the function:def connectionMade(self):
self.transport.uid = str(uuid.uuid1())
self.guid = self.dispatcher.add(self.transport)
self.dispatcher.send(self.guid, {'setup_connection': self.guid}) | [
"\n establish the address of this new connection and add it to the list of\n sockets managed by the dispatcher\n\n reply to the transport with a \"setup_connection\" notice\n containing the recipient's address for use by the client as a return\n address for future communications\n "
]
|
Please provide a description of the function:def generateInitd(conf_file):
allowed_opts = [
'virtualenv', 'project_path', 'settings', 'processes',
'http_port', 'cache', 'cache_port', 'https_port', 'key', 'cert'
]
base_opts = ['--daemonize', ] # always daemonize
options = base_opts
with open(conf_file, 'r') as cfg:
conf = yaml.safe_load(cfg)
conf_specs = set(conf.keys())
if len(conf_specs - set(allowed_opts)):
raise RuntimeError('Improperly configured.')
try:
virtualenv = conf.pop('virtualenv')
project_path = conf.pop('project_path')
except KeyError:
raise RuntimeError('Improperly configured.')
cache = False
if 'cache' in conf:
cache = conf.pop('cache')
if not cache:
options.append('--nocache')
workers = 0
if 'processes' in conf:
processes = conf.pop('processes')
workers = int(processes) - 1
if workers > 0:
options += ['--workers', str(workers)]
for key, value in conf.items():
options += ['--%s' % key, str(value)]
with open(os.path.join(SHARE_PATH, 'init.d.j2'), 'r') as f:
TEMPLATE_FILE = f.read()
template = jinja2.Template(TEMPLATE_FILE)
initd_content = template.render(
{
'venv_path': virtualenv,
'project_path': project_path,
'hendrix_opts': ' '.join(options)
}
)
return initd_content | [
"\n Helper function to generate the text content needed to create an init.d\n executable\n "
]
|
Please provide a description of the function:def startResponse(self, status, headers, excInfo=None):
self.status = status
self.headers = headers
self.reactor.callInThread(
responseInColor, self.request, status, headers
)
return self.write | [
"\n extends startResponse to call speakerBox in a thread\n "
]
|
Please provide a description of the function:def processURI(self, uri, prefix=''):
components = urlparse.urlparse(uri)
query = dict(urlparse.parse_qsl(components.query))
bust = True
bust &= bool(query) # bust the cache if the query has stuff in it
# bust the cache if the query key 'cache' isn't true
bust &= query.get('cache') != 'true'
return prefix + components.path, bust | [
"\n helper function to return just the path (uri) and whether or not it's\n busted\n "
]
|
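The cache-bust rule in isolation: any query string busts the cache unless it is exactly asking for cached content. A Python 3 sketch:

from urllib.parse import urlparse, parse_qsl

def busts_cache(uri):
    components = urlparse(uri)
    query = dict(parse_qsl(components.query))
    return bool(query) and query.get('cache') != 'true'

assert busts_cache('/page') is False            # no query string
assert busts_cache('/page?cache=true') is False # explicitly allows the cache
assert busts_cache('/page?user=7') is True      # arbitrary params bust the cache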
Please provide a description of the function:def cacheContent(self, request, response, buffer):
content = buffer.getvalue()
code = int(response.code)
cache_it = False
uri, bust = self.processURI(request.uri, PREFIX)
# Conditions for adding uri response to cache:
# * if it was successful i.e. status in the 200s
# * requested using GET
# * not busted
if request.method == "GET" and code // 100 == 2 and not bust:
cache_control = response.headers.get('cache-control')
if cache_control:
params = dict(urlparse.parse_qsl(cache_control))
if int(params.get('max-age', '0')) > 0:
cache_it = True
if cache_it:
content = compressBuffer(content)
self.addResource(content, uri, response.headers)
buffer.close() | [
"\n Checks if the response should be cached.\n Caches the content in a gzipped format given that a `cache_it` flag is\n True\n To be used CacheClient\n "
]
|
Please provide a description of the function:def get_additional_services(settings_module):
additional_services = []
if hasattr(settings_module, 'HENDRIX_SERVICES'):
for name, module_path in settings_module.HENDRIX_SERVICES:
path_to_module, service_name = module_path.rsplit('.', 1)
resource_module = importlib.import_module(path_to_module)
additional_services.append(
(name, getattr(resource_module, service_name))
)
return additional_services | [
"\n if HENDRIX_SERVICES is specified in settings_module,\n it should be a list twisted internet services\n\n example:\n\n HENDRIX_SERVICES = (\n ('myServiceName', 'apps.offload.services.TimeService'),\n )\n "
]
|
Please provide a description of the function:def get_additional_resources(settings_module):
additional_resources = []
if hasattr(settings_module, 'HENDRIX_CHILD_RESOURCES'):
for module_path in settings_module.HENDRIX_CHILD_RESOURCES:
path_to_module, resource_name = module_path.rsplit('.', 1)
resource_module = importlib.import_module(path_to_module)
additional_resources.append(
getattr(resource_module, resource_name)
)
return additional_resources | [
"\n if HENDRIX_CHILD_RESOURCES is specified in settings_module,\n it should be a list resources subclassed from hendrix.contrib.NamedResource\n\n example:\n\n HENDRIX_CHILD_RESOURCES = (\n 'apps.offload.resources.LongRunningProcessResource',\n 'apps.chat.resources.ChatResource',\n )\n "
]
|
Please provide a description of the function:def getConf(cls, settings, options):
"updates the options dict to use config options in the settings module"
ports = ['http_port', 'https_port', 'cache_port']
for port_name in ports:
port = getattr(settings, port_name.upper(), None)
# only use the settings ports if the defaults were left unchanged
default = getattr(defaults, port_name.upper())
if port and options.get(port_name) == default:
options[port_name] = port
_opts = [
('key', 'hx_private_key'),
('cert', 'hx_certficate'),
('wsgi', 'wsgi_application')
]
for opt_name, settings_name in _opts:
opt = getattr(settings, settings_name.upper(), None)
if opt:
options[opt_name] = opt
if not options['settings']:
options['settings'] = environ['DJANGO_SETTINGS_MODULE']
return options | []
|
Please provide a description of the function:def addHendrix(self):
'''
Instantiates a HendrixService with this object's threadpool.
It will be added as a service later.
'''
self.hendrix = HendrixService(
self.application,
threadpool=self.getThreadPool(),
resources=self.resources,
services=self.services,
loud=self.options['loud']
)
if self.options["https_only"] is not True:
self.hendrix.spawn_new_server(self.options['http_port'], HendrixTCPService) | []
|
Please provide a description of the function:def catalogServers(self, hendrix):
"collects a list of service names serving on TCP or SSL"
for service in hendrix.services:
if isinstance(service, (TCPServer, SSLServer)):
self.servers.append(service.name) | []
|
Please provide a description of the function:def run(self):
"sets up the desired services and runs the requested action"
self.addServices()
self.catalogServers(self.hendrix)
action = self.action
fd = self.options['fd']
if action.startswith('start'):
chalk.blue(self._listening_message())
getattr(self, action)(fd)
###########################
# annnnd run the reactor! #
###########################
try:
self.reactor.run()
finally:
shutil.rmtree(PID_DIR, ignore_errors=True) # cleanup tmp PID dir
elif action == 'restart':
getattr(self, action)(fd=fd)
else:
getattr(self, action)() | []
|
Please provide a description of the function:def setFDs(self):
# 0 corresponds to stdin, 1 to stdout, 2 to stderr
self.childFDs = {0: 0, 1: 1, 2: 2}
self.fds = {}
for name in self.servers:
self.port = self.hendrix.get_port(name)
fd = self.port.fileno()
self.childFDs[fd] = fd
self.fds[name] = fd | [
"\n Iterator for file descriptors.\n Seperated from launchworkers for clarity and readability.\n "
]
|
Please provide a description of the function:def addSubprocess(self, fds, name, factory):
self._lock.run(self._addSubprocess, self, fds, name, factory) | [
"\n Public method for _addSubprocess.\n Wraps reactor.adoptStreamConnection in \n a simple DeferredLock to guarantee\n workers play well together.\n "
]
|
Please provide a description of the function:def disownService(self, name):
_service = self.hendrix.getServiceNamed(name)
_service.disownServiceParent()
return _service.factory | [
"\n disowns a service on hendirix by name\n returns a factory for use in the adoptStreamPort part of setting up\n multiple processes\n "
]
|
Please provide a description of the function:def get_pid(options):
namespace = options['settings'] if options['settings'] else options['wsgi']
return os.path.join('{}', '{}_{}.pid').format(PID_DIR, options['http_port'], namespace.replace('.', '_')) | [
"returns The default location of the pid file for process management"
]
|
Please provide a description of the function:def responseInColor(request, status, headers, prefix='Response', opts=None):
"Prints the response info in color"
code, message = status.split(None, 1)
message = '%s [%s] => Request %s %s %s on pid %d' % (
prefix,
code,
str(request.host),
request.method,
request.path,
os.getpid()
)
signal = int(code) // 100
if signal == 2:
chalk.green(message, opts=opts)
elif signal == 3:
chalk.blue(message, opts=opts)
else:
chalk.red(message, opts=opts) | []
|
Please provide a description of the function:def import_string(dotted_path):
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
dotted_path, class_name
)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2]) | [
"\n Import a dotted module path and return the attribute/class designated by\n the last name in the path. Raise ImportError if the import failed.\n "
]
|
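Typical usage of import_string, shown against stdlib targets (assumes six is installed, since the function above depends on it):

import os

path_join = import_string('os.path.join')
assert path_join is os.path.join

try:
    import_string('os.path.not_a_real_name')    # hypothetical missing attribute
except ImportError as exc:
    print(exc)   # Module "os.path" does not define a "not_a_real_name" attribute/class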
Please provide a description of the function:def addLocalCacheService(self):
"adds a CacheService to the instatiated HendrixService"
_cache = self.getCacheService()
_cache.setName('cache_proxy')
_cache.setServiceParent(self.hendrix) | []
|
Please provide a description of the function:def addGlobalServices(self):
if self.options.get('global_cache') and self.options.get('cache'):
# only add the cache service here if the global_cache and cache
# options were set to True
_cache = self.getCacheService()
_cache.startService() | [
"\n This is where we put service that we don't want to be duplicated on\n worker subprocesses\n "
]
|
Please provide a description of the function:def DjangoStaticResource(path, rel_url='static'):
rel_url = rel_url.strip('/')
StaticFilesResource = MediaResource(path)
StaticFilesResource.namespace = rel_url
chalk.green(
"Adding media resource for URL '%s' at path '%s'" % (rel_url, path)
)
return StaticFilesResource | [
"\n takes an app level file dir to find the site root and servers static files\n from static\n Usage:\n [...in app.resource...]\n from hendrix.resources import DjangoStaticResource\n StaticResource = DjangoStaticResource('/abspath/to/static/folder')\n ... OR ...\n StaticResource = DjangoStaticResource(\n '/abspath/to/static/folder', 'custom-static-relative-url'\n )\n\n [...in settings...]\n HENDRIX_CHILD_RESOURCES = (\n ...,\n 'app.resource.StaticResource',\n ...\n )\n "
]
|
Please provide a description of the function:def getChild(self, name, request):
request.prepath = []
request.postpath.insert(0, name)
# re-establishes request.postpath so to contain the entire path
return self.wsgi_resource | [
"\n Postpath needs to contain all segments of\n the url, if it is incomplete then that incomplete url will be passed on\n to the child resource (in this case our wsgi application).\n "
]
|
Please provide a description of the function:def putNamedChild(self, res):
try:
EmptyResource = resource.Resource
namespace = res.namespace
parts = namespace.strip('/').split('/')
# initialise parent and children
parent = self
children = self.children
# loop through all of the path parts except for the last one
for name in parts[:-1]:
child = children.get(name)
if not child:
# if the child does not exist then create an empty one
# and associate it to the parent
child = EmptyResource()
parent.putChild(name, child)
# update parent and children for the next iteration
parent = child
children = parent.children
name = parts[-1] # get the path part that we care about
if children.get(name):
self.logger.warn(
'A resource already exists at this path. Check '
'your resources list to ensure each path is '
'unique. The previous resource will be overridden.'
)
parent.putChild(name, res)
except AttributeError:
# raise an attribute error if the resource `res` doesn't contain
# the attribute `namespace`
msg = (
'%r improperly configured. additional_resources instances must'
' have a namespace attribute'
) % res
raise AttributeError(msg, None, sys.exc_info()[2]) | [
"\n putNamedChild takes either an instance of hendrix.contrib.NamedResource\n or any resource.Resource with a \"namespace\" attribute as a means of\n allowing application level control of resource namespacing.\n\n if a child is already found at an existing path,\n resources with paths that are children of those physical paths\n will be added as children of those resources\n\n "
]
|
Please provide a description of the function:def send_json_message(address, message, **kwargs):
data = {
'message': message,
}
if not kwargs.get('subject_id'):
data['subject_id'] = address
data.update(kwargs)
hxdispatcher.send(address, data) | [
"\n a shortcut for message sending\n "
]
|
Please provide a description of the function:def send_callback_json_message(value, *args, **kwargs):
if value:
kwargs['result'] = value
send_json_message(args[0], args[1], **kwargs)
return value | [
"\n useful for sending messages from callbacks as it puts the\n result of the callback in the dict for serialization\n "
]
|
Please provide a description of the function:def send(self, message): # usually a json string...
for transport in self.transports.values():
transport.protocol.sendMessage(message) | [
"\n sends whatever it is to each transport\n "
]
|
Please provide a description of the function:def remove(self, transport):
if transport.uid in self.transports:
del (self.transports[transport.uid]) | [
"\n removes a transport if a member of this group\n "
]
|
Please provide a description of the function:def add(self, transport, address=None):
if not address:
address = str(uuid.uuid1())
if address in self.recipients:
self.recipients[address].add(transport)
else:
self.recipients[address] = RecipientManager(transport, address)
return address | [
"\n add a new recipient to be addressable by this MessageDispatcher\n generate a new uuid address if one is not specified\n "
]
|