text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---|
def _complete_history(self, cmd, args, text):
"""Find candidates for the 'history' command."""
if args:
return
return [ x for x in { 'clear', 'clearall' } \
if x.startswith(text) ] | 0.034483 |
def register(self, username=""):
"""Performs /register with type: m.login.application_service
Args:
username(str): Username to register.
"""
if not username:
username = utils.mxid2localpart(self.identity)
content = {
"type": "m.login.application_service",
"username": username,
}
return self._send("POST", "/register", content,
api_path=MATRIX_V2_API_PATH) | 0.004107 |
def bsp_traverse_post_order(
node: tcod.bsp.BSP,
callback: Callable[[tcod.bsp.BSP, Any], None],
userData: Any = 0,
) -> None:
"""Traverse this nodes hierarchy with a callback.
.. deprecated:: 2.0
Use :any:`BSP.post_order` instead.
"""
_bsp_traverse(node.post_order(), callback, userData) | 0.003096 |
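Given the deprecation note, the presumably equivalent traversal through the non-deprecated API would be along these lines (a sketch, not taken from the library; node, callback and userData are the arguments above):

# Presumed equivalent of the deprecated helper, iterating BSP.post_order directly.
for child in node.post_order():
    callback(child, userData)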
def einstein_mass_in_units(self, unit_mass='angular', critical_surface_density=None):
"""The Einstein Mass of this galaxy, which is the sum of Einstein Radii of its mass profiles.
If the galaxy is composed of multiple elliptical profiles with different axis-ratios, this Einstein Mass \
may be inaccurate. This is because the differently oriented ellipses of each mass profile """
if self.has_mass_profile:
return sum(
map(lambda p: p.einstein_mass_in_units(unit_mass=unit_mass,
critical_surface_density=critical_surface_density),
self.mass_profiles))
else:
return None | 0.009642 |
def make_filter_string(cls, filter_specification):
"""
Converts the given filter specification to a CQL filter expression.
"""
registry = get_current_registry()
visitor_cls = registry.getUtility(IFilterSpecificationVisitor,
name=EXPRESSION_KINDS.CQL)
visitor = visitor_cls()
filter_specification.accept(visitor)
return str(visitor.expression) | 0.004464 |
def add_feature(self,var):
"""Add a feature to `self`.
:Parameters:
- `var`: the feature name.
:Types:
- `var`: `unicode`"""
if self.has_feature(var):
return
n=self.xmlnode.newChild(None, "feature", None)
n.setProp("var", to_utf8(var)) | 0.0125 |
def getPartnerURL(self, CorpNum, TOGO):
""" ํ๋น ํ์ ์์ฌํฌ์ธํธ ํ์ธ
args
CorpNum : ํ๋นํ์ ์ฌ์
์๋ฒํธ
TOGO : "CHRG"
return
URL
raise
PopbillException
"""
try:
return linkhub.getPartnerURL(self._getToken(CorpNum), TOGO)
except LinkhubException as LE:
raise PopbillException(LE.code, LE.message) | 0.00463 |
def _basicsize(t, base=0, heap=False, obj=None):
'''Get non-zero basicsize of type,
including the header sizes.
'''
s = max(getattr(t, '__basicsize__', 0), base)
# include gc header size
if t != _Type_type:
h = getattr(t, '__flags__', 0) & _Py_TPFLAGS_HAVE_GC
elif heap: # type, allocated on heap
h = True
else: # None has no __flags__ attr
h = getattr(obj, '__flags__', 0) & _Py_TPFLAGS_HEAPTYPE
if h:
s += _sizeof_CPyGC_Head
# include reference counters
return s + _sizeof_Crefcounts | 0.014134 |
def get_property(self, remote_path, option):
"""Gets metadata property of remote resource on WebDAV server.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:param remote_path: the path to remote resource.
:param option: the property attribute as a dictionary with the following keys:
`namespace`: (optional) the namespace of the XML property to retrieve,
`name`: the name of the property to retrieve.
:return: the value of property or None if property is not found.
"""
urn = Urn(remote_path)
if not self.check(urn.path()):
raise RemoteResourceNotFound(urn.path())
data = WebDavXmlUtils.create_get_property_request_content(option)
response = self.execute_request(action='get_property', path=urn.quote(), data=data)
return WebDavXmlUtils.parse_get_property_response(response.content, option['name']) | 0.007085 |
def unembed_samples(samples, embedding, chain_break_method=None):
"""Return samples over the variables in the source graph.
Args:
samples (iterable): An iterable of samples where each sample
is a dict of the form {v: val, ...} where v is a variable
in the target model and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
chain_break_method (function, optional): The method used to resolve chain
breaks. Default is :method:`majority_vote`.
Returns:
list: A list of unembedded samples. Each sample is a dict of the form
{v: val, ...} where v is a variable in the source graph and val
is the value associated with the variable.
"""
if chain_break_method is None:
chain_break_method = majority_vote
return list(itertools.chain(*(chain_break_method(sample, embedding) for sample in samples))) | 0.003442 |
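A small usage sketch for the function above; the embedding and target-level sample below are made-up values, and it assumes the default majority_vote resolver picks the most common value within each broken chain:

# Hypothetical source variables 'a' and 'b'; 'a' is spread over target nodes
# {0, 1, 2} and its chain is broken (values disagree), 'b' maps to node {3}.
embedding = {'a': {0, 1, 2}, 'b': {3}}
target_samples = [{0: +1, 1: +1, 2: -1, 3: -1}]
source_samples = unembed_samples(target_samples, embedding)
# With a majority-vote resolver this should give something like [{'a': 1, 'b': -1}]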
def compare_forks(self, cur_fork_head, new_fork_head):
"""The longest chain is selected. If they are equal, then the hash
value of the previous block id and publisher signature is computed.
The lowest result value is the winning block.
Args:
cur_fork_head: The current head of the block chain.
new_fork_head: The head of the fork that is being evaluated.
Returns:
bool: True if choosing the new chain head, False if choosing
the current chain head.
"""
# If the new fork head is not DevMode consensus, bail out. This should
# never happen, but we need to protect against it.
if new_fork_head.consensus != b"Devmode":
raise \
TypeError(
'New fork head {} is not a DevMode block'.format(
new_fork_head.identifier[:8]))
# If the current fork head is not DevMode consensus, check the new fork
# head to see if its immediate predecessor is the current fork head. If
# so that means that consensus mode is changing. If not, we are again
# in a situation that should never happen, but we need to guard
# against.
if cur_fork_head.consensus != b"Devmode":
if new_fork_head.previous_block_id == cur_fork_head.identifier:
LOGGER.info(
'Choose new fork %s: New fork head switches consensus to '
'DevMode',
new_fork_head.identifier[:8])
return True
raise \
TypeError(
'Trying to compare a DevMode block {} to a non-DevMode '
'block {} that is not the direct predecessor'.format(
new_fork_head.identifier[:8],
cur_fork_head.identifier[:8]))
if new_fork_head.block_num == cur_fork_head.block_num:
cur_fork_hash = self.hash_signer_public_key(
cur_fork_head.header.signer_public_key,
cur_fork_head.header.previous_block_id)
new_fork_hash = self.hash_signer_public_key(
new_fork_head.header.signer_public_key,
new_fork_head.header.previous_block_id)
result = new_fork_hash < cur_fork_hash
else:
result = new_fork_head.block_num > cur_fork_head.block_num
return result | 0.000815 |
def connect(self):
"""
Hook up the moderation methods to pre- and post-save signals
from the comment models.
"""
signals.comment_will_be_posted.connect(self.pre_save_moderation, sender=comments.get_model())
signals.comment_was_posted.connect(self.post_save_moderation, sender=comments.get_model()) | 0.011561 |
def infix_to_postfix(nodes, *, recurse_types=None):
"""Convert a list of nodes in infix order to a list of nodes in postfix order.
E.G. with normal algebraic precedence, 3 + 4 * 5 -> 3 4 5 * +
"""
output = []
operators = []
for node in nodes:
if isinstance(node, OperatorNode):
# Drain out all operators whose precedence is gte the node's...
cmp_operator = node.operator
while operators:
current_operator = operators[-1].operator
if current_operator.precedence > cmp_operator.precedence or \
current_operator.precedence == cmp_operator.precedence and current_operator.association == Association.left:
output.append(operators.pop())
else:
break
operators.append(node)
else:
if recurse_types is not None and node.node_type in recurse_types:
output.extend(infix_to_postfix(node.children, recurse_types=recurse_types))
else:
output.append(node)
return output + list(reversed(operators)) | 0.016765 |
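The loop above is the classic shunting-yard algorithm. As a self-contained illustration of the same precedence rule, independent of the OperatorNode/Association types used above, a sketch over plain string tokens might look like this:

def infix_tokens_to_postfix(tokens):
    """Shunting-yard over string tokens; all operators left-associative."""
    precedence = {'+': 1, '-': 1, '*': 2, '/': 2}
    output, operators = [], []
    for tok in tokens:
        if tok in precedence:
            # Pop operators of greater-or-equal precedence before pushing.
            while operators and precedence[operators[-1]] >= precedence[tok]:
                output.append(operators.pop())
            operators.append(tok)
        else:
            output.append(tok)
    return output + list(reversed(operators))

# Matches the docstring example: 3 + 4 * 5 -> 3 4 5 * +
print(infix_tokens_to_postfix(['3', '+', '4', '*', '5']))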
def add(self, other):
"""
Adds two block matrices together. The matrices must have the
same size and matching `rowsPerBlock` and `colsPerBlock` values.
If one of the sub matrix blocks that are being added is a
SparseMatrix, the resulting sub matrix block will also be a
SparseMatrix, even if it is being added to a DenseMatrix. If
two dense sub matrix blocks are added, the output block will
also be a DenseMatrix.
>>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
>>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
>>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
>>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
>>> mat1 = BlockMatrix(blocks1, 3, 2)
>>> mat2 = BlockMatrix(blocks2, 3, 2)
>>> mat3 = BlockMatrix(blocks3, 3, 2)
>>> mat1.add(mat2).toLocalMatrix()
DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0)
>>> mat1.add(mat3).toLocalMatrix()
DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0)
"""
if not isinstance(other, BlockMatrix):
raise TypeError("Other should be a BlockMatrix, got %s" % type(other))
other_java_block_matrix = other._java_matrix_wrapper._java_model
java_block_matrix = self._java_matrix_wrapper.call("add", other_java_block_matrix)
return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock) | 0.00413 |
def assert_false(expr, msg_fmt="{msg}"):
"""Fail the test unless the expression is falsy.
>>> assert_false("")
>>> assert_false("Hello World!")
Traceback (most recent call last):
...
AssertionError: 'Hello World!' is not falsy
The following msg_fmt arguments are supported:
* msg - the default error message
* expr - tested expression
"""
if expr:
msg = "{!r} is not falsy".format(expr)
fail(msg_fmt.format(msg=msg, expr=expr)) | 0.002024 |
def pair(address, key):
'''
Pair the bluetooth adapter with a device
CLI Example:
.. code-block:: bash
salt '*' bluetooth.pair DE:AD:BE:EF:CA:FE 1234
Where DE:AD:BE:EF:CA:FE is the address of the device to pair with, and 1234
is the passphrase.
TODO: This function is currently broken, as the bluez-simple-agent program
no longer ships with BlueZ >= 5.0. It needs to be refactored.
'''
if not salt.utils.validate.net.mac(address):
raise CommandExecutionError(
'Invalid BD address passed to bluetooth.pair'
)
try:
int(key)
except Exception:
raise CommandExecutionError(
'bluetooth.pair requires a numerical key to be used'
)
addy = address_()
cmd = 'echo {0} | bluez-simple-agent {1} {2}'.format(
_cmd_quote(addy['device']), _cmd_quote(address), _cmd_quote(key)
)
out = __salt__['cmd.run'](cmd, python_shell=True).splitlines()
return out | 0.001009 |
def write_memory_dump():
"""Dump memory to a temporary filename with the meliae package.
@return: JSON filename where memory dump has been written to
@rtype: string
"""
# first do a full garbage collection run
gc.collect()
if gc.garbage:
log.warn(LOG_CHECK, "Unreachabe objects: %s", pprint.pformat(gc.garbage))
from meliae import scanner
fo, filename = get_temp_file(mode='wb', suffix='.json', prefix='lcdump_')
try:
scanner.dump_all_objects(fo)
finally:
fo.close()
return filename | 0.00361 |
def nreturned(self):
"""
Extract counters if available (lazy).
Looks for nreturned, nReturned, or nMatched counter.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nreturned | 0.006494 |
def to_css(self):
''' Generate the CSS representation of this RGB color.
Returns:
str, ``"rgb(...)"`` or ``"rgba(...)"``
'''
if self.a == 1.0:
return "rgb(%d, %d, %d)" % (self.r, self.g, self.b)
else:
return "rgba(%d, %d, %d, %s)" % (self.r, self.g, self.b, self.a) | 0.005831 |
def set_derived_metric_tags(self, id, **kwargs): # noqa: E501
"""Set all tags associated with a specific derived metric definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_derived_metric_tags(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body:
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_derived_metric_tags_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.set_derived_metric_tags_with_http_info(id, **kwargs) # noqa: E501
return data | 0.002064 |
def get_gold_labels(self, cand_lists, annotator=None):
"""Load sparse matrix of GoldLabels for each candidate_class.
:param cand_lists: The candidates to get gold labels for.
:type cand_lists: List of list of candidates.
:param annotator: A specific annotator key to get labels for. Default
None.
:type annotator: str
:return: A list of MxN sparse matrix where M are the candidates and N is the
annotators. If annotator is provided, return a list of Mx1 matrix.
:rtype: list[csr_matrix]
"""
return get_sparse_matrix(self.session, GoldLabelKey, cand_lists, key=annotator) | 0.005997 |
def save_data(trigger_id, data):
"""
call the consumer and handle the data
:param trigger_id:
:param data:
:return:
"""
status = True
# consumer - the service which uses the data
default_provider.load_services()
service = TriggerService.objects.get(id=trigger_id)
service_consumer = default_provider.get_service(str(service.consumer.name.name))
kwargs = {'user': service.user}
if len(data) > 0:
getattr(service_consumer, '__init__')(service.consumer.token, **kwargs)
status = getattr(service_consumer, 'save_data')(service.id, **data)
return status | 0.00313 |
def get_all_parents(self):
"""
Return all parents of this company.
"""
ownership = Ownership.objects.filter(child=self)
parents = Company.objects.filter(parent__in=ownership)
for parent in parents:
parents = parents | parent.get_all_parents()
return parents | 0.006154 |
def format(self, record, *args, **kwargs):
"""
Format a message in the log
Act like the normal format, but indent anything that is a
newline within the message.
"""
return logging.Formatter.format(
self, record, *args, **kwargs).replace('\n', '\n' + ' ' * 8) | 0.00625 |
def node(self, source, args=(), env={}):
"""
Calls node with an inline source.
Returns decoded output of stdout and stderr; decoding determine
by locale.
"""
return self._exec(self.node_bin, source, args=args, env=env) | 0.007463 |
def recv_connect(self, version=None, support=None, session=None):
"""DDP connect handler."""
del session # Meteor doesn't even use this!
if self.connection is not None:
raise MeteorError(
400, 'Session already established.',
self.connection.connection_id,
)
elif None in (version, support) or version not in self.versions:
self.reply('failed', version=self.versions[0])
elif version not in support:
raise MeteorError(400, 'Client version/support mismatch.')
else:
from dddp.models import Connection
cur = connection.cursor()
cur.execute('SELECT pg_backend_pid()')
(backend_pid,) = cur.fetchone()
this.version = version
this.support = support
self.connection = Connection.objects.create(
server_addr='%d:%s' % (
backend_pid,
self.ws.handler.socket.getsockname(),
),
remote_addr=self.remote_addr,
version=version,
)
self.pgworker.connections[self.connection.pk] = self
atexit.register(self.on_close, 'Shutting down.')
self.reply('connected', session=self.connection.connection_id) | 0.001489 |
def HA2(credentials, request, algorithm):
"""Create HA2 md5 hash
If the qop directive's value is "auth" or is unspecified, then HA2:
HA2 = md5(A2) = MD5(method:digestURI)
If the qop directive's value is "auth-int" , then HA2 is
HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))
"""
if credentials.get("qop") == "auth" or credentials.get('qop') is None:
return H(b":".join([request['method'].encode('utf-8'), request['uri'].encode('utf-8')]), algorithm)
elif credentials.get("qop") == "auth-int":
for k in 'method', 'uri', 'body':
if k not in request:
raise ValueError("%s required" % k)
A2 = b":".join([request['method'].encode('utf-8'),
request['uri'].encode('utf-8'),
H(request['body'], algorithm).encode('utf-8')])
return H(A2, algorithm)
raise ValueError | 0.002186 |
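For the qop="auth" branch, HA2 reduces to MD5 over "method:digestURI". A standalone sketch of that formula, using hashlib directly rather than the H helper assumed above, with made-up request values:

import hashlib

# Hypothetical request; with qop="auth" the entity body is not hashed.
request = {'method': 'GET', 'uri': '/dir/index.html'}
a2 = b":".join([request['method'].encode('utf-8'),
                request['uri'].encode('utf-8')])
ha2 = hashlib.md5(a2).hexdigest()
print(ha2)  # MD5("GET:/dir/index.html")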
def set_registry(self, address: Address) -> None:
"""
Sets the current registry used in ``web3.pm`` functions that read/write to an on-chain
registry. This method accepts checksummed/canonical addresses or ENS names. Addresses
must point to an instance of the Vyper Reference Registry implementation.
If you want to use a different registry implementation with ``web3.pm``, manually
set the ``web3.pm.registry`` attribute to any subclass of ``ERCRegistry``.
To use an ENS domain as the address, make sure a valid ENS instance set as ``web3.ens``.
* Parameters:
* ``address``: Address of on-chain Vyper Reference Registry.
"""
if is_canonical_address(address) or is_checksum_address(address):
canonical_address = to_canonical_address(address)
self.registry = VyperReferenceRegistry(canonical_address, self.web3)
elif is_ens_name(address):
self._validate_set_ens()
addr_lookup = self.web3.ens.address(address)
if not addr_lookup:
raise NameNotFound(
"No address found after ENS lookup for name: {0}.".format(address)
)
self.registry = VyperReferenceRegistry(
to_canonical_address(addr_lookup), self.web3
)
else:
raise PMError(
"Expected a canonical/checksummed address or ENS name for the address, "
"instead received {0}.".format(type(address))
) | 0.007038 |
def getSamplingWorkflowEnabled(self):
"""Returns True if the sample of this Analysis Request has to be
collected by the laboratory personnel
"""
template = self.getTemplate()
if template:
return template.getSamplingRequired()
return self.bika_setup.getSamplingWorkflowEnabled() | 0.005935 |
def numeric_range(cls, field, from_value, to_value, include_lower=None, include_upper=None):
'''
http://www.elasticsearch.org/guide/reference/query-dsl/numeric-range-filter.html
Filters documents with fields that have values within a certain numeric range. Similar to range filter, except that it works only with numeric values, and the filter execution works differently.
'''
instance = cls(numeric_range={field: {'from': from_value, 'to': to_value}})
if include_lower is not None:
instance['numeric_range'][field]['include_lower'] = include_lower
if include_upper is not None:
instance['numeric_range'][field]['include_upper'] = include_upper
return instance | 0.006684 |
def create_map(self, pix):
"""Create a new map with reference pixel coordinates shifted
to the pixel coordinates ``pix``.
Parameters
----------
pix : `~numpy.ndarray`
Reference pixel of new map.
Returns
-------
out_map : `~numpy.ndarray`
The shifted map.
"""
k0 = self._m0.shift_to_coords(pix)
k1 = self._m1.shift_to_coords(pix)
k0[np.isfinite(k1)] = k1[np.isfinite(k1)]
k0[~np.isfinite(k0)] = 0
return k0 | 0.005435 |
def undock_sidebar(self, window_key, widget=None, event=None):
"""Undock/separate sidebar into independent window
The sidebar is undocked and put into a separate new window. The sidebar is hidden in the main-window by
triggering the method on_[widget_name]_hide_clicked(). Triggering this method shows the
[widget_name]_return_button in the main-window, which does not serve any purpose when the bar is undocked.
This button is therefore deliberately
hidden. The undock button, which is also part of the sidebar is hidden, because the re-dock button is
included in the top_tool_bar of the newly opened window. Not hiding it will result in two re-dock buttons
visible in the new window. The new window size and position are loaded from runtime_config, if they exist.
"""
undocked_window_name = window_key.lower() + '_window'
widget_name = window_key.lower()
undocked_window_view = getattr(self.view, undocked_window_name)
undocked_window = undocked_window_view.get_top_widget()
if os.getenv("RAFCON_START_MINIMIZED", False):
undocked_window.iconify()
gui_helper_label.set_window_size_and_position(undocked_window, window_key)
self.view[widget_name].reparent(undocked_window_view['central_eventbox'])
self.view['undock_{}_button'.format(widget_name)].hide()
getattr(self, 'on_{}_hide_clicked'.format(widget_name))(None)
self.view['{}_return_button'.format(widget_name)].hide()
main_window = self.view.get_top_widget()
state_handler = main_window.connect('window-state-event', self.undock_window_callback, undocked_window)
self.handler_ids[undocked_window_name] = {"state": state_handler}
undocked_window.set_transient_for(main_window)
main_window.grab_focus()
global_runtime_config.set_config_value(window_key + '_WINDOW_UNDOCKED', True) | 0.006154 |
def __log_number_of_constants(self):
"""
Logs the number of constants generated.
"""
n_id = len(self._labels)
n_widths = len(self._constants) - n_id
self._io.writeln('')
self._io.text('Number of constants based on column widths: {0}'.format(n_widths))
self._io.text('Number of constants based on database IDs : {0}'.format(n_id)) | 0.010152 |
def send_response(self, msgid, error=None, result=None):
"""Send a response
"""
msg = self._encoder.create_response(msgid, error, result)
self._send_message(msg) | 0.010363 |
def request_get_variable(self, py_db, seq, thread_id, frame_id, scope, attrs):
'''
:param scope: 'FRAME' or 'GLOBAL'
'''
int_cmd = InternalGetVariable(seq, thread_id, frame_id, scope, attrs)
py_db.post_internal_command(int_cmd, thread_id) | 0.007194 |
def _load_config(self, client_secrets_file, client_id, client_secret):
"""Loads oauth2 configuration in order of priority.
Priority:
1. Config passed to the constructor or init_app.
2. Config passed via the GOOGLE_OAUTH2_CLIENT_SECRETS_FILE app
config.
3. Config passed via the GOOGLE_OAUTH2_CLIENT_ID and
GOOGLE_OAUTH2_CLIENT_SECRET app config.
Raises:
ValueError if no config could be found.
"""
if client_id and client_secret:
self.client_id, self.client_secret = client_id, client_secret
return
if client_secrets_file:
self._load_client_secrets(client_secrets_file)
return
if 'GOOGLE_OAUTH2_CLIENT_SECRETS_FILE' in self.app.config:
self._load_client_secrets(
self.app.config['GOOGLE_OAUTH2_CLIENT_SECRETS_FILE'])
return
try:
self.client_id, self.client_secret = (
self.app.config['GOOGLE_OAUTH2_CLIENT_ID'],
self.app.config['GOOGLE_OAUTH2_CLIENT_SECRET'])
except KeyError:
raise ValueError(
'OAuth2 configuration could not be found. Either specify the '
'client_secrets_file or client_id and client_secret or set '
'the app configuration variables '
'GOOGLE_OAUTH2_CLIENT_SECRETS_FILE or '
'GOOGLE_OAUTH2_CLIENT_ID and GOOGLE_OAUTH2_CLIENT_SECRET.') | 0.001305 |
def _assign_enterprise_role_to_users(self, _get_batch_method, options, is_feature_role=False):
"""
Assigns enterprise role to users.
"""
role_name = options['role']
batch_limit = options['batch_limit']
batch_sleep = options['batch_sleep']
batch_offset = options['batch_offset']
current_batch_index = batch_offset
users_batch = _get_batch_method(
batch_offset,
batch_offset + batch_limit
)
role_class = SystemWideEnterpriseRole
role_assignment_class = SystemWideEnterpriseUserRoleAssignment
if is_feature_role:
role_class = EnterpriseFeatureRole
role_assignment_class = EnterpriseFeatureUserRoleAssignment
enterprise_role = role_class.objects.get(name=role_name)
while users_batch.count() > 0:
for index, user in enumerate(users_batch):
LOGGER.info(
'Processing user with index %s and id %s',
current_batch_index + index, user.id
)
role_assignment_class.objects.get_or_create(
user=user,
role=enterprise_role
)
sleep(batch_sleep)
current_batch_index += len(users_batch)
users_batch = _get_batch_method(
current_batch_index,
current_batch_index + batch_limit
) | 0.002044 |
def load_filter_plugins(entrypoint_group: str) -> Iterable[Filter]:
"""
Load all blacklist plugins that are registered with pkg_resources
Parameters
==========
entrypoint_group: str
The entrypoint group name to load plugins from
Returns
=======
List of Blacklist:
A list of objects derived from the Blacklist class
"""
global loaded_filter_plugins
enabled_plugins: List[str] = []
config = BandersnatchConfig().config
try:
config_blacklist_plugins = config["blacklist"]["plugins"]
split_plugins = config_blacklist_plugins.split("\n")
if "all" in split_plugins:
enabled_plugins = ["all"]
else:
for plugin in split_plugins:
if not plugin:
continue
enabled_plugins.append(plugin)
except KeyError:
pass
# If the plugins for the entrypoint_group have been loaded return them
cached_plugins = loaded_filter_plugins.get(entrypoint_group)
if cached_plugins:
return cached_plugins
plugins = set()
for entry_point in pkg_resources.iter_entry_points(group=entrypoint_group):
plugin_class = entry_point.load()
plugin_instance = plugin_class()
if "all" in enabled_plugins or plugin_instance.name in enabled_plugins:
plugins.add(plugin_instance)
loaded_filter_plugins[entrypoint_group] = list(plugins)
return plugins | 0.000682 |
def keystroke_model():
"""Generates a 2-state model with lognormal emissions and frequency smoothing"""
model = Pohmm(n_hidden_states=2,
init_spread=2,
emissions=['lognormal', 'lognormal'],
smoothing='freq',
init_method='obs',
thresh=1)
return model | 0.005698 |
def cont_moments_cv(cont,
flt_epsilon=1.19209e-07,
dbl_epsilon=2.2204460492503131e-16):
"""Compute the moments of a contour
The moments are computed in the same way as they are computed
in OpenCV's `contourMoments` in `moments.cpp`.
Parameters
----------
cont: array of shape (N,2)
The contour for which to compute the moments.
flt_epsilon: float
The value of ``FLT_EPSILON`` in OpenCV/gcc.
dbl_epsilon: float
The value of ``DBL_EPSILON`` in OpenCV/gcc.
Returns
-------
moments: dict
A dictionary of moments. If the moment `m00` is smaller
than half of `flt_epsilon`, `None` is returned.
"""
# Make sure we have no unsigned integers
if np.issubdtype(cont.dtype, np.unsignedinteger):
cont = cont.astype(np.int)
xi = cont[:, 0]
yi = cont[:, 1]
xi_1 = np.roll(xi, -1)
yi_1 = np.roll(yi, -1)
xi_12 = xi_1**2
yi_12 = yi_1**2
xi2 = xi**2
yi2 = yi**2
dxy = xi_1 * yi - xi * yi_1
xii_1 = xi_1 + xi
yii_1 = yi_1 + yi
a00 = np.sum(dxy)
a10 = np.sum(dxy * xii_1)
a01 = np.sum(dxy * yii_1)
a20 = np.sum(dxy * (xi_1 * xii_1 + xi2))
a11 = np.sum(dxy * (xi_1 * (yii_1 + yi_1) + xi * (yii_1 + yi)))
a02 = np.sum(dxy * (yi_1 * yii_1 + yi2))
a30 = np.sum(dxy * xii_1 * (xi_12 + xi2))
a03 = np.sum(dxy * yii_1 * (yi_12 + yi2))
a21 = np.sum(dxy * (xi_12 * (3 * yi_1 + yi) + 2 *
xi * xi_1 * yii_1 + xi2 * (yi_1 + 3 * yi)))
a12 = np.sum(dxy * (yi_12 * (3 * xi_1 + xi) + 2 *
yi * yi_1 * xii_1 + yi2 * (xi_1 + 3 * xi)))
if abs(a00) > flt_epsilon:
db1_2 = 0.5
db1_6 = 0.16666666666666666666666666666667
db1_12 = 0.083333333333333333333333333333333
db1_24 = 0.041666666666666666666666666666667
db1_20 = 0.05
db1_60 = 0.016666666666666666666666666666667
if a00 < 0:
db1_2 *= -1
db1_6 *= -1
db1_12 *= -1
db1_24 *= -1
db1_20 *= -1
db1_60 *= -1
m = dict(m00=a00 * db1_2,
m10=a10 * db1_6,
m01=a01 * db1_6,
m20=a20 * db1_12,
m11=a11 * db1_24,
m02=a02 * db1_12,
m30=a30 * db1_20,
m21=a21 * db1_60,
m12=a12 * db1_60,
m03=a03 * db1_20,
)
if m["m00"] > dbl_epsilon:
# Center of gravity
cx = m["m10"]/m["m00"]
cy = m["m01"]/m["m00"]
else:
cx = 0
cy = 0
# central second order moments
m["mu20"] = m["m20"] - m["m10"]*cx
m["mu11"] = m["m11"] - m["m10"]*cy
m["mu02"] = m["m02"] - m["m01"]*cy
m["mu30"] = m["m30"] - cx*(3*m["mu20"] + cx*m["m10"])
m["mu21"] = m["m21"] - cx*(2*m["mu11"] + cx*m["m01"]) - cy*m["mu20"]
m["mu12"] = m["m12"] - cy*(2*m["mu11"] + cy*m["m10"]) - cx*m["mu02"]
m["mu03"] = m["m03"] - cy*(3*m["mu02"] + cy*m["m01"])
return m
else:
return None | 0.000314 |
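A quick sanity check of the function above on a 2x2 axis-aligned square (assuming numpy is imported as np, as the function itself requires): the zeroth moment should equal the area and the centre of gravity should sit at (1, 1).

square = np.array([[0, 0], [2, 0], [2, 2], [0, 2]])
m = cont_moments_cv(square)
print(m["m00"])             # 4.0 -> area of the square
print(m["m10"] / m["m00"],  # 1.0 -> x of the centre of gravity
      m["m01"] / m["m00"])  # 1.0 -> y of the centre of gravity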
def GetCpuUsedMs(self):
'''Retrieves the number of milliseconds during which the virtual machine
has used the CPU. This value includes the time used by the guest
operating system and the time used by virtualization code for tasks for this
virtual machine. You can combine this value with the elapsed time
(VMGuestLib_GetElapsedMs) to estimate the effective virtual machine
CPU speed. This value is a subset of elapsedMs.'''
counter = c_uint64()
ret = vmGuestLib.VMGuestLib_GetCpuUsedMs(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value | 0.008535 |
def lookup(self, module_name):
"""Searches for a file providing given module.
Returns the normalized module id and path of the file.
"""
for search_path in self._paths:
module_path = os.path.join(search_path, module_name)
new_module_name, module_file = self._lookup(module_path, module_name)
if module_file:
return new_module_name, module_file
return None, None | 0.006608 |
def table(name, auth=None, eager=True):
"""Returns a given table for the given user."""
auth = auth or []
dynamodb = boto.connect_dynamodb(*auth)
table = dynamodb.get_table(name)
return Table(table=table, eager=eager) | 0.004202 |
def unzoom_all(self, event=None):
""" zoom out full data range """
if len(self.conf.zoom_lims) > 0:
self.conf.zoom_lims = [self.conf.zoom_lims[0]]
self.unzoom(event) | 0.00995 |
async def get_ltd_product(session, slug=None, url=None):
"""Get the product resource (JSON document) from the LSST the Docs API.
Parameters
----------
session : `aiohttp.ClientSession`
Your application's aiohttp client session.
See http://aiohttp.readthedocs.io/en/stable/client.html.
slug : `str`, optional
Slug identifying the product. This is the same as the subdomain.
For example, ``'ldm-151'`` is the slug for ``ldm-151.lsst.io``.
A full product URL can be provided instead, see ``url``.
url : `str`, optional
The full LTD Keeper URL for the product resource. For example,
``'https://keeper.lsst.codes/products/ldm-151'``. The ``slug``
can be provided instead.
Returns
-------
product : `dict`
Product dataset. See
https://ltd-keeper.lsst.io/products.html#get--products-(slug)
for fields.
"""
if url is None:
url = 'https://keeper.lsst.codes/products/{}'.format(slug)
async with session.get(url) as response:
data = await response.json()
return data | 0.000898 |
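A minimal driver for the coroutine above (assumes Python 3.7+ for asyncio.run; the slug is only an example value):

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        # Fetch the product resource for an example slug and print the raw dict.
        product = await get_ltd_product(session, slug='ldm-151')
        print(product)

asyncio.run(main())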
def SetValue(self, identifier, value):
"""Sets a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
value (object): value.
Raises:
TypeError: if the identifier is not a string type.
"""
if not isinstance(identifier, py2to3.STRING_TYPES):
raise TypeError('Identifier not a string type.')
identifier = identifier.lower()
self._values[identifier] = value | 0.004444 |
def filter_by_col_value(self, column_name,
min_val=None, max_val=None):
"""filters sheet/table by column.
The routine returns the serial-numbers with min_val <= values <= max_val
in the selected column.
Args:
column_name (str): column name.
min_val (int): minimum value of serial number.
max_val (int): maximum value of serial number.
Returns:
pandas.DataFrame
"""
sheet = self.table
identity = self.db_sheet_cols.id
exists_col_number = self.db_sheet_cols.exists
exists = sheet.loc[:, exists_col_number] > 0
if min_val is not None and max_val is not None:
criterion1 = sheet.loc[:, column_name] >= min_val
criterion2 = sheet.loc[:, column_name] <= max_val
sheet = sheet[criterion1 & criterion2 & exists]
elif min_val is not None or max_val is not None:
if min_val is not None:
criterion = sheet.loc[:, column_name] >= min_val
if max_val is not None:
criterion = sheet.loc[:, column_name] <= max_val
# noinspection PyUnboundLocalVariable
sheet = sheet[criterion & exists]
else:
sheet = sheet[exists]
return sheet.loc[:, identity].values.astype(int) | 0.002911 |
def DOM_setOuterHTML(self, nodeId, outerHTML):
"""
Function path: DOM.setOuterHTML
Domain: DOM
Method name: setOuterHTML
Parameters:
Required arguments:
'nodeId' (type: NodeId) -> Id of the node to set markup for.
'outerHTML' (type: string) -> Outer HTML markup to set.
No return value.
Description: Sets node HTML markup, returns new node id.
"""
assert isinstance(outerHTML, (str,)
), "Argument 'outerHTML' must be of type '['str']'. Received type: '%s'" % type(
outerHTML)
subdom_funcs = self.synchronous_command('DOM.setOuterHTML', nodeId=nodeId,
outerHTML=outerHTML)
return subdom_funcs | 0.044207 |
def get_subject_with_file_validation(jwt_bu64, cert_path):
"""Same as get_subject_with_local_validation() except that the signing certificate
is read from a local PEM file."""
cert_obj = d1_common.cert.x509.deserialize_pem_file(cert_path)
return get_subject_with_local_validation(jwt_bu64, cert_obj) | 0.006349 |
def transform_item(self, item):
"""
Transforms JSON object
"""
obj = {
'id': item['primaryId'],
'label': item['symbol'],
'full_name': item['name'],
'type': item['soTermId'],
'taxon': {'id': item['taxonId']},
}
if 'synonyms' in item:
obj['synonyms'] = item['synonyms']
if 'crossReferenceIds' in item:
obj['xrefs'] = [self._normalize_id(x) for x in item['crossReferenceIds']]
# TODO: synonyms
# TODO: genomeLocations
# TODO: geneLiteratureUrl
return obj | 0.004808 |
def qn_to_qubo(expr):
"""Convert Sympy's expr to QUBO.
Args:
expr: Sympy's quadratic expression with variable `q0`, `q1`, ...
Returns:
[[float]]: Returns QUBO matrix.
"""
try:
import sympy
except ImportError:
raise ImportError("This function requires sympy. Please install it.")
assert type(expr) == sympy.Add
to_i = lambda s: int(str(s)[1:])
max_i = max(map(to_i, expr.free_symbols)) + 1
qubo = [[0.] * max_i for _ in range(max_i)]
for arg in expr.args:
syms = arg.free_symbols
assert len(syms) <= 2
if len(syms) == 2:
assert type(arg) == sympy.Mul
i, j = list(map(to_i, syms))
if i > j:
i, j = j, i
if i == j:
if len(arg.args) == 2:
qubo[i][i] = float(arg.args[0])
elif len(arg.args) == 1:
qubo[i][i] = 1.0
else:
raise ValueError(f"Too many args! arg.args = {arg.args}")
continue
if len(arg.args) == 3:
qubo[i][j] = float(arg.args[0])
elif len(arg.args) == 2:
qubo[i][j] = 1.0
if len(syms) == 1:
i = to_i(next(iter(syms)))
if len(arg.args) == 2:
assert type(arg) == sympy.Mul
qubo[i][i] = float(arg.args[0])
elif len(arg.args) == 1:
qubo[i][i] = 1.0
else:
raise ValueError(f"Too many args! arg.args = {arg.args}")
return qubo | 0.001877 |
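A short usage sketch, assuming sympy is installed and the binary variables follow the q0, q1, ... naming the function expects:

import sympy

q0, q1 = sympy.symbols('q0 q1')
expr = 3 * q0 + 2 * q0 * q1 - q1   # quadratic expression in q0, q1
print(qn_to_qubo(expr))            # -> [[3.0, 2.0], [0.0, -1.0]]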
def get_protein_dict(cif_data):
""" Parse cif_data dict for a subset of its data.
Notes
-----
cif_data dict contains all the data from the .cif file, with values as strings.
This function returns a more 'human readable' dictionary of key-value pairs.
The keys have simpler (and still often more descriptive!) names, and the values are not restricted to being strings.
To add more key-value pairs to the protein_dict, follow the patterns used in this function.
Add the key and your name for it to mmcif_data_names.
Will it need further parsing, like with the dates in the function below?
If the value is not a string, add it to a list of data-types at the end of the function.
More information on what key-value pairs can be obtained can be gleaned by examining cif_data and/or by viewing the
mmcif resource on the PDB website: http://mmcif.wwpdb.org/docs/pdb_to_pdbx_correspondences.html
WARNING: Do not alter the keys of protein_dict without caution.
The keys of protein_dict MUST match the column names of the Protein model in the protgraph database.
Parameters
----------
cif_data : dict
Key/value pairs taken directly from a .cif file.
Output of the function dict_from_mmcif.
Returns
-------
protein_dict : dict
A dictionary containing a parsed subset of the data in cif_data.
The keys have the same name as fields in the Protein model.
"""
# Dictionary relating the keys of protein_dict (column names in Protein model) to the keys of cif_data.
mmcif_data_names = {
'keywords': '_struct_keywords.text',
'header': '_struct_keywords.pdbx_keywords',
'space_group': '_symmetry.space_group_name_H-M',
'experimental_method': '_exptl.method',
'crystal_growth': '_exptl_crystal_grow.pdbx_details',
'resolution': '_refine.ls_d_res_high',
'r_value_obs': '_refine.ls_R_factor_obs',
'atoms_protein': '_refine_hist.pdbx_number_atoms_protein',
'atoms_solvent': '_refine_hist.number_atoms_solvent',
'atoms_ligand': '_refine_hist.pdbx_number_atoms_ligand',
'atoms_nucleic_acid': '_refine_hist.pdbx_number_atoms_nucleic_acid',
'atoms_total': '_refine_hist.number_atoms_total',
'title': '_struct.title',
'pdb_descriptor': '_struct.pdbx_descriptor',
'model_details': '_struct.pdbx_model_details',
'casp_flag': '_struct.pdbx_CASP_flag',
'model_type_details': '_struct.pdbx_model_type_details',
'ncbi_taxonomy': '_entity_src_nat.pdbx_ncbi_taxonomy_id',
'ncbi_taxonomy_gene': '_entity_src_gen.pdbx_gene_src_ncbi_taxonomy_id',
'ncbi_taxonomy_host_org': '_entity_src_gen.pdbx_host_org_ncbi_taxonomy_id',
}
# Set up initial protein_dict.
protein_dict = {}
for column_name, cif_name in mmcif_data_names.items():
try:
data = cif_data[cif_name]
except IndexError:
data = None
except KeyError:
data = None
protein_dict[column_name] = data
# These entries are modified from the mmcif dictionary.
# There may be many revision dates in cif_data. We save the original deposition, release and last_modified dates.
# If there are many dates, they will be in a list in cif_data, otherwise it's one date in a string
# Is there a tidier way to do this?
if isinstance(cif_data['_database_PDB_rev.date_original'], str):
protein_dict['deposition_date'] = cif_data['_database_PDB_rev.date_original']
else:
protein_dict['deposition_date'] = cif_data['_database_PDB_rev.date_original'][0]
if isinstance(cif_data['_database_PDB_rev.date'], str):
protein_dict['release_date'] = cif_data['_database_PDB_rev.date']
protein_dict['last_modified_date'] = cif_data['_database_PDB_rev.date']
else:
protein_dict['release_date'] = cif_data['_database_PDB_rev.date'][0]
protein_dict['last_modified_date'] = cif_data['_database_PDB_rev.date'][-1]
# crystal_growth should be a string or None
crystal_growth = protein_dict['crystal_growth']
if type(crystal_growth) == list and len(crystal_growth) >= 1:
protein_dict['crystal_growth'] = crystal_growth[0]
else:
protein_dict['crystal_growth'] = None
# taxonomy data types should be ints, not lists
taxonomy_keys = ['ncbi_taxonomy', 'ncbi_taxonomy_gene', 'ncbi_taxonomy_host_org']
for taxonomy_key in taxonomy_keys:
if protein_dict[taxonomy_key]:
if type(protein_dict[taxonomy_key]) == list:
try:
protein_dict[taxonomy_key] = int(protein_dict[taxonomy_key][0])
except (ValueError, IndexError):
protein_dict[taxonomy_key] = None
# Convert data types from strings to their correct data type.
ints = ['atoms_ligand', 'atoms_nucleic_acid', 'atoms_protein', 'atoms_solvent', 'atoms_total']
floats = ['r_value_obs', 'resolution']
dates = ['deposition_date', 'release_date', 'last_modified_date']
for k, v in protein_dict.items():
if v:
if v == '?' or v == 'None' or v == '.':
protein_dict[k] = None
elif k in ints:
protein_dict[k] = int(v)
elif k in floats:
protein_dict[k] = float(v)
elif k in dates:
protein_dict[k] = datetime.datetime.strptime(v, '%Y-%m-%d')
# Parse awkward strings from cif_data.
elif type(v) == str:
v = v.replace('loop_', '')
v = v.replace(' # ', '')
if v[0] == v[-1] == '\'':
protein_dict[k] = v[1:-1]
return protein_dict | 0.003297 |
def prose_wc(args):
"""Processes data provided to print a count object, or update a file.
Args:
args: an ArgumentParser object returned by setup()
"""
if args.file is None:
return 1
if args.split_hyphens:
INTERSTITIAL_PUNCTUATION.append(re.compile(r'-'))
content = args.file.read().decode('utf-8')
filename = args.file.name
body = strip_frontmatter(content)
parsed = markdown_to_text(body)
result = wc(filename, body, parsed=parsed,
is_jekyll=(body != content))
if (args.update and
filename != '_stdin_' and
result['counts']['type'] == 'jekyll'):
update_file(filename, result, content, args.indent)
else:
_mockable_print({
'yaml': yaml.safe_dump(result, default_flow_style=False,
indent=args.indent),
'json': json.dumps(result, indent=args.indent),
'default': default_dump(result),
}[args.format])
return 0 | 0.001974 |
def monitors(self):
"""
Return a new raw REST interface to monitors resources
:rtype: :py:class:`ns1.rest.monitoring.Monitors`
"""
import ns1.rest.monitoring
return ns1.rest.monitoring.Monitors(self.config) | 0.007843 |
def ustr(text):
"""
Convert a string to Python 2 unicode or Python 3 string as appropriate to
the version of Python in use.
"""
if text is not None:
if sys.version_info >= (3, 0):
return str(text)
else:
return unicode(text)
else:
return text | 0.003195 |
def isready(self):
"""
Used to synchronize the python engine object with the back-end engine. Sends 'isready' and waits for 'readyok.'
"""
self.put('isready')
while True:
text = self.stdout.readline().strip()
if text == 'readyok':
return text | 0.009288 |
def import_module(mod_str):
"""
inspired by post on stackoverflow
:param mod_str: import path string like 'netshowlib.linux.provider_discovery'
:return: module matching the import statement
"""
_module = __import__(mod_str)
_mod_parts = mod_str.split('.')
for _mod_part in _mod_parts[1:]:
_module = getattr(_module, _mod_part)
return _module | 0.002604 |
def iterall(cls, target, branch, build, flags, platform=None):
"""Return an iterable for all available builds matching a particular build type"""
flags = BuildFlags(*flags)
for task in BuildTask.iterall(build, branch, flags, platform):
yield cls(target, branch, task, flags, platform) | 0.009375 |
def ping(self, event):
"""Perform a ping to measure client <-> node latency"""
self.log('Client ping received:', event.data, lvl=verbose)
response = {
'component': 'hfos.ui.clientmanager',
'action': 'pong',
'data': [event.data, time() * 1000]
}
self.fire(send(event.client.uuid, response)) | 0.00545 |
def _create_fulltext_query(self):
"""Support the json-server fulltext search with a broad LIKE filter."""
filter_by = []
if 'q' in request.args:
columns = flat_model(model_tree(self.__class__.__name__, self.model_cls))
for q in request.args.getlist('q'):
filter_by += ['{col}::like::%{q}%'.format(col=col, q=q) for col in columns]
return filter_by | 0.009501 |
def parse_config_input_output(args=sys.argv):
"""Parse the args using the config_file, input_dir, output_dir pattern
Args:
args: sys.argv
Returns:
The populated namespace object from parser.parse_args().
Raises:
TBD
"""
parser = argparse.ArgumentParser(
description='Process the input files using the given config')
parser.add_argument(
'config_file',
help='Configuration file.',
metavar='FILE', type=extant_file)
parser.add_argument(
'input_dir',
help='Directory containing the input files.',
metavar='DIR', type=extant_dir)
parser.add_argument(
'output_dir',
help='Directory where the output files should be saved.',
metavar='DIR', type=extant_dir)
return parser.parse_args(args[1:]) | 0.001202 |
def shelter_find(self, **kwargs):
"""
shelter.find wrapper. Returns a generator of shelter record dicts
matching your search criteria.
:rtype: generator
:returns: A generator of shelter record dicts.
:raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have
reached the maximum number of records your credentials allow you
to receive.
"""
def shelter_find_parser(root, has_records):
"""
The parser that is used with the ``_do_autopaginating_api_call``
method for auto-pagination.
:param lxml.etree._Element root: The root Element in the response.
:param dict has_records: A dict that we track the loop state in.
dicts are passed by references, which is how this works.
"""
for shelter in root.find("shelters"):
has_records["has_records"] = True
record = {}
for field in shelter:
record[field.tag] = field.text
yield record
return self._do_autopaginating_api_call(
"shelter.find", kwargs, shelter_find_parser
) | 0.001635 |
def reset(self):
"""Resets the iterator to the beginning of the data."""
self.curr_idx = 0
random.shuffle(self.idx)
for buck in self.data:
np.random.shuffle(buck) | 0.009709 |
def redirect_legacy_content(request):
"""Redirect from legacy /content/id/version to new /contents/uuid@version.
Handles collection context (book) as well.
"""
routing_args = request.matchdict
objid = routing_args['objid']
objver = routing_args.get('objver')
filename = routing_args.get('filename')
id, version = _convert_legacy_id(objid, objver)
if not id:
raise httpexceptions.HTTPNotFound()
# We always use 301 redirects (HTTPMovedPermanently) here
# because we want search engines to move to the newer links
# We cache these redirects only briefly because, even when versioned,
# legacy collection versions don't include the minor version,
# so the latest archive url could change
if filename:
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
args = dict(id=id, version=version, filename=filename)
cursor.execute(SQL['get-resourceid-by-filename'], args)
try:
res = cursor.fetchone()
resourceid = res[0]
raise httpexceptions.HTTPMovedPermanently(
request.route_path('resource', hash=resourceid,
ignore=u'/{}'.format(filename)),
headers=[("Cache-Control", "max-age=60, public")])
except TypeError: # None returned
raise httpexceptions.HTTPNotFound()
ident_hash = join_ident_hash(id, version)
params = request.params
if params.get('collection'): # page in book
objid, objver = split_legacy_hash(params['collection'])
book_uuid, book_version = _convert_legacy_id(objid, objver)
if book_uuid:
id, ident_hash = \
_get_page_in_book(id, version, book_uuid, book_version)
raise httpexceptions.HTTPMovedPermanently(
request.route_path('content', ident_hash=ident_hash),
headers=[("Cache-Control", "max-age=60, public")]) | 0.000486 |
def is_promisc(ip, fake_bcast="ff:ff:00:00:00:00", **kargs):
"""Try to guess if target is in Promisc mode. The target is provided by its ip.""" # noqa: E501
responses = srp1(Ether(dst=fake_bcast) / ARP(op="who-has", pdst=ip), type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0, **kargs) # noqa: E501
return responses is not None | 0.002899 |
def Send(self, command_id, data=b'', size=0):
"""Send/buffer FileSync packets.
Packets are buffered and only flushed when this connection is read from. All
messages have a response from the device, so this will always get flushed.
Args:
command_id: Command to send.
data: Optional data to send, must set data or size.
size: Optionally override size from len(data).
"""
if data:
if not isinstance(data, bytes):
data = data.encode('utf8')
size = len(data)
if not self._CanAddToSendBuffer(len(data)):
self._Flush()
buf = struct.pack(b'<2I', self.id_to_wire[command_id], size) + data
self.send_buffer[self.send_idx:self.send_idx + len(buf)] = buf
self.send_idx += len(buf) | 0.004802 |
def sample(args):
"""
%prog sample bedfile sizesfile
Sample bed file and remove high-coverage regions.
When option --targetsize is used, this program uses a different mode. It
first calculates the current total bases from all ranges and then compare to
targetsize, if more, then sample down as close to targetsize as possible.
Selection via --raindrop has the effect of making coverage even. Selected
reads have the property that their end points are not within a certain
window from one another. One sweep goes from left to right, the other in
the reverse direction.
"""
import random
from jcvi.assembly.coverage import Coverage
p = OptionParser(sample.__doc__)
p.add_option("--raindrop", default=0, type="int",
help="Raindrop selection, ignores all other options")
p.add_option("--max", default=10, type="int",
help="Max depth allowed")
p.add_option("--targetsize", type="int",
help="Sample bed file to get target base number")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, sizesfile = args
pf = bedfile.rsplit(".", 1)[0]
raindrop = opts.raindrop
# Raindrop method
if raindrop:
bed = Bed(bedfile)
forward = []
for b in bed:
if not forward or abs(b.start - forward[-1].start) >= raindrop:
forward.append(b)
reverse = []
bed.sort(key=lambda x: -x.end)
for b in bed:
if not reverse or abs(b.end - reverse[-1].end) >= raindrop:
reverse.append(b)
for tag, L in zip(("forward", "reverse"), (forward, reverse)):
logging.debug("Selected {0} features in {1} direction, span: {2}".\
format(len(L), tag, sum(x.span for x in L)))
selected = Bed()
selected.extend(set(forward + reverse))
selected.print_to_file(opts.outfile, sorted=True)
return
targetsize = opts.targetsize
if targetsize:
bed = Bed(bedfile)
samplebed = pf + ".sample.bed"
fw = open(samplebed, "w")
nfeats = len(bed)
nbases = bed.sum(unique=False)
targetfeats = int(round(nfeats * targetsize / nbases))
sub_bed = random.sample(bed, targetfeats)
for b in sub_bed:
print(b, file=fw)
logging.debug("File written to `{0}`.".format(samplebed))
return
c = Coverage(bedfile, sizesfile)
coveragefile = c.filename
samplecoveragefile = pf + ".sample.coverage"
fw = open(samplecoveragefile, "w")
fp = open(coveragefile)
for row in fp:
seqid, start, end, cov = row.split()
cov = int(cov)
if cov <= opts.max:
fw.write(row)
fw.close()
samplebedfile = pf + ".sample.bed"
cmd = "intersectBed -a {0} -b {1} -wa -u".format(bedfile, samplecoveragefile)
sh(cmd, outfile=samplebedfile)
logging.debug("Sampled bedfile written to `{0}`.".format(samplebedfile)) | 0.001621 |
def get_method_docstring(cls, method_name):
"""
return method docstring
if method docstring is empty we get docstring from parent
:param method:
:type method:
:return:
:rtype:
"""
method = getattr(cls, method_name, None)
if method is None:
return
docstrign = inspect.getdoc(method)
if docstrign is None:
for base in cls.__bases__:
docstrign = get_method_docstring(base, method_name)
if docstrign:
return docstrign
else:
return None
return docstrign | 0.001727 |
def _detect_timezone():
'''
Get timezone as set by the system
'''
default_timezone = 'America/New_York'
locale_code = locale.getdefaultlocale()
return default_timezone if not locale_code[0] else \
str(pytz.country_timezones[locale_code[0][-2:]][0]) | 0.003571 |
def splitText(text):
""" Split text into sub segments of size not bigger than MAX_SEGMENT_SIZE. """
segments = []
remaining_text = __class__.cleanSpaces(text)
while len(remaining_text) > __class__.MAX_SEGMENT_SIZE:
cur_text = remaining_text[:__class__.MAX_SEGMENT_SIZE]
# try to split at punctuation
split_idx = __class__.findLastCharIndexMatching(cur_text,
# https://en.wikipedia.org/wiki/Unicode_character_property#General_Category
lambda x: unicodedata.category(x) in ("Ps", "Pe", "Pi", "Pf", "Po"))
if split_idx is None:
# try to split at whitespace
split_idx = __class__.findLastCharIndexMatching(cur_text,
lambda x: unicodedata.category(x).startswith("Z"))
if split_idx is None:
# try to split at anything not a letter or number
split_idx = __class__.findLastCharIndexMatching(cur_text,
lambda x: not (unicodedata.category(x)[0] in ("L", "N")))
if split_idx is None:
# split at the last char
split_idx = __class__.MAX_SEGMENT_SIZE - 1
new_segment = cur_text[:split_idx + 1].rstrip()
segments.append(new_segment)
remaining_text = remaining_text[split_idx + 1:].lstrip(string.whitespace + string.punctuation)
if remaining_text:
segments.append(remaining_text)
return segments | 0.010417 |
def one(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .one(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Record instance if one and only one record is found. Else raises.
Raises
------
RecordDoesNotExistError if no record is found
MultipleRecordsReturnedError if multiple records are found
"""
return Queryset(self, records=self._records.values()).one(filter_by=filter_by) | 0.005682 |
def add_floatspin(self, setting):
'''add a floating point spin control'''
from wx.lib.agw.floatspin import FloatSpin
tab = self.panel(setting.tab)
default = setting.value
(minv, maxv) = setting.range
ctrl = FloatSpin(tab, -1,
value = default,
min_val = minv,
max_val = maxv,
increment = setting.increment)
if setting.format is not None:
ctrl.SetFormat(setting.format)
if setting.digits is not None:
ctrl.SetDigits(setting.digits)
self._add_input(setting, ctrl, value=default) | 0.014881 |
def _convert_to_dataset_class(df, dataset_class, expectations_config=None, autoinspect_func=None):
"""
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectations_config
"""
if expectations_config is not None:
# Cast the dataframe into the new class, and manually initialize expectations according to the provided configuration
df.__class__ = dataset_class
df._initialize_expectations(expectations_config)
else:
# Instantiate the new Dataset with default expectations
try:
df = dataset_class(df, autoinspect_func=autoinspect_func)
except:
raise NotImplementedError(
"read_csv requires a Dataset class that can be instantiated from a Pandas DataFrame")
return df | 0.007453 |
def format_value(value,number_format):
"Convert number to string using a style string"
style,sufix,scale = decode_format(number_format)
fmt = "{0:" + style + "}" + sufix
return fmt.format(scale * value) | 0.018182 |
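decode_format is not shown here, but if it returned, say, ('.2f', ' %', 100) for a hypothetical percent style, format_value would behave like this minimal sketch:

# Hypothetical: style='.2f', sufix=' %', scale=100 for a percent format.
style, sufix, scale = '.2f', ' %', 100
fmt = "{0:" + style + "}" + sufix
print(fmt.format(scale * 0.1234))  # '12.34 %'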
def _child_as_list(self, pk, ck):
"""Returns a list of values from the child FlatterDict instance
with string based integer keys.
:param str pk: The parent key
:param str ck: The child key
:rtype: list
"""
return [self._values[pk][ck][k]
for k in sorted(self._values[pk][ck].keys(),
key=lambda x: int(x))] | 0.004866 |
def _add_notification(self, message):
""" add a notification in the notification queue of a user
"""
sess = cherrypy.session
username = sess.get(SESSION_KEY, None)
if username not in self.notifications:
self.notifications[username] = []
self.notifications[username].append(message) | 0.005865 |
def update(self, friendly_name=values.unset, unique_name=values.unset,
email=values.unset, cc_emails=values.unset, status=values.unset,
verification_code=values.unset, verification_type=values.unset,
verification_document_sid=values.unset, extension=values.unset,
call_delay=values.unset):
"""
Update the HostedNumberOrderInstance
:param unicode friendly_name: A human readable description of this resource.
:param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
:param unicode email: Email.
:param unicode cc_emails: A list of emails.
:param HostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder.
:param unicode verification_code: A verification code.
:param HostedNumberOrderInstance.VerificationType verification_type: Verification Type.
:param unicode verification_document_sid: Verification Document Sid
:param unicode extension: Digits to dial after connecting the verification call.
:param unicode call_delay: The number of seconds, between 0 and 60, to delay before initiating the verification call.
:returns: Updated HostedNumberOrderInstance
:rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'UniqueName': unique_name,
'Email': email,
'CcEmails': serialize.map(cc_emails, lambda e: e),
'Status': status,
'VerificationCode': verification_code,
'VerificationType': verification_type,
'VerificationDocumentSid': verification_document_sid,
'Extension': extension,
'CallDelay': call_delay,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return HostedNumberOrderInstance(self._version, payload, sid=self._solution['sid'], ) | 0.006737 |
def graph_nodes_sorted(self):
""" Returns an (ascending) sorted list of graph's nodes (name is used as key).
Returns
-------
:any:`list`
Description #TODO check
"""
return sorted(self._graph.nodes(), key=lambda _: repr(_)) | 0.013699 |
def negative_binomial_like(x, mu, alpha):
R"""
Negative binomial log-likelihood.
The negative binomial
distribution describes a Poisson random variable whose rate
parameter is gamma distributed. PyMC's chosen parameterization is
based on this mixture interpretation.
.. math::
f(x \mid \mu, \alpha) = \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)} (\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x
:Parameters:
- `x` : x = 0,1,2,...
- `mu` : mu > 0
- `alpha` : alpha > 0
.. note::
- :math:`E[x]=\mu`
- In Wikipedia's parameterization,
:math:`r=\alpha`,
:math:`p=\mu/(\mu+\alpha)`,
:math:`\mu=rp/(1-p)`
"""
alpha = np.array(alpha)
if (alpha > 1e10).any():
if (alpha > 1e10).all():
# Return Poisson when alpha gets very large
return flib.poisson(x, mu)
# Split big and small dispersion values
big = alpha > 1e10
return flib.poisson(x[big], mu[big]) + flib.negbin2(x[big - True],
mu[big - True], alpha[big - True])
return flib.negbin2(x, mu, alpha) | 0.002549 |
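A hedged numeric cross-check of the parameterization note above, using scipy's nbinom rather than the flib routines used in the code: scipy's success probability corresponds to alpha/(mu+alpha), i.e. one minus the Wikipedia p quoted in the docstring.

from scipy import stats

mu, alpha, x = 4.0, 2.0, 3
# pmf of f(x | mu, alpha) via scipy's (n, p) parameterization:
# n = alpha, p = alpha / (mu + alpha)
print(stats.nbinom.pmf(x, alpha, alpha / (mu + alpha)))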
def set_codes(self, codes, reject=False):
"""
Set the accepted or rejected codes codes list.
:param codes: A list of the response codes.
:param reject: If True, the listed codes will be rejected, and
the conversion will format as "-"; if False,
only the listed codes will be accepted, and the
conversion will format as "-" for all the
others.
"""
self.codes = set(codes)
self.reject = reject | 0.003697 |
def global_exception_handler(handler):
"""add a callback for when an exception goes uncaught in any greenlet
:param handler:
the callback function. must be a function taking 3 arguments:
- ``klass`` the exception class
- ``exc`` the exception instance
- ``tb`` the traceback object
:type handler: function
Note also that the callback is only held by a weakref, so if all other refs
to the function are lost it will stop handling greenlets' exceptions
"""
if not hasattr(handler, "__call__"):
raise TypeError("exception handlers must be callable")
log.info("setting a new global exception handler")
state.global_exception_handlers.append(weakref.ref(handler))
return handler | 0.001319 |
def parse_type(self, hdat, dataobj=None):
'''
Parses the dtype out of the header data or the array, depending on which is given; if both,
then the header-data overrides the array; if neither, then np.float32.
'''
try: dataobj = dataobj.dataobj
except Exception: pass
        dtype = np.asarray(dataobj).dtype if dataobj is not None else self.default_type()
if hdat and 'type' in hdat: dtype = np.dtype(hdat['type'])
elif hdat and 'dtype' in hdat: dtype = np.dtype(hdat['dtype'])
return dtype | 0.017889 |
def read_file(filename, print_error=True):
"""Returns the contents of a file."""
try:
for encoding in ['utf-8', 'latin1']:
try:
with io.open(filename, encoding=encoding) as fp:
return fp.read()
except UnicodeDecodeError:
pass
except IOError as exception:
if print_error:
print(exception, file=sys.stderr)
return None | 0.002273 |
def read(self, b=-1):
"""Keep reading from source stream until either the source stream is done
or the requested number of bytes have been obtained.
:param int b: number of bytes to read
:return: All bytes read from wrapped stream
:rtype: bytes
"""
remaining_bytes = b
data = io.BytesIO()
while True:
try:
chunk = to_bytes(self.__wrapped__.read(remaining_bytes))
except ValueError:
if self.__wrapped__.closed:
break
raise
if not chunk:
break
data.write(chunk)
remaining_bytes -= len(chunk)
if remaining_bytes <= 0:
break
return data.getvalue() | 0.003755 |
def get_identifier(self, idx):
"""Return an identifier for the data at index `idx`
.. versionchanged:: 0.4.2
indexing starts at 1 instead of 0
"""
name = self._get_cropped_file_names()[idx]
return "{}:{}:{}".format(self.identifier, name, idx + 1) | 0.006689 |
def get_times(self):
"""
        Return a list of occurrence times of the events
:return: list of times
"""
if not self.n:
return list()
ret = list()
for item in self._event_times:
ret += list(self.__dict__[item])
return ret + list(matrix(ret) - 1e-6) | 0.00597 |
def parse(self, limit=None):
"""
Override Source.parse()
Parses version and interaction information from CTD
Args:
:param limit (int, optional) limit the number of rows processed
Returns:
:return None
"""
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
LOG.info("Parsing files...")
# pub_map = dict()
# file_path = '/'.join((self.rawdir,
# self.static_files['publications']['file']))
# if os.path.exists(file_path) is True:
# pub_map = self._parse_publication_file(
# self.static_files['publications']['file']
# )
if self.test_only:
self.test_mode = True
self.geno = Genotype(self.graph)
self.pathway = Pathway(self.graph)
self._parse_ctd_file(
limit, self.files['chemical_disease_interactions']['file'])
self._parse_ctd_file(limit, self.files['gene_pathway']['file'])
self._parse_ctd_file(limit, self.files['gene_disease']['file'])
self._parse_curated_chem_disease(limit)
LOG.info("Done parsing files.")
return | 0.001671 |
def get_single_child_from_xml(elem, tag):
"""
Get a single child tag from an XML element.
Similar to "elem.find(tag)", but warns if there are multiple child tags with the given name.
"""
children = elem.findall(tag)
if not children:
return None
if len(children) > 1:
logging.warning('Tag "%s" has more than one child tags with name "%s" in input file, '
'ignoring all but the first.',
elem.tag, tag)
return children[0] | 0.005837 |
def timediff(time):
"""Return the difference in seconds between now and the given time."""
now = datetime.datetime.utcnow()
diff = now - time
diff_sec = diff.total_seconds()
return diff_sec | 0.004785 |
def wait(self, pattern, seconds=None):
""" Searches for an image pattern in the given region, given a specified timeout period
Functionally identical to find(). If a number is passed instead of a pattern,
just waits the specified number of seconds.
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
if isinstance(pattern, (int, float)):
if pattern == FOREVER:
while True:
time.sleep(1) # Infinite loop
time.sleep(pattern)
return None
if seconds is None:
seconds = self.autoWaitTimeout
findFailedRetry = True
timeout = time.time() + seconds
while findFailedRetry:
while True:
match = self.exists(pattern)
if match:
return match
if time.time() >= timeout:
break
path = pattern.path if isinstance(pattern, Pattern) else pattern
findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
if findFailedRetry:
time.sleep(self._repeatWaitTime)
return None | 0.005677 |
def _resolve_dependencies(self, cur, dependencies):
"""
        Function checks if dependent packages are installed in the DB
"""
list_of_deps_ids = []
_list_of_deps_unresolved = []
_is_deps_resolved = True
for k, v in dependencies.items():
pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name)
cur.execute("SELECT _find_schema('{0}', '{1}')"
.format(k, v))
pgpm_v_ext = tuple(cur.fetchone()[0][1:-1].split(','))
try:
list_of_deps_ids.append(int(pgpm_v_ext[0]))
except:
pass
if not pgpm_v_ext[0]:
_is_deps_resolved = False
_list_of_deps_unresolved.append("{0}: {1}".format(k, v))
return _is_deps_resolved, list_of_deps_ids, _list_of_deps_unresolved | 0.00451 |
def censor(input_text):
""" Returns the input string with profanity replaced with a random string
of characters plucked from the censor_characters pool.
"""
ret = input_text
words = get_words()
for word in words:
curse_word = re.compile(re.escape(word), re.IGNORECASE)
cen = "".join(get_censor_char() for i in list(word))
ret = curse_word.sub(cen, ret)
return ret | 0.002404 |
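A self-contained sketch of the same replacement technique shown above, using a made-up word list and censor pool in place of the module's get_words()/get_censor_char() helpers:

import random
import re

def censor_demo(text, words=("darn",), pool="!@#$%"):
    # Replace each listed word (case-insensitively) with a same-length
    # run of characters drawn from the censor pool.
    for word in words:
        pattern = re.compile(re.escape(word), re.IGNORECASE)
        replacement = "".join(random.choice(pool) for _ in word)
        text = pattern.sub(replacement, text)
    return text

print(censor_demo("Darn it, that darn cat!"))  # e.g. "$#@! it, that %!@# cat!"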
def _process_callback(self, statefield):
"""
Exchange the auth code for actual credentials,
then redirect to the originally requested page.
"""
# retrieve session and callback variables
try:
session_csrf_token = session.get('oidc_csrf_token')
state = _json_loads(urlsafe_b64decode(request.args['state'].encode('utf-8')))
csrf_token = state['csrf_token']
code = request.args['code']
except (KeyError, ValueError):
logger.debug("Can't retrieve CSRF token, state, or code",
exc_info=True)
return True, self._oidc_error()
# check callback CSRF token passed to IdP
# against session CSRF token held by user
if csrf_token != session_csrf_token:
logger.debug("CSRF token mismatch")
return True, self._oidc_error()
# make a request to IdP to exchange the auth code for OAuth credentials
flow = self._flow_for_request()
credentials = flow.step2_exchange(code)
id_token = credentials.id_token
if not self._is_id_token_valid(id_token):
logger.debug("Invalid ID token")
if id_token.get('hd') != current_app.config[
'OIDC_GOOGLE_APPS_DOMAIN']:
return True, self._oidc_error(
"You must log in with an account from the {0} domain."
.format(current_app.config['OIDC_GOOGLE_APPS_DOMAIN']),
self.WRONG_GOOGLE_APPS_DOMAIN)
return True, self._oidc_error()
# store credentials by subject
# when Google is the IdP, the subject is their G+ account number
self.credentials_store[id_token['sub']] = credentials.to_json()
# Retrieve the extra statefield data
try:
response = self.extra_data_serializer.loads(state[statefield])
except BadSignature:
logger.error('State field was invalid')
return True, self._oidc_error()
# set a persistent signed cookie containing the ID token
# and redirect to the final destination
self._set_cookie_id_token(id_token)
return False, response | 0.001336 |
def distance(self, e):
"""
Distance between this interval and e -- number of nucleotides.
We consider intervals that overlap to have a distance of 0 to each other.
The distance between two intervals on different chromosomes is considered
undefined, and causes an exception to be raised.
:return: the distance from this GenomicInterval to e.
:param e: the other genomic interval to find the distance to.
:raise GenomicIntervalError: if self and e are on different chromosomes.
"""
if e.chrom != self.chrom:
raise GenomicIntervalError("cannot get distance from " + str(self) +
" to " + str(e) + " as they're on " +
"different chromosomes")
dist = 0
if not e.intersects(self):
dist = max(self.start, e.start) - min(self.end, e.end)
return dist | 0.00344 |
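A worked instance of the distance rule above, written as plain arithmetic rather than through the GenomicInterval class; the bounds are made up and the intervals are treated as half-open for this sketch:

start_a, end_a = 10, 20
start_b, end_b = 25, 35
overlap = start_a < end_b and start_b < end_a
dist = 0 if overlap else max(start_a, start_b) - min(end_a, end_b)
print(dist)  # 5 -- the gap between positions 20 and 25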
def _create_product_map(self):
"""Create a map of all products produced by this or a dependency."""
self._product_map = {}
for dep in self._tile.dependencies:
try:
dep_tile = IOTile(os.path.join('build', 'deps', dep['unique_id']))
except (ArgumentError, EnvironmentError):
raise BuildError("Could not find required dependency", name=dep['name'])
self._add_products(dep_tile)
self._add_products(self._tile, show_all=True) | 0.007634 |
def execute(self, input_data):
''' Execute Method '''
# View on all the meta data files in the sample
fields = ['filename', 'md5', 'length', 'customer', 'import_time', 'type_tag']
view = {key:input_data['meta'][key] for key in fields}
return view | 0.013937 |
def load_phenofile(self, file, indices=[], names=[], sample_file=False):
"""Load phenotype data from phenotype file
Whitespace delimited, FAMID, INDID, VAR1, [VAR2], etc
Users can specify phenotypes of interest via indices and names.
        Indices are 1 based and start with the first variable. Names
        must match a name specified in the header (case is ignored). """
file.seek(0)
self.phenotype_names = []
if file:
header = ["", "", ""]
header = file.readline().strip().upper().split()
line_number = 0
valid_indices = [int(x) for x in indices]
valid_names = []
for name in names:
if name.strip() != "":
valid_names.append(name)
# Rows ignored because family/individual missing from our pedigree data
ignored_data = []
# We can accept a default phenotype column if we only have 3 columns
if len(header) == 3:
if len(valid_names) + len(valid_indices) == 0:
valid_indices.append(1)
if header[0].upper() == "FID":
phenotype_names = header[2:]
for name in valid_names:
try:
valid_indices.append(phenotype_names.index(name.upper()) + 1)
except:
raise InvalidSelection(
"The name, %s, was not found in %s" % (name, file.name)
)
line_number = 1
if len(valid_indices) > 0 and max(valid_indices) > (len(header)-2):
raise InvalidSelection(
"The index, %s, is larger than the number of entries in the file, %s:%s" %
(max(valid_indices), file.name, line_number)
)
for i in xrange(0, len(valid_indices)):
self.phenotype_names.append(phenotype_names[valid_indices[i]-1])
# Dump the second line for sample_file==True
if sample_file:
file.readline()
line_number += 1
# if we don't have a header, we'll create dummy names
else:
file.seek(0)
if len(valid_names) > 0:
raise MalformedInputFile(
"Names only work with phenotype files with headers: %s for file %s" %
(",".join(names), file.name)
)
if len(valid_indices) > 0 and max(valid_indices) > (len(header)-2):
raise InvalidSelection(
"The index, %s, is larger than the number of entries in the file, %s:%s" %
(max(valid_indices), file.name, line_number)
)
self.phenotype_names = []
for i in xrange(0, len(valid_indices)):
self.phenotype_names.append("Pheno-%s" % (valid_indices[i]))
pheno_count = len(valid_indices)
self.phenotype_data = numpy.empty((pheno_count, len(self.pedigree_data)))
self.phenotype_data.fill(PhenoCovar.missing_encoding)
for line in file:
line_number += 1
words = line.split()
iid = ":".join(words[0:2])
# Indexes are 1 based...silly humans
if len(valid_indices) > 0 and max(valid_indices) > (len(words)-2):
raise InvalidSelection(
"The index, %s, is larger than the number of entries in the file, %s:%s" %
(max(valid_indices), file.name, line_number)
)
pidx = 0
for idx in valid_indices:
try:
pheno = float(words[1+idx])
if iid not in self.pedigree_data:
ignored_data.append(iid)
else:
self.phenotype_data[pidx][self.pedigree_data[iid]] = pheno
pidx += 1
except:
raise MalformedInputFile(
("Invalid input found in phenotype file on line: %s:%d. \n"+
"The line in question looks like this: \n--> %s") %
(file.name, line_number, line.strip())
)
if self.phenotype_data.shape[1] == len(ignored_data):
raise NoMatchedPhenoCovars("No matching individuals were found in the phenotype file") | 0.005524 |
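For reference, a hypothetical phenotype file in the layout the docstring describes (the header row is optional; when present its first column must be FID, and phenotypes can then be selected either by 1-based index or by these header names):

FID    IID    BMI     HEIGHT
fam1   ind1   24.7    172.0
fam1   ind2   31.2    169.5
fam2   ind1   22.1    180.3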
def add_data(self, addr, data):
"""! @brief Add a block of data to be programmed.
@note Programming does not start until the method program() is called.
@param self
@param addr Base address of the block of data passed to this method. The entire block of
data must be contained within the flash memory region associated with this instance.
@param data Data to be programmed. Should be a list of byte values.
@exception FlashFailure Address range of added data is outside the address range of the
flash region associated with the builder.
@exception ValueError Attempt to add overlapping data.
"""
# Ignore empty data.
if len(data) == 0:
return
# Sanity check
if not self.flash.region.contains_range(start=addr, length=len(data)):
raise FlashFailure("Flash address range 0x%x-0x%x is not contained within region '%s'" %
(addr, addr + len(data) - 1, self.flash.region.name))
# Add operation to list
self.flash_operation_list.append(_FlashOperation(addr, data))
self.buffered_data_size += len(data)
# Keep list sorted
self.flash_operation_list = sorted(self.flash_operation_list, key=lambda operation: operation.addr)
# Verify this does not overlap
prev_flash_operation = None
for operation in self.flash_operation_list:
if prev_flash_operation is not None:
if prev_flash_operation.addr + len(prev_flash_operation.data) > operation.addr:
raise ValueError("Error adding data - Data at 0x%x..0x%x overlaps with 0x%x..0x%x"
% (prev_flash_operation.addr, prev_flash_operation.addr + len(prev_flash_operation.data),
operation.addr, operation.addr + len(operation.data)))
prev_flash_operation = operation | 0.008603 |
def launch(self, command_line, dependencies_description=None, env=[], remote_staging=[], job_config=None):
"""
Queue up the execution of the supplied `command_line` on the remote
server. Called launch for historical reasons, should be renamed to
enqueue or something like that.
**Parameters**
command_line : str
Command to execute.
"""
launch_params = dict(command_line=command_line, job_id=self.job_id)
submit_params_dict = submit_params(self.destination_params)
if submit_params_dict:
launch_params['params'] = json_dumps(submit_params_dict)
if dependencies_description:
launch_params['dependencies_description'] = json_dumps(dependencies_description.to_dict())
if env:
launch_params['env'] = json_dumps(env)
if remote_staging:
launch_params['remote_staging'] = json_dumps(remote_staging)
if job_config and 'touch_outputs' in job_config:
# message clients pass the entire job config
launch_params['submit_extras'] = json_dumps({'touch_outputs': job_config['touch_outputs']})
if job_config and self.setup_handler.local:
# Setup not yet called, job properties were inferred from
# destination arguments. Hence, must have Pulsar setup job
# before queueing.
setup_params = _setup_params_from_job_config(job_config)
launch_params['setup_params'] = json_dumps(setup_params)
return self._raw_execute("submit", launch_params) | 0.003135 |
def cmd(send, msg, args):
"""Handles quotes.
Syntax: {command} <number|nick>, !quote --add <quote> --nick <nick> (--approve), !quote --list, !quote --delete <number>, !quote --edit <number> <quote> --nick <nick>
!quote --search (--offset <num>) <number>
"""
session = args['db']
parser = arguments.ArgParser(args['config'])
parser.add_argument('--approve', action='store_true')
parser.add_argument('--nick', nargs='?')
parser.add_argument('--offset', nargs='?', type=int, default=0)
parser.add_argument('quote', nargs='*')
group = parser.add_mutually_exclusive_group()
group.add_argument('--list', action='store_true')
group.add_argument('--add', action='store_true')
group.add_argument('--delete', '--remove', type=int)
group.add_argument('--edit', type=int)
group.add_argument('--search', nargs='*')
if not msg:
send(do_get_quote(session))
return
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
if cmdargs.add:
if args['type'] == 'privmsg':
send("You want everybody to know about your witty sayings, right?")
else:
if cmdargs.nick is None:
send('You must specify a nick.')
elif not cmdargs.quote:
send('You must specify a quote.')
else:
isadmin = args['is_admin'](args['nick']) or not args['config']['feature']['quoteapprove']
approved = cmdargs.approve or not args['config']['feature']['quoteapprove']
do_add_quote(cmdargs.nick, " ".join(cmdargs.quote), session, isadmin, approved, send, args)
elif cmdargs.list:
send(do_list_quotes(session, args['config']['core']['url']))
elif cmdargs.delete:
send(do_delete_quote(args, session, cmdargs.delete))
elif cmdargs.edit:
if args['is_admin'](args['nick']):
send(do_update_quote(session, cmdargs.edit, cmdargs.nick, cmdargs.quote))
else:
send("You aren't allowed to edit quotes. Please ask a bot admin to do it")
elif cmdargs.search:
if cmdargs.approve or cmdargs.nick:
send("Invalid option for --search")
else:
send(search_quote(session, cmdargs.offset, cmdargs.search))
else:
if msg.isdigit():
send(do_get_quote(session, int(msg)))
else:
if not re.match(args['config']['core']['nickregex'], msg):
send('Invalid nick %s.' % msg)
else:
send(get_quotes_nick(session, msg)) | 0.002656 |