def iterators(self, frequency=None):
"""
Returns the iterators (i.e. subject_id, visit_id) that the pipeline
iterates over
Parameters
----------
frequency : str | None
A selected data frequency to use to determine which iterators are
required. If None, all input frequencies of the pipeline are
assumed
"""
iterators = set()
if frequency is None:
input_freqs = list(self.input_frequencies)
else:
input_freqs = [frequency]
for freq in input_freqs:
iterators.update(self.study.FREQUENCIES[freq])
        return iterators
def uri(host='localhost', port=5432, dbname='postgres', user='postgres',
password=None):
"""Return a PostgreSQL connection URI for the specified values.
:param str host: Host to connect to
:param int port: Port to connect on
:param str dbname: The database name
:param str user: User to connect as
:param str password: The password to use, None for no password
:return str: The PostgreSQL connection URI
"""
if port:
host = '%s:%s' % (host, port)
if password:
return 'postgresql://%s:%s@%s/%s' % (user, password, host, dbname)
    return 'postgresql://%s@%s/%s' % (user, host, dbname)
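A quick usage sketch, assuming the `uri` function above is in scope; the expected URIs are shown as comments:

# Usage sketch for the `uri` helper above.
print(uri())
# -> postgresql://postgres@localhost:5432/postgres
print(uri(host='db.example.com', user='app', password='s3cret', dbname='prod'))
# -> postgresql://app:s3cret@db.example.com:5432/prod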
def addLogHandler(func):
"""
Add a custom log handler.
@param func: a function object with prototype (level, object, category,
message) where level is either ERROR, WARN, INFO, DEBUG, or
LOG, and the rest of the arguments are strings or None. Use
getLevelName(level) to get a printable name for the log level.
@type func: a callable function
@raises TypeError: if func is not a callable
"""
if not callable(func):
raise TypeError("func must be callable")
if func not in _log_handlers:
        _log_handlers.append(func)
def get_version():
"""
parse __init__.py for version number instead of importing the file
see http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
"""
version_file = os.path.join(PKG, 'lib/version.py')
    with open(version_file, "rt") as version_fobj:
        ver_str_line = version_fobj.read()
version_regex = r'^__version__ = [\'"]([^\'"]*)[\'"]'
mo = re.search(version_regex, ver_str_line, re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError('Unable to find version string in %s.'
                           % (version_file,))
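A minimal, self-contained sketch of the same regex at work on a sample `version.py` body (the sample text is illustrative only):

import re

sample = '__version__ = "1.2.3"\n__author__ = "someone"\n'
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', sample, re.M)
print(mo.group(1))  # -> 1.2.3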
def op(self):
"""Returns the Instruction object corresponding to the op for the node else None"""
if 'type' not in self.data_dict or self.data_dict['type'] != 'op':
raise QiskitError("The node %s is not an op node" % (str(self)))
        return self.data_dict.get('op')
def _parseElfHeader(self, data):
"""Returns the elf header"""
ehdr = self.__classes.EHDR.from_buffer(data)
        return EhdrData(header=ehdr)
def _observe_block(self, change):
""" A change handler for the 'objects' list of the Include.
        If the Include is initialized, objects which are removed will be
        unparented and objects which are added will be reparented. Old
        objects will be destroyed if the 'destroy_old' flag is True.
"""
if self.is_initialized:
if change['type'] == 'update':
old_block = change['oldvalue']
old_parent = old_block.parent
for c in self.children:
old_parent.child_removed(c)
new_block = change['value']
                new_block.parent.insert_children(new_block, self.children)
def save_model(self, request, obj, form, change):
"""
If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``,
send a notification email to the user being saved if their
``active`` status has changed to ``True``.
If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``,
send a verification email instead.
"""
must_send_verification_mail_after_save = False
if change and settings.ACCOUNTS_APPROVAL_REQUIRED:
if obj.is_active and not User.objects.get(id=obj.id).is_active:
if settings.ACCOUNTS_VERIFICATION_REQUIRED:
# Accounts verification requires an inactive account
obj.is_active = False
# The token generated by send_verification_mail()
# must match the _saved_ User object,
# so postpone send_verification_mail() until later
must_send_verification_mail_after_save = True
else:
send_approved_mail(request, obj)
super(UserProfileAdmin, self).save_model(request, obj, form, change)
if must_send_verification_mail_after_save:
user = User.objects.get(id=obj.id)
            send_verification_mail(request, user, "signup_verify")
def get_repository_ids_by_asset(self, asset_id):
"""Gets the list of ``Repository`` ``Ids`` mapped to an ``Asset``.
arg: asset_id (osid.id.Id): ``Id`` of an ``Asset``
return: (osid.id.IdList) - list of repository ``Ids``
raise: NotFound - ``asset_id`` is not found
raise: NullArgument - ``asset_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('REPOSITORY', local=True)
lookup_session = mgr.get_asset_lookup_session(proxy=self._proxy)
lookup_session.use_federated_repository_view()
asset = lookup_session.get_asset(asset_id)
id_list = []
for idstr in asset._my_map['assignedRepositoryIds']:
id_list.append(Id(idstr))
        return IdList(id_list)
def _user_thread_main(self, target):
"""Main entry point for the thread that will run user's code."""
try:
# Wait for GLib main loop to start running before starting user code.
while True:
if self._gobject_mainloop is not None and self._gobject_mainloop.is_running():
# Main loop is running, we should be ready to make bluez DBus calls.
break
# Main loop isn't running yet, give time back to other threads.
time.sleep(0)
# Run user's code.
self._return_code = target()
# Assume good result (0 return code) if none is returned.
if self._return_code is None:
self._return_code = 0
# Signal the main loop to exit.
self._gobject_mainloop.quit()
except Exception as ex:
# Something went wrong. Raise the exception on the main thread to
# exit.
self._exception = sys.exc_info()
            self._gobject_mainloop.quit()
def get_ccle_cna():
"""Get CCLE CNA
-2 = homozygous deletion
-1 = hemizygous deletion
0 = neutral / no change
1 = gain
2 = high level amplification
"""
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
gene_list = body.get('gene_list')
cell_lines = body.get('cell_lines')
cna = cbio_client.get_ccle_cna(gene_list, cell_lines)
res = {'cna': cna}
    return res
def closeStreamToFile(self) :
"""Appends the remaining commited lines and closes the stream. If no stream is active, raises a ValueError"""
if self.streamBuffer is None :
raise ValueError("Commit lines is only for when you are streaming to a file")
        self.streamBuffer = [str(line) for line in self.streamBuffer]
self.streamFile.write('\n'.join(self.streamBuffer))
self.streamFile.close()
self.streamFile = None
self.writeRate = None
self.streamBuffer = None
        self.keepInMemory = True
def log(context):
"""See history"""
context.obj.find_repo_type()
if context.obj.vc_name == 'git':
format = ("--pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset "
"%s %Cgreen(%cr) %C(bold blue)<%an>%Creset'")
context.obj.call(['git', 'log', '--graph', format,
'--abbrev-commit', '--stat'])
elif context.obj.vc_name == 'hg':
template = (
'"changeset: {rev}:{node|short} {tags}\n'
' summary: {desc|firstline|fill68|tabindent|tabindent}"')
        context.obj.call(['hg', 'log', '-G', '--template', template])
def registration_function_for_optionable(self, optionable_class):
"""Returns a function for registering options on the given scope."""
self._assert_not_frozen()
# TODO(benjy): Make this an instance of a class that implements __call__, so we can
        # docstring it, and so it's less weird than attaching properties to a function.
def register(*args, **kwargs):
kwargs['registering_class'] = optionable_class
self.register(optionable_class.options_scope, *args, **kwargs)
# Clients can access the bootstrap option values as register.bootstrap.
register.bootstrap = self.bootstrap_option_values()
# Clients can access the scope as register.scope.
register.scope = optionable_class.options_scope
        return register
def iiscgi(application):
"""A specialized version of the reference WSGI-CGI server to adapt to Microsoft IIS quirks.
This is not a production quality interface and will behave badly under load.
"""
try:
from wsgiref.handlers import IISCGIHandler
except ImportError:
print("Python 3.2 or newer is required.")
if not __debug__:
warnings.warn("Interactive debugging and other persistence-based processes will not work.")
    IISCGIHandler().run(application)
def get_means_and_scales(self):
"""
Gets the mean and scales for normal approximating parameters
"""
        return self.optim.parameters[::2], np.exp(self.optim.parameters[1::2])
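The `[::2]`/`[1::2]` slicing interleaves means and log-scales in one flat parameter vector; a self-contained illustration:

import numpy as np

# Flat vector: [mean_0, log_scale_0, mean_1, log_scale_1, ...]
params = np.array([0.5, -1.0, 2.0, 0.0])
means, scales = params[::2], np.exp(params[1::2])
print(means)   # [0.5 2. ]
print(scales)  # [0.36787944 1.        ]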
def get(self, request):
"""
method called on GET request on this view
:param django.http.HttpRequest request: The current request object:
        :return: The rendering of ``cas_server/serviceValidate.xml`` if no error is raised,
            the rendering of ``cas_server/serviceValidateError.xml`` otherwise.
:rtype: django.http.HttpResponse
"""
# define the class parameters
self.request = request
self.service = request.GET.get('service')
self.ticket = request.GET.get('ticket')
self.pgt_url = request.GET.get('pgtUrl')
self.renew = True if request.GET.get('renew') else False
# service and ticket parameter are mandatory
if not self.service or not self.ticket:
logger.warning("ValidateService: missing ticket or service")
return ValidateError(
u'INVALID_REQUEST',
u"you must specify a service and a ticket"
).render(request)
else:
try:
# search the ticket in the database
self.ticket, proxies = self.process_ticket()
# prepare template rendering context
params = {
'username': self.ticket.username(),
'attributes': self.ticket.attributs_flat(),
'proxies': proxies,
'auth_date': self.ticket.user.last_login.replace(microsecond=0).isoformat(),
'is_new_login': 'true' if self.ticket.renew else 'false'
}
# if pgtUrl is set, require https or localhost
if self.pgt_url and (
self.pgt_url.startswith("https://") or
re.match(r"^http://(127\.0\.0\.1|localhost)(:[0-9]+)?(/.*)?$", self.pgt_url)
):
return self.process_pgturl(params)
else:
logger.info(
"ValidateService: ticket %s validated for user %s on service %s." % (
self.ticket.value,
self.ticket.user.username,
self.ticket.service
)
)
logger.debug(
"ValidateService: User attributs are:\n%s" % (
pprint.pformat(self.ticket.attributs),
)
)
return render(
request,
"cas_server/serviceValidate.xml",
params,
content_type="text/xml; charset=utf-8"
)
except ValidateError as error:
logger.warning(
"ValidateService: validation error: %s %s" % (error.code, error.msg)
)
                return error.render(request)
def _directory (self):
"""The directory for this AitConfig."""
if self._filename is None:
return os.path.join(self._ROOT_DIR, 'config')
else:
            return os.path.dirname(self._filename)
def setup_subparser(
self, func=None, setup_as=None, insert_at=None, interprete=True,
epilog_sections=None, overwrite=False, append_epilog=True,
return_parser=False, name=None, **kwargs):
"""
Create a subparser with the name of the given function
        Parameters are the same as for the :meth:`setup_args` function; other
        parameters are passed to the :meth:`add_subparsers` method if (and only
        if) this method has not already been called.
Parameters
----------
%(FuncArgParser.setup_args.parameters)s
return_parser: bool
            If True, the created parser is returned instead of the function
name: str
The name of the created parser. If None, the function name is used
and underscores (``'_'``) are replaced by minus (``'-'``)
``**kwargs``
Any other parameter that is passed to the add_parser method that
creates the parser
Other Parameters
----------------
Returns
-------
FuncArgParser or %(FuncArgParser.setup_args.returns)s
If return_parser is True, the created subparser is returned
Examples
--------
Use this method as a decorator::
>>> from funcargparser import FuncArgParser
>>> parser = FuncArgParser()
>>> @parser.setup_subparser
... def my_func(my_argument=None):
... pass
>>> args = parser.parse_args('my-func -my-argument 1'.split())
"""
def setup(func):
if self._subparsers_action is None:
raise RuntimeError(
"No subparsers have yet been created! Run the "
"add_subparsers method first!")
# replace underscore by '-'
name2use = name
if name2use is None:
name2use = func.__name__.replace('_', '-')
kwargs.setdefault('help', docstrings.get_summary(
docstrings.dedents(inspect.getdoc(func))))
parser = self._subparsers_action.add_parser(name2use, **kwargs)
parser.setup_args(
func, setup_as=setup_as, insert_at=insert_at,
interprete=interprete, epilog_sections=epilog_sections,
overwrite=overwrite, append_epilog=append_epilog)
return func, parser
if func is None:
return lambda f: setup(f)[0]
else:
            return setup(func)[int(return_parser)]
def perform(self):
"""
Performs all stored actions.
"""
if self._driver.w3c:
self.w3c_actions.perform()
else:
for action in self._actions:
                action()
def _assign(self, assignment): # type: (Assignment) -> None
"""
Adds an Assignment to _assignments and _positive or _negative.
"""
self._assignments.append(assignment)
        self._register(assignment)
def _clear_expired_zones(self):
"""
Update zone status for all expired zones.
"""
zones = []
for z in list(self._zones.keys()):
zones += [z]
for z in zones:
if self._zones[z].status != Zone.CLEAR and self._zone_expired(z):
                self._update_zone(z, Zone.CLEAR)
def find_elements_by_css_selector(self, css_selector):
"""
Finds elements by css selector.
:Args:
- css_selector - CSS selector string, ex: 'a.nav#home'
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_css_selector('.foo')
"""
        return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def _common_rational_period(rational_periods: List[fractions.Fraction]
) -> fractions.Fraction:
"""Finds the least common integer multiple of some fractions.
The solution is the smallest positive integer c such that there
exists integers n_k satisfying p_k * n_k = c for all k.
"""
assert rational_periods, "no well-defined solution for an empty list"
common_denom = _lcm(p.denominator for p in rational_periods)
int_periods = [p.numerator * common_denom // p.denominator
for p in rational_periods]
int_common_period = _lcm(int_periods)
    return fractions.Fraction(int_common_period, common_denom)
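A self-contained sketch of the same computation, with `_lcm` assumed to be a plain integer least-common-multiple over an iterable (that helper is not shown in the snippet above):

import fractions
from functools import reduce
from math import gcd

def _lcm(values):  # assumed helper: integer lcm over an iterable
    return reduce(lambda a, b: a * b // gcd(a, b), values)

periods = [fractions.Fraction(1, 2), fractions.Fraction(3, 4)]
common_denom = _lcm(p.denominator for p in periods)          # 4
int_periods = [p.numerator * common_denom // p.denominator   # [2, 3]
               for p in periods]
print(fractions.Fraction(_lcm(int_periods), common_denom))   # 3/2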
def _calc_eddy_time(self):
""" estimate the eddy turn-over time in days """
ens = 0.
for j in range(self.nz):
            ens += .5*self.Hi[j] * self.spec_var(self.wv2*self.ph[j])
        return 2.*pi*np.sqrt( self.H / ens.sum() ) / 86400
def define_from_values(cls, xdtu, ydtu, zdtu, xdtu_0, ydtu_0, zdtu_0):
"""Define class object from from provided values.
Parameters
----------
xdtu : float
XDTU fits keyword value.
ydtu : float
YDTU fits keyword value.
zdtu : float
ZDTU fits keyword value.
xdtu_0 : float
XDTU_0 fits keyword value.
ydtu_0 : float
YDTU_0 fits keyword value.
zdtu_0 : float
ZDTU_0 fits keyword value.
"""
self = DtuConfiguration()
# define DTU variables
self.xdtu = xdtu
self.ydtu = ydtu
self.zdtu = zdtu
self.xdtu_0 = xdtu_0
self.ydtu_0 = ydtu_0
self.zdtu_0 = zdtu_0
        return self
def get_updates(self, offset=None, limit=None, timeout=None, allowed_updates=None):
"""
Use this method to receive incoming updates using long polling (wiki). An Array of Update objects is returned.
        Notes:
        1. This method will not work if an outgoing webhook is set up.
        2. In order to avoid getting duplicate updates, recalculate offset after each server response.
https://core.telegram.org/bots/api#getupdates
Optional keyword parameters:
        :param offset: Identifier of the first update to be returned. Must be greater by one than the highest among the identifiers of previously received updates. By default, updates starting with the earliest unconfirmed update are returned. An update is considered confirmed as soon as getUpdates is called with an offset higher than its update_id. The negative offset can be specified to retrieve updates starting from -offset update from the end of the updates queue. All previous updates will be forgotten.
:type offset: int
        :param limit: Limits the number of updates to be retrieved. Values between 1-100 are accepted. Defaults to 100.
:type limit: int
:param timeout: Timeout in seconds for long polling. Defaults to 0, i.e. usual short polling. Should be positive, short polling should be used for testing purposes only.
:type timeout: int
:param allowed_updates: List the types of updates you want your bot to receive. For example, specify [“message”, “edited_channel_post”, “callback_query”] to only receive updates of these types. See Update for a complete list of available update types. Specify an empty list to receive all updates regardless of type (default). If not specified, the previous setting will be used.Please note that this parameter doesn't affect updates created before the call to the getUpdates, so unwanted updates may be received for a short period of time.
:type allowed_updates: list of str|unicode
Returns:
:return: An Array of Update objects is returned
:rtype: list of pytgbot.api_types.receivable.updates.Update
"""
assert_type_or_raise(offset, None, int, parameter_name="offset")
assert_type_or_raise(limit, None, int, parameter_name="limit")
assert_type_or_raise(timeout, None, int, parameter_name="timeout")
assert_type_or_raise(allowed_updates, None, list, parameter_name="allowed_updates")
result = self.do("getUpdates", offset=offset, limit=limit, timeout=timeout, allowed_updates=allowed_updates)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.updates import Update
try:
return Update.from_array_list(result, list_level=1)
except TgApiParseException:
logger.debug("Failed parsing as api_type Update", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
        return result
def as_ompenv(cls, obj):
"""Convert an object into a OmpEnv"""
if isinstance(obj, cls): return obj
if obj is None: return cls()
        return cls(**obj)
def save_prep(cls, instance_or_instances):
"""Preprocess the object before the object is saved. This
automatically gets called when the save method gets called.
"""
instances = make_obj_list(instance_or_instances)
tokens = set(cls.objects.get_available_tokens(
count=len(instances),
token_length=cls.token_length
))
for instance in instances:
if not instance.token:
instance.token = tokens.pop()
super(AbstractTokenModel, cls).save_prep(
instance_or_instances=instances
        )
def get_lines_from_to(strings: List[str],
firstlinestart: str,
list_of_lastline_starts: Iterable[Optional[str]]) \
-> List[str]:
"""
Takes a list of ``strings``. Returns a list of strings FROM
``firstlinestart`` (inclusive) TO the first of ``list_of_lastline_starts``
(exclusive).
To search to the end of the list, use ``list_of_lastline_starts = []``.
To search to a blank line, use ``list_of_lastline_starts = [None]``
"""
start_index = find_line_beginning(strings, firstlinestart)
# log.debug("start_index: {}", start_index)
if start_index == -1:
return []
end_offset = None # itself a valid slice index
for lls in list_of_lastline_starts:
possible_end_offset = find_line_beginning(strings[start_index:], lls)
# log.debug("lls {!r} -> possible_end_offset {}",
# lls, possible_end_offset)
if possible_end_offset != -1: # found one
if end_offset is None or possible_end_offset < end_offset:
end_offset = possible_end_offset
end_index = None if end_offset is None else (start_index + end_offset)
# log.debug("end_index: {}", end_index)
    return strings[start_index:end_index]
def get_street_from_xy(self, **kwargs):
"""Obtain a list of streets around the specified point.
Args:
latitude (double): Latitude in decimal degrees.
longitude (double): Longitude in decimal degrees.
radius (int): Radius (in meters) of the search.
lang (str): Language code (*es* or *en*).
Returns:
Status boolean and parsed response (list[Street]), or message string
in case of error.
"""
# Endpoint parameters
params = {
'coordinateX': kwargs.get('longitude'),
'coordinateY': kwargs.get('latitude'),
'Radius': kwargs.get('radius'),
'cultureInfo': util.language_code(kwargs.get('lang'))
}
# Request
result = self.make_request('geo', 'get_street_from_xy', **params)
# Funny endpoint, no status code
if not util.check_result(result, 'site'):
return False, 'UNKNOWN ERROR'
# Parse
values = util.response_list(result, 'site')
        return True, [emtype.Street(**a) for a in values]
def flatten_pages(self, pages, level=1):
"""Recursively flattens pages data structure into a one-dimensional data structure"""
flattened = []
for page in pages:
if type(page) is list:
flattened.append(
{
'file': page[0],
'title': page[1],
'level': level,
})
if type(page) is dict:
if type(list(page.values())[0]) is str:
flattened.append(
{
'file': list(page.values())[0],
'title': list(page.keys())[0],
'level': level,
})
if type(list(page.values())[0]) is list:
flattened.extend(
self.flatten_pages(
list(page.values())[0],
level + 1)
)
        return flattened
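A usage sketch, assuming `flatten_pages` above is callable; the input mirrors a typical nested pages structure and the expected output is shown in comments:

# Hypothetical pages structure for illustration only.
pages = [
    {'Home': 'index.md'},
    {'Guide': [{'Install': 'install.md'}, {'Usage': 'usage.md'}]},
]
# flatten_pages(pages) would return:
# [{'file': 'index.md', 'title': 'Home', 'level': 1},
#  {'file': 'install.md', 'title': 'Install', 'level': 2},
#  {'file': 'usage.md', 'title': 'Usage', 'level': 2}]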
def getoptT(X, W, Y, Z, S, M_E, E, m0, rho):
''' Perform line search
'''
iter_max = 20
norm2WZ = np.linalg.norm(W, ord='fro')**2 + np.linalg.norm(Z, ord='fro')**2
f = np.zeros(iter_max + 1)
f[0] = F_t(X, Y, S, M_E, E, m0, rho)
t = -1e-1
for i in range(iter_max):
f[i + 1] = F_t(X + t * W, Y + t * Z, S, M_E, E, m0, rho)
if f[i + 1] - f[0] <= 0.5 * t * norm2WZ:
return t
t /= 2
    return t
def intermediary_to_schema(tables, relationships, output):
""" Transforms and save the intermediary representation to the file chosen. """
dot_file = _intermediary_to_dot(tables, relationships)
graph = AGraph()
graph = graph.from_string(dot_file)
extension = output.split('.')[-1]
    graph.draw(path=output, prog='dot', format=extension)
def nb_persons(self, role = None):
"""
Returns the number of persons contained in the entity.
        If ``role`` is provided, only the entity members with the given role are taken into account.
"""
if role:
if role.subroles:
role_condition = np.logical_or.reduce([self.members_role == subrole for subrole in role.subroles])
else:
role_condition = self.members_role == role
return self.sum(role_condition)
else:
            return np.bincount(self.members_entity_id)
def _init_client(self, from_archive=False):
"""Init client"""
return JenkinsClient(self.url, self.blacklist_jobs, self.detail_depth,
self.sleep_time,
                             archive=self.archive, from_archive=from_archive)
def save(self, path):
"""
Writes file to a particular location
        This won't work for cloud environments like Google's App Engine; use with caution
        and ensure you catch exceptions so you can provide informed feedback.
        prestans does not mask File IO exceptions, so your handler can respond better.
"""
        with open(path, 'wb') as file_handle:
            file_handle.write(self._file_contents)
def add_path(self, nodes, t=None):
"""Add a path at time t.
Parameters
----------
nodes : iterable container
A container of nodes.
t : snapshot id (default=None)
See Also
--------
add_path, add_cycle
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
"""
nlist = list(nodes)
interaction = zip(nlist[:-1], nlist[1:])
        self.add_interactions_from(interaction, t)
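The path is decomposed into consecutive-node interactions via `zip`; a self-contained illustration of that step:

nlist = [0, 1, 2, 3]
print(list(zip(nlist[:-1], nlist[1:])))  # [(0, 1), (1, 2), (2, 3)]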
def _parse_multifile(self, desired_type: Type[T], obj: PersistedObject,
parsing_plan_for_children: Dict[str, ParsingPlan], logger: Logger,
options: Dict[str, Dict[str, Any]]) -> T:
"""
First parse all children from the parsing plan, then calls _build_object_from_parsed_children
:param desired_type:
:param obj:
:param parsing_plan_for_children:
:param logger:
:param options:
:return:
"""
        pass
def _parse_int(value, default=None):
"""
Attempt to cast *value* into an integer, returning *default* if it fails.
"""
if value is None:
return default
try:
return int(value)
except ValueError:
print "Couldn't cast value to `int`."
        return default
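A usage sketch, assuming the corrected `_parse_int` above is in scope:

print(_parse_int('42'))        # 42
print(_parse_int('oops', -1))  # prints the warning, then -1
print(_parse_int(None, 0))     # 0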
def _get_channel_state_statelessly(self, grpc_channel, channel_id):
"""
We get state of the channel (nonce, amount, unspent_amount)
We do it by securely combine information from the server and blockchain
https://github.com/singnet/wiki/blob/master/multiPartyEscrowContract/MultiPartyEscrow_stateless_client.md
"""
        server = self._get_channel_state_from_server(grpc_channel, channel_id)
        blockchain = self._get_channel_state_from_blockchain(channel_id)
        if server["current_nonce"] == blockchain["nonce"]:
            unspent_amount = blockchain["value"] - server["current_signed_amount"]
        else:
            unspent_amount = None  # in this case we cannot securely define unspent_amount yet
        return (server["current_nonce"], server["current_signed_amount"], unspent_amount)
def get_exception(self):
"""Retrieve the exception"""
if self.exc_info:
try:
six.reraise(*self.exc_info)
except Exception as e:
                return e
def _compute_signature(parameters, access_key_secret, method, path):
'''
Generate an API request signature. Detailed document can be found at:
https://docs.qingcloud.com/api/common/signature.html
'''
parameters['signature_method'] = 'HmacSHA256'
string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)
keys = sorted(parameters.keys())
pairs = []
for key in keys:
val = six.text_type(parameters[key]).encode('utf-8')
pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign.encode('utf-8'))
signature = base64.b64encode(h.digest()).strip()
    return signature
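A self-contained sketch of the same signing scheme with hypothetical credentials and parameters (all values here are illustrative, not real API inputs):

import base64
import hmac
from hashlib import sha256
from urllib.parse import quote

# Hypothetical inputs for illustration only.
secret = b'my-access-key-secret'
params = {'action': 'DescribeInstances', 'zone': 'pek3a'}
string_to_sign = 'GET\n/iaas/\n' + '&'.join(
    quote(k, safe='') + '=' + quote(str(params[k]), safe='-_~')
    for k in sorted(params))
signature = base64.b64encode(
    hmac.new(secret, string_to_sign.encode('utf-8'), sha256).digest())
print(signature)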
def remove_record(self, common_name):
"""Delete the record associated with this common name"""
bundle = self.get_files(common_name)
num_signees = len(Counter(bundle.record['signees']))
if bundle.is_ca() and num_signees > 0:
raise CertificateAuthorityInUseError(
"Authority {name} has signed {x} certificates"
.format(name=common_name, x=num_signees)
)
try:
ca_name = bundle.record['parent_ca']
ca_record = self.get_record(ca_name)
self.remove_sign_link(ca_name, common_name)
except CertNotFoundError:
pass
record_copy = dict(self.store[common_name])
del self.store[common_name]
self.save()
        return record_copy
def getBirthdate(self, string=True):
"""
        Returns the birthdate as a string, or as a datetime object if
        ``string`` is False
        Parameters
        ----------
        string : bool
            If True (default), return the birthdate as a string
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getBirthdate()=='30 jun 1969'
True
>>> f._close()
>>> del f
"""
if string:
return self._convert_string(self.birthdate.rstrip())
else:
            return datetime.strptime(self._convert_string(self.birthdate.rstrip()), "%d %b %Y")
def icanhaz(parser, token):
"""
Finds the ICanHaz template for the given name and renders it surrounded by
the requisite ICanHaz <script> tags.
"""
bits = token.contents.split()
if len(bits) not in [2, 3]:
raise template.TemplateSyntaxError(
"'icanhaz' tag takes one argument: the name/id of the template")
    return ICanHazNode(bits[1])
def get_single_generation(self, table, db='default'):
"""Creates a random generation value for a single table name"""
key = self.keygen.gen_table_key(table, db)
val = self.cache_backend.get(key, None, db)
#if local.get('in_test', None): print force_bytes(val).ljust(32), key
if val is None:
val = self.keygen.random_generator()
self.cache_backend.set(key, val, settings.MIDDLEWARE_SECONDS, db)
        return val
def go_in(self, vertex):
"""
Tell the edge to go into this vertex.
Args:
vertex (Vertex): vertex to go into.
"""
if self.vertex_in:
self.vertex_in.edges_in.remove(self)
self.vertex_in = vertex
        vertex.edges_in.add(self)
def get_forwarding_address_details(destination_address, api_key, callback_url=None, coin_symbol='btc'):
"""
    Given a destination address, return the details of the input address
    that will automatically forward to the destination address
Note: a blockcypher api_key is required for this method
"""
assert is_valid_coin_symbol(coin_symbol)
assert api_key, 'api_key required'
url = make_url(coin_symbol, 'payments')
logger.info(url)
params = {'token': api_key}
data = {
'destination': destination_address,
}
if callback_url:
data['callback_url'] = callback_url
r = requests.post(url, json=data, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
    return get_valid_json(r)
def xdrbody(self, prefix=''):
"""Return xdr code for the body (part between braces) of big 3 types"""
body = ''
prefix += indent
if self.type == 'enum':
body = ''.join(
["%s,\n" % l.xdrout(prefix) for l in self.body[:-1]])
body += "%s\n" % self.body[-1].xdrout(prefix)
elif self.type == 'struct':
body = ''.join(["%s\n" % l.xdrout(prefix) for l in self.body])
elif self.type == 'union':
for l in self.body[1:-1]:
body += ''.join(["%scase %s:\n" % (prefix, case) \
for case in l.cases])
body += ''.join([
"%s\n" % d.xdrout(prefix + indent) for d in l.declarations
])
if self.body[-1].declarations:
body += "%sdefault:\n" % prefix + \
''.join(["%s\n" % d.xdrout(prefix + indent)
for d in self.body[-1].declarations])
        return body
def trial_end(self, trial_job_id, success):
"""trial_end
Parameters
----------
trial_job_id: int
trial job id
success: bool
True if succssfully finish the experiment, False otherwise
"""
if trial_job_id in self.running_history:
if success:
cnt = 0
history_sum = 0
self.completed_avg_history[trial_job_id] = []
for each in self.running_history[trial_job_id]:
cnt += 1
history_sum += each
self.completed_avg_history[trial_job_id].append(history_sum / cnt)
self.running_history.pop(trial_job_id)
else:
            logger.warning('trial_end: trial_job_id is not in running_history')
def fit(self, Z):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
Z : DictRDD containing (X, y) pairs
X - Training vector.
{array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y - Target labels
Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (np.ndarray, sp.spmatrix))
def mapper(X):
"""Calculate statistics for every numpy or scipy blocks."""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
mean, var = mean_variance_axis(X, axis=0)
else:
mean, var = np.mean(X, axis=0), np.var(X, axis=0)
return X.shape[0], mean, var
def reducer(a, b):
"""Calculate the combined statistics."""
n_a, mean_a, var_a = a
n_b, mean_b, var_b = b
n_ab = n_a + n_b
mean_ab = ((mean_a * n_a) + (mean_b * n_b)) / n_ab
var_ab = (((n_a * var_a) + (n_b * var_b)) / n_ab) + \
((n_a * n_b) * ((mean_b - mean_a) / n_ab) ** 2)
return (n_ab, mean_ab, var_ab)
if check_rdd_dtype(X, (sp.spmatrix)):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.n_samples_seen_, self.mean_, self.var_ = X.map(mapper).treeReduce(reducer)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
        return self
def hex(self, value, text=None, back=None, style=None, rgb_mode=False):
""" A chained method that sets the fore color to an hex value.
Arguments:
value : Hex value to convert.
text : Text to style if not building up color codes.
back : Back color for the text.
style : Style for the text.
rgb_mode : If False, the closest extended code is used,
otherwise true color (rgb) mode is used.
"""
if rgb_mode:
try:
colrval = hex2rgb(value, allow_short=True)
except ValueError:
raise InvalidColr(value)
else:
try:
colrval = hex2term(value, allow_short=True)
except ValueError:
raise InvalidColr(value)
        return self.chained(text=text, fore=colrval, back=back, style=style)
def disableTemperature(self):
"""
        Specifies that the device should NOT write temperature values to the FIFO; not applied until enableFIFO is called.
:return:
"""
logger.debug("Disabling temperature sensor")
self.fifoSensorMask &= ~self.enableTemperatureMask
        self._setSampleSizeBytes()
def aot_rpush(self, exit_code):
"""Push message to AOT action channel."""
if self.tcex.default_args.tc_playbook_db_type == 'Redis':
try:
self.db.rpush(self.tcex.default_args.tc_exit_channel, exit_code)
except Exception as e:
                self.tcex.exit(1, 'Exception during AOT exit push ({}).'.format(e))
def add_protein_data(proteins, pgdb, headerfields, genecentric=False,
pool_to_output=False):
"""First creates a map with all master proteins with data,
then outputs protein data dicts for rows of a tsv. If a pool
is given then only output for that pool will be shown in the
protein table."""
proteindata = create_featuredata_map(pgdb, genecentric=genecentric,
psm_fill_fun=add_psms_to_proteindata,
pgene_fill_fun=add_protgene_to_protdata,
count_fun=count_peps_psms,
pool_to_output=pool_to_output,
get_uniques=True)
dataget_fun = {True: get_protein_data_genecentric,
False: get_protein_data_pgrouped}[genecentric is not False]
firstfield = prottabledata.ACCESSIONS[genecentric]
for protein in proteins:
outprotein = {k: v for k, v in protein.items()}
outprotein[firstfield] = outprotein.pop(prottabledata.HEADER_PROTEIN)
protein_acc = protein[prottabledata.HEADER_PROTEIN]
outprotein.update(dataget_fun(proteindata, protein_acc, headerfields))
outprotein = {k: str(v) for k, v in outprotein.items()}
        yield outprotein
def _best_fit_font_size(self, max_size):
"""
Return the largest whole-number point size less than or equal to
*max_size* that this fitter can fit.
"""
predicate = self._fits_inside_predicate
sizes = _BinarySearchTree.from_ordered_sequence(
range(1, int(max_size)+1)
)
        return sizes.find_max(predicate)
def _scalar_type_std_res(self, counts, total, colsum, rowsum):
"""Return ndarray containing standard residuals for category values.
The shape of the return value is the same as that of *counts*.
"""
expected_counts = expected_freq(counts)
residuals = counts - expected_counts
variance = (
np.outer(rowsum, colsum)
* np.outer(total - rowsum, total - colsum)
/ total ** 3
)
        return residuals / np.sqrt(variance)
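A self-contained numeric check of the formula on a small 2x2 table (the counts are illustrative only):

import numpy as np
from scipy.stats.contingency import expected_freq

counts = np.array([[10.0, 20.0], [30.0, 40.0]])
rowsum = counts.sum(axis=1)
colsum = counts.sum(axis=0)
total = counts.sum()
residuals = counts - expected_freq(counts)
variance = (np.outer(rowsum, colsum)
            * np.outer(total - rowsum, total - colsum)
            / total ** 3)
print(residuals / np.sqrt(variance))  # standardized residuals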
def waiter(self, count, *names):
'''
Construct and return a new Waiter for events on this base.
Example:
# wait up to 3 seconds for 10 foo:bar events...
waiter = base.waiter(10,'foo:bar')
# .. fire thread that will cause foo:bar events
events = waiter.wait(timeout=3)
        if events is None:
            # handle the timeout case...
for event in events:
# parse the events if you need...
NOTE: use with caution... it's easy to accidentally construct
race conditions with this mechanism ;)
'''
        return Waiter(self, count, self.loop, *names)
def stream_download(url, target_path, verbose=False):
""" Download a large file without loading it into memory. """
response = requests.get(url, stream=True)
handle = open(target_path, "wb")
if verbose:
print("Beginning streaming download of %s" % url)
start = datetime.now()
try:
content_length = int(response.headers['Content-Length'])
content_MB = content_length/1048576.0
print("Total file size: %.2f MB" % content_MB)
except KeyError:
pass # allow Content-Length to be missing
for chunk in response.iter_content(chunk_size=512):
if chunk: # filter out keep-alive new chunks
handle.write(chunk)
if verbose:
print(
"Download completed to %s in %s" %
            (target_path, datetime.now() - start))
def run_eidos(endpoint, *args):
"""Run a given enpoint of Eidos through the command line.
Parameters
----------
endpoint : str
The class within the Eidos package to run, for instance
'apps.ExtractFromDirectory' will run
'org.clulab.wm.eidos.apps.ExtractFromDirectory'
*args
Any further arguments to be passed as inputs to the class
being run.
"""
# Make the full path to the class that should be used
call_class = '%s.%s' % (eidos_package, endpoint)
    # Assemble the command line command and append optional args
cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args)
logger.info('Running Eidos with command "%s"' % (' '.join(cmd)))
    subprocess.call(cmd)
def usage(self):
"""
        Get the usage for the remote execution options
:return Usage for the remote execution options
"""
# Retrieve the text for just the arguments
usage = self.parser.format_help().split("optional arguments:")[1]
# Remove any blank lines and return
return "Remote Options:" + os.linesep + \
               os.linesep.join([s for s in usage.splitlines() if s])
def day_publications(date):
"""
Returns a QuerySet of Publications that were being read on `date`.
    `date` is a date object.
"""
readings = Reading.objects \
.filter(start_date__lte=date) \
.filter(
Q(end_date__gte=date)
|
Q(end_date__isnull=True)
)
if readings:
return Publication.objects.filter(reading__in=readings) \
.select_related('series') \
.prefetch_related('roles__creator') \
.distinct()
else:
        return Publication.objects.none()
def get_default(self, node):
"""
Unless specified otherwise, intr fields are implicitly stickybit
"""
if node.inst.properties.get("intr", False):
# Interrupt is set!
# Default is implicitly stickybit, unless the mutually-exclusive
# sticky property was set instead
return not node.inst.properties.get("sticky", False)
else:
            return False
def delete_lb_policy(self, lb_name, policy_name):
"""
Deletes a policy from the LoadBalancer. The specified policy must not
be enabled for any listeners.
"""
params = {
'LoadBalancerName' : lb_name,
'PolicyName' : policy_name,
}
        return self.get_status('DeleteLoadBalancerPolicy', params)
def _write_wrapped_codestream(self, ofile, box):
"""Write wrapped codestream."""
# Codestreams require a bit more care.
# Am I a raw codestream?
if len(self.box) == 0:
# Yes, just write the codestream box header plus all
# of myself out to file.
ofile.write(struct.pack('>I', self.length + 8))
ofile.write(b'jp2c')
with open(self.filename, 'rb') as ifile:
ofile.write(ifile.read())
return
# OK, I'm a jp2/jpx file. Need to find out where the raw codestream
# actually starts.
offset = box.offset
if offset == -1:
if self.box[1].brand == 'jpx ':
msg = ("The codestream box must have its offset and length "
"attributes fully specified if the file type brand is "
"JPX.")
raise IOError(msg)
# Find the first codestream in the file.
jp2c = [_box for _box in self.box if _box.box_id == 'jp2c']
offset = jp2c[0].offset
# Ready to write the codestream.
with open(self.filename, 'rb') as ifile:
ifile.seek(offset)
# Verify that the specified codestream is right.
read_buffer = ifile.read(8)
L, T = struct.unpack_from('>I4s', read_buffer, 0)
if T != b'jp2c':
msg = "Unable to locate the specified codestream."
raise IOError(msg)
if L == 0:
# The length of the box is presumed to last until the end of
# the file. Compute the effective length of the box.
L = os.path.getsize(ifile.name) - ifile.tell() + 8
elif L == 1:
# The length of the box is in the XL field, a 64-bit value.
read_buffer = ifile.read(8)
L, = struct.unpack('>Q', read_buffer)
ifile.seek(offset)
read_buffer = ifile.read(L)
            ofile.write(read_buffer)
def solve(self, solver=None, solverparameters=None):
"""Call a solver on the SDP relaxation. Upon successful solution, it
returns the primal and dual objective values along with the solution
matrices. It also sets these values in the `sdpRelaxation` object,
along with some status information.
:param sdpRelaxation: The SDP relaxation to be solved.
:type sdpRelaxation: :class:`ncpol2sdpa.SdpRelaxation`.
:param solver: The solver to be called, either `None`, "sdpa", "mosek",
"cvxpy", "scs", or "cvxopt". The default is `None`,
which triggers autodetect.
:type solver: str.
:param solverparameters: Parameters to be passed to the solver. Actual
options depend on the solver:
SDPA:
- `"executable"`:
Specify the executable for SDPA. E.g.,
`"executable":"/usr/local/bin/sdpa"`, or
`"executable":"sdpa_gmp"`
- `"paramsfile"`: Specify the parameter file
Mosek:
Refer to the Mosek documentation. All
arguments are passed on.
Cvxopt:
Refer to the PICOS documentation. All
arguments are passed on.
Cvxpy:
Refer to the Cvxpy documentation. All
arguments are passed on.
SCS:
Refer to the Cvxpy documentation. All
arguments are passed on.
:type solverparameters: dict of str.
"""
if self.F is None:
raise Exception("Relaxation is not generated yet. Call "
"'SdpRelaxation.get_relaxation' first")
        solve_sdp(self, solver, solverparameters)
def startLoading(self):
"""
Starts loading this item for the batch.
"""
if super(XBatchItem, self).startLoading():
tree = self.treeWidget()
if not isinstance(tree, XOrbTreeWidget):
self.takeFromTree()
return
next_batch = self.batch()
            tree._loadBatch(self, next_batch)
def load_context(ctx_path, ctx_type, scm=None):
"""
:param ctx_path: context file path or '-' (read from stdin)
:param ctx_type: context file type
:param scm: JSON schema file in any formats anyconfig supports, to
validate given context files
"""
if ctx_path == '-':
return loads(sys.stdin.read(), ac_parser=ctx_type, ac_schema=scm)
    return load(ctx_path, ac_parser=ctx_type, ac_schema=scm)
def _GetGsScopes(self):
"""Return all Google Storage scopes available on this VM."""
service_accounts = self.watcher.GetMetadata(metadata_key=self.metadata_key)
try:
scopes = service_accounts[self.service_account]['scopes']
return list(GS_SCOPES.intersection(set(scopes))) if scopes else None
except KeyError:
            return None
def save_with_exif_info(img, *args, **kwargs):
"""Saves an image using PIL, preserving the exif information.
Args:
img (PIL.Image.Image):
*args: The arguments for the `save` method of the Image class.
**kwargs: The keywords for the `save` method of the Image class.
"""
if 'exif' in kwargs:
exif = kwargs.pop('exif')
else:
exif = img.info.get('exif')
    img.save(*args, exif=exif, **kwargs)
def process_IAC(self, sock, cmd, option):
"""
Read in and parse IAC commands as passed by telnetlib.
SB/SE commands are stored in sbdataq, and passed in w/ a command
of SE.
"""
if cmd == DO:
if option == TM:
# timing mark - send WILL into outgoing stream
os.write(self.remote, IAC + WILL + TM)
else:
pass
elif cmd == IP:
# interrupt process
os.write(self.local, IPRESP)
elif cmd == SB:
pass
elif cmd == SE:
option = self.sbdataq[0]
if option == NAWS[0]:
# negotiate window size.
cols = six.indexbytes(self.sbdataq, 1)
rows = six.indexbytes(self.sbdataq, 2)
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(self.local, termios.TIOCSWINSZ, s)
elif cmd == DONT:
pass
else:
            pass
def start_after(self, document_fields):
"""Start query after a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_after` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
"""
query = query_mod.Query(self)
        return query.start_after(document_fields)
def eul2m(angle3, angle2, angle1, axis3, axis2, axis1):
"""
Construct a rotation matrix from a set of Euler angles.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/eul2m_c.html
:param angle3: Rotation angle about third rotation axis (radians).
:type angle3: float
:param angle2: Rotation angle about second rotation axis (radians).
:type angle2: float
:param angle1: Rotation angle about first rotation axis (radians).
:type angle1: float
:param axis3: Axis number of third rotation axis.
:type axis3: int
:param axis2: Axis number of second rotation axis.
:type axis2: int
:param axis1: Axis number of first rotation axis.]
:type axis1: int
:return: Product of the 3 rotations.
:rtype: 3x3-Element Array of floats
"""
angle3 = ctypes.c_double(angle3)
angle2 = ctypes.c_double(angle2)
angle1 = ctypes.c_double(angle1)
axis3 = ctypes.c_int(axis3)
axis2 = ctypes.c_int(axis2)
axis1 = ctypes.c_int(axis1)
r = stypes.emptyDoubleMatrix()
libspice.eul2m_c(angle3, angle2, angle1, axis3, axis2, axis1, r)
    return stypes.cMatrixToNumpy(r)
async def add(self, setname, ip, timeout):
"""
Adds the given IP address to the specified set.
If timeout is specified, the IP will stay in the set for the given
duration. Else it will stay in the set during the set default timeout.
timeout must be given in seconds.
The resulting command looks like this:
``nft add element inet firewall ellis_blacklist4 { 192.0.2.10 timeout 30s }``
"""
# We have to double-quote the '{' '}' at both ends for `format` to work.
if timeout > 0:
to_ban = "{{ {0} timeout {1}s }}".format(ip, timeout)
else:
to_ban = "{{ {0} }}".format(ip)
args = ['add', 'element', self.table_family, self.table_name, setname, to_ban]
        return await self.start(__class__.CMD, *args)
def dimensionize(maybe_a_list, nd=2):
"""Convert integers to a list of integers to fit the number of dimensions if
the argument is not already a list.
For example:
`dimensionize(3, nd=2)`
will produce the following result:
`(3, 3)`.
`dimensionize([3, 1], nd=2)`
will produce the following result:
`[3, 1]`.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
"""
if not hasattr(maybe_a_list, '__iter__'):
# Argument is probably an integer so we map it to a list of size `nd`.
now_a_list = [maybe_a_list] * nd
return now_a_list
else:
# Argument is probably an `nd`-sized list.
        return maybe_a_list
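A usage sketch matching the docstring examples, assuming the `dimensionize` function above is in scope:

print(dimensionize(3, nd=2))       # [3, 3]
print(dimensionize([3, 1], nd=2))  # [3, 1]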
def get_csv_import_info(self, path):
"""Launches the csv dialog and returns csv_info
csv_info is a tuple of dialect, has_header, digest_types
Parameters
----------
path: String
\tFile path of csv file
"""
csvfilename = os.path.split(path)[1]
try:
filterdlg = CsvImportDialog(self.main_window, csvfilepath=path)
        except csv.Error as err:
# Display modal warning dialog
msg = _("'{filepath}' does not seem to be a valid CSV file.\n \n"
"Opening it yielded the error:\n{error}")
msg = msg.format(filepath=csvfilename, error=err)
short_msg = _('Error reading CSV file')
self.display_warning(msg, short_msg)
return
if filterdlg.ShowModal() == wx.ID_OK:
dialect, has_header = filterdlg.csvwidgets.get_dialect()
digest_types = filterdlg.grid.dtypes
encoding = filterdlg.csvwidgets.encoding
else:
filterdlg.Destroy()
return
filterdlg.Destroy()
        return dialect, has_header, digest_types, encoding
def _factor(lexer):
"""Return a factor expression."""
tok = _expect_token(lexer, FACTOR_TOKS)
# '~' F
toktype = type(tok)
if toktype is OP_not:
return ('not', _factor(lexer))
# '(' EXPR ')'
elif toktype is LPAREN:
expr = _expr(lexer)
_expect_token(lexer, {RPAREN})
return expr
# OPN '(' ... ')'
elif any(toktype is t for t in OPN_TOKS):
op = tok.ASTOP
_expect_token(lexer, {LPAREN})
tok = next(lexer)
# OPN '(' ')'
if isinstance(tok, RPAREN):
xs = tuple()
# OPN '(' XS ')'
else:
lexer.unpop_token(tok)
xs = _args(lexer)
_expect_token(lexer, {RPAREN})
return (op, ) + xs
# ITE '(' EXPR ',' EXPR ',' EXPR ')'
elif toktype is KW_ite:
_expect_token(lexer, {LPAREN})
s = _expr(lexer)
_expect_token(lexer, {COMMA})
d1 = _expr(lexer)
_expect_token(lexer, {COMMA})
d0 = _expr(lexer)
_expect_token(lexer, {RPAREN})
return ('ite', s, d1, d0)
# Implies '(' EXPR ',' EXPR ')'
elif toktype is KW_implies:
_expect_token(lexer, {LPAREN})
p = _expr(lexer)
_expect_token(lexer, {COMMA})
q = _expr(lexer)
_expect_token(lexer, {RPAREN})
return ('implies', p, q)
# Not '(' EXPR ')'
elif toktype is KW_not:
_expect_token(lexer, {LPAREN})
x = _expr(lexer)
_expect_token(lexer, {RPAREN})
return ('not', x)
# VARIABLE
elif toktype is NameToken:
lexer.unpop_token(tok)
return _variable(lexer)
# '0' | '1'
else:
if tok.value not in {0, 1}:
raise Error("unexpected token: " + str(tok))
        return ('const', tok.value)
def _write(self, session, openFile, replaceParamFile):
"""
Link Node Dataset File Write to File Method
"""
# Retrieve TimeStep objects
timeSteps = self.timeSteps
# Write Lines
openFile.write('%s\n' % self.name)
openFile.write('NUM_LINKS %s\n' % self.numLinks)
openFile.write('TIME_STEP %s\n' % self.timeStepInterval)
openFile.write('NUM_TS %s\n' % self.numTimeSteps)
openFile.write('START_TIME %s\n' % self.startTime)
for timeStep in timeSteps:
openFile.write('TS %s\n' % timeStep.timeStep)
# Retrieve LinkDataset objects
linkDatasets = timeStep.linkDatasets
for linkDataset in linkDatasets:
# Write number of node datasets values
openFile.write('{0} '.format(linkDataset.numNodeDatasets))
# Retrieve NodeDatasets
nodeDatasets = linkDataset.nodeDatasets
if linkDataset.numNodeDatasets > 0:
for nodeDataset in nodeDatasets:
# Write status and value
openFile.write('{0} {1:.5f} '.format(nodeDataset.status, nodeDataset.value))
else:
for nodeDataset in nodeDatasets:
# Write status and value
if linkDataset.numNodeDatasets < 0:
openFile.write('{0:.5f}'.format(nodeDataset.value))
else:
openFile.write('{0:.3f}'.format(nodeDataset.value))
# Write new line character after each link dataset
openFile.write('\n')
# Insert empty line between time steps
            openFile.write('\n')
def logscale(x_min, x_max, n):
"""
:param x_min: minumum value
:param x_max: maximum value
:param n: number of steps
:returns: an array of n values from x_min to x_max
"""
if not (isinstance(n, int) and n > 0):
raise ValueError('n must be a positive integer, got %s' % n)
if x_min <= 0:
raise ValueError('x_min must be positive, got %s' % x_min)
if x_max <= x_min:
raise ValueError('x_max (%s) must be bigger than x_min (%s)' %
(x_max, x_min))
delta = numpy.log(x_max / x_min)
    return numpy.exp(delta * numpy.arange(n) / (n - 1)) * x_min
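A quick check, assuming the `logscale` function above is in scope: three log-spaced steps from 1 to 100 land exactly on the decades:

print(logscale(1, 100, 3))  # [  1.  10. 100.]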
def set_group_conditions(self, group_id, conditions, trigger_mode=None):
"""
Set the group conditions.
This replaces any existing conditions on the group and member conditions for all trigger modes.
:param group_id: Group to be updated
:param conditions: New conditions to replace old ones
:param trigger_mode: Optional TriggerMode used
:type conditions: GroupConditionsInfo
:type trigger_mode: TriggerMode
:return: The new Group conditions
"""
data = self._serialize_object(conditions)
if trigger_mode is not None:
url = self._service_url(['triggers', 'groups', group_id, 'conditions', trigger_mode])
else:
url = self._service_url(['triggers', 'groups', group_id, 'conditions'])
response = self._put(url, data)
        return Condition.list_to_object_list(response)
def close(self):
"""
Send a dead message to the remote, causing :meth:`ChannelError` to be
raised in any waiting thread.
"""
_vv and IOLOG.debug('%r.close()', self)
self.context.send(
Message.dead(
reason=self.explicit_close_msg,
handle=self.dst_handle
)
        )
def check_complicance(self):
"""Check compliance with Media RSS Specification, Version 1.5.1.
see http://www.rssboard.org/media-rss
Raises AttributeError on error.
"""
# check Media RSS requirement: one of the following elements is
# required: media_group | media_content | media_player | media_peerLink
# | media_location. We do the check only if any media_... element is
# set to allow non media feeds
if(any([ma for ma in vars(self)
if ma.startswith('media_') and getattr(self, ma)])
and not self.media_group
and not self.media_content
and not self.media_player
and not self.media_peerLink
and not self.media_location
):
raise AttributeError(
"Using media elements requires the specification of at least "
"one of the following elements: 'media_group', "
"'media_content', 'media_player', 'media_peerLink' or "
"'media_location'.")
# check Media RSS requirement: if media:player is missing all
# media_content elements need to have url attributes.
if not self.media_player:
if self.media_content:
# check if all media_content elements have a URL set
if isinstance(self.media_content, list):
if not all([False for mc in self.media_content if
'url' not in mc.element_attrs]):
raise AttributeError(
"MediaRSSItems require a media_player attribute "
"if a media_content has no url set.")
else:
if not self.media_content.element_attrs['url']:
raise AttributeError(
"MediaRSSItems require a media_player attribute "
"if a media_content has no url set.")
pass
elif self.media_group:
# check media groups without player if its media_content
# elements have a URL set
raise NotImplementedError(
"MediaRSSItem: media_group check not implemented yet.") | 0.000874 |
def get_url(self, resource, params=None):
"""
Generate url for request
"""
# replace placeholders
pattern = r'\{(.+?)\}'
resource = re.sub(pattern, lambda t: str(params.get(t.group(1), '')), resource)
# build url
parts = (self.endpoint, '/api/', resource)
        return '/'.join(map(lambda x: str(x).strip('/'), parts))
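The placeholder substitution can be exercised on its own; a self-contained sketch (resource path and params are illustrative only):

import re

params = {'id': 42}
resource = 'tickets/{id}/comments'
print(re.sub(r'\{(.+?)\}',
             lambda t: str(params.get(t.group(1), '')),
             resource))  # tickets/42/comments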
def open(self, path, encoding=None, use_cached_encoding=True):
"""
Open a file and set its content on the editor widget.
pyqode does not try to guess encoding. It's up to the client code to
handle encodings. You can either use a charset detector to detect
encoding or rely on a settings in your application. It is also up to
you to handle UnicodeDecodeError, unless you've added
class:`pyqode.core.panels.EncodingPanel` on the editor.
pyqode automatically caches file encoding that you can later reuse it
automatically.
:param path: Path of the file to open.
:param encoding: Default file encoding. Default is to use the locale
encoding.
:param use_cached_encoding: True to use the cached encoding instead
of ``encoding``. Set it to True if you want to force reload with a
new encoding.
:raises: UnicodeDecodeError in case of error if no EncodingPanel
were set on the editor.
"""
ret_val = False
if encoding is None:
encoding = locale.getpreferredencoding()
self.opening = True
settings = Cache()
self._path = path
# get encoding from cache
if use_cached_encoding:
try:
cached_encoding = settings.get_file_encoding(
path, preferred_encoding=encoding)
except KeyError:
pass
else:
encoding = cached_encoding
enable_modes = os.path.getsize(path) < self._limit
for m in self.editor.modes:
if m.enabled:
m.enabled = enable_modes
# open file and get its content
try:
with open(path, 'Ur', encoding=encoding) as file:
content = file.read()
if self.autodetect_eol:
self._eol = file.newlines
if isinstance(self._eol, tuple):
self._eol = self._eol[0]
if self._eol is None:
# empty file has no newlines
self._eol = self.EOL.string(self.preferred_eol)
else:
self._eol = self.EOL.string(self.preferred_eol)
except (UnicodeDecodeError, UnicodeError) as e:
try:
from pyqode.core.panels import EncodingPanel
panel = self.editor.panels.get(EncodingPanel)
except KeyError:
raise e # panel not found, not automatic error management
else:
panel.on_open_failed(path, encoding)
else:
# success! Cache the encoding
settings.set_file_encoding(path, encoding)
self._encoding = encoding
# replace tabs by spaces
if self.replace_tabs_by_spaces:
content = content.replace("\t", " " * self.editor.tab_length)
# set plain text
self.editor.setPlainText(
content, self.get_mimetype(path), self.encoding)
self.editor.setDocumentTitle(self.editor.file.name)
ret_val = True
_logger().debug('file open: %s', path)
self.opening = False
if self.restore_cursor:
self._restore_cached_pos()
self._check_for_readonly()
        return ret_val
def _update(self):
"""Initialize the 1D interpolation."""
if self.strains.size and self.strains.size == self.values.size:
x = np.log(self.strains)
y = self.values
if x.size < 4:
self._interpolater = interp1d(
x,
y,
'linear',
bounds_error=False,
fill_value=(y[0], y[-1]))
else:
self._interpolater = interp1d(
x,
y,
'cubic',
bounds_error=False,
                    fill_value=(y[0], y[-1]))
def launch_help(self, helpname, filename):
"""Generic help launcher
Launches HTMLWindow that shows content of filename
or the Internet page with the filename url
Parameters
----------
filename: String
\thtml file or url
"""
# Set up window
position = config["help_window_position"]
size = config["help_window_size"]
self.help_window = wx.Frame(self.main_window, -1,
helpname, position, size)
self.help_htmlwindow = wx.html.HtmlWindow(self.help_window, -1,
(0, 0), size)
self.help_window.Bind(wx.EVT_MOVE, self.OnHelpMove)
self.help_window.Bind(wx.EVT_SIZE, self.OnHelpSize)
self.help_htmlwindow.Bind(wx.EVT_RIGHT_DOWN, self.OnHelpBack)
self.help_htmlwindow.Bind(wx.html.EVT_HTML_LINK_CLICKED,
lambda e: self.open_external_links(e))
self.help_htmlwindow.Bind(wx.EVT_MOUSEWHEEL,
lambda e: self.zoom_html(e))
# Get help data
current_path = os.getcwd()
os.chdir(get_help_path())
try:
if os.path.isfile(filename):
self.help_htmlwindow.LoadFile(filename)
else:
self.help_htmlwindow.LoadPage(filename)
except IOError:
self.help_htmlwindow.LoadPage(filename)
        # Show help window
self.help_window.Show()
os.chdir(current_path) | 0.001278 |
def configure(self, options, config):
"""Configures the xunit plugin."""
Plugin.configure(self, options, config)
self.config = config
if self.enabled:
self.stats = {'errors': 0,
'failures': 0,
'passes': 0,
'skipped': 0
}
self.errorlist = []
self.error_report_file = codecs.open(options.xunit_file, 'w',
self.encoding, 'replace') | 0.003643 |
def cluster_sample5():
"Start with wrong number of clusters."
start_centers = [[0.0, 1.0], [0.0, 0.0]]
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE5, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE5, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH) | 0.018519 |
def scatter(points, vertexlabels=None, **kwargs):
'''Scatter plot of barycentric 2-simplex points on a 2D triangle.
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.scatter`.
:type **kwargs: keyword arguments.'''
if vertexlabels is None:
vertexlabels = ('1','2','3')
projected = cartesian(points)
plt.scatter(projected[:,0], projected[:,1], **kwargs)
_draw_axes(vertexlabels)
return plt.gcf() | 0.007052 |
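A hedged usage sketch for the function above; it assumes `scatter` and its `cartesian` helper are importable from the same module, and uses a Dirichlet sample so each row sums to one as required:

import numpy as np
import matplotlib.pyplot as plt

pts = np.random.dirichlet((2, 3, 4), size=200)  # rows lie on the 2-simplex
scatter(pts, vertexlabels=('a', 'b', 'c'), s=8, c='steelblue')
plt.show()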
def cloneNode(self, deep: bool=False) -> AbstractNode:
"""Return new copy of this node.
        If the optional argument ``deep`` is True, the new node also has
        clones of this node's child nodes (if present).
"""
if deep:
return self._clone_node_deep()
return self._clone_node() | 0.011869 |
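A quick illustration of the `deep` flag; the concrete `Node` class and the `childNodes` attribute are assumptions about the surrounding DOM-style library:

parent = Node()
parent.appendChild(Node())

shallow = parent.cloneNode()          # copy without children
deep = parent.cloneNode(deep=True)    # copy including the child subtree
assert len(shallow.childNodes) == 0   # attribute name assumed
assert len(deep.childNodes) == 1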
def purge_results(self, client_id, msg):
"""Purge results from memory. This method is more valuable before we move
to a DB based message storage mechanism."""
content = msg['content']
self.log.info("Dropping records with %s", content)
msg_ids = content.get('msg_ids', [])
reply = dict(status='ok')
if msg_ids == 'all':
try:
self.db.drop_matching_records(dict(completed={'$ne':None}))
except Exception:
reply = error.wrap_exception()
else:
            # list() is needed on Python 3, where filter() returns a lazy
            # iterator that is always truthy and not subscriptable
            pending = list(filter(lambda m: m in self.pending, msg_ids))
if pending:
try:
raise IndexError("msg pending: %r" % pending[0])
except:
reply = error.wrap_exception()
else:
try:
self.db.drop_matching_records(dict(msg_id={'$in':msg_ids}))
except Exception:
reply = error.wrap_exception()
if reply['status'] == 'ok':
eids = content.get('engine_ids', [])
for eid in eids:
if eid not in self.engines:
try:
raise IndexError("No such engine: %i" % eid)
except:
reply = error.wrap_exception()
break
uid = self.engines[eid].queue
try:
self.db.drop_matching_records(dict(engine_uuid=uid, completed={'$ne':None}))
except Exception:
reply = error.wrap_exception()
break
self.session.send(self.query, 'purge_reply', content=reply, ident=client_id) | 0.005552 |
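For reference, the shape of the request this handler expects, reconstructed from the fields it reads (the values are illustrative):

# a purge request can target explicit message ids, all completed records,
# or whole engines; these keys are exactly what the handler reads
content = {
    'msg_ids': ['a1b2c3', 'd4e5f6'],   # or the string 'all'
    'engine_ids': [0, 2],              # optional: purge by engine
}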
def nl_send(sk, msg):
"""Transmit Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L416
    Transmits the Netlink message `msg` over the Netlink socket using `socket.sendmsg()`. This function is based on
    `nl_send_iovec()`.
The message is addressed to the peer as specified in the socket by either the nl_socket_set_peer_port() or
nl_socket_set_peer_groups() function. The peer address can be overwritten by specifying an address in the `msg`
object using nlmsg_set_dst().
If present in the `msg`, credentials set by the nlmsg_set_creds() function are added to the control buffer of the
message.
Calls to this function can be overwritten by providing an alternative using the nl_cb_overwrite_send() function.
This function triggers the `NL_CB_MSG_OUT` callback.
ATTENTION: Unlike `nl_send_auto()`, this function does *not* finalize the message in terms of automatically adding
needed flags or filling out port numbers.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
msg -- Netlink message (nl_msg class instance).
Returns:
Number of bytes sent on success or a negative error code.
"""
cb = sk.s_cb
if cb.cb_send_ow:
return cb.cb_send_ow(sk, msg)
hdr = nlmsg_hdr(msg)
iov = hdr.bytearray[:hdr.nlmsg_len]
return nl_send_iovec(sk, msg, iov, 1) | 0.004993 |
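A sketch of the override hook consulted in the first two lines: setting `cb_send_ow` on the socket's callback object reroutes `nl_send()` through your own function. Only names already used in the snippet appear here; the socket setup itself is assumed:

def logging_send(sk, msg):
    # inspect or mutate the message, then fall through to the real send
    hdr = nlmsg_hdr(msg)
    print('sending %d bytes' % hdr.nlmsg_len)
    iov = hdr.bytearray[:hdr.nlmsg_len]
    return nl_send_iovec(sk, msg, iov, 1)

# after this, nl_send(sk, msg) delegates to logging_send()
sk.s_cb.cb_send_ow = logging_send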
def write_json(dictionary, filename):
"""Write dictionary to JSON"""
with open(filename, 'w') as data_file:
json.dump(dictionary, data_file, indent=4, sort_keys=True)
print('--> Wrote ' + os.path.basename(filename)) | 0.004255 |
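Usage is a one-liner; reading the file back shows the alphabetical key order produced by `sort_keys=True`:

write_json({'b': 2, 'a': 1}, '/tmp/example.json')
with open('/tmp/example.json') as f:
    print(f.read())   # keys appear alphabetically because sort_keys=True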
def _update_line(self):
""" Update border line to match new shape """
w = self._border_width
m = self.margin
# border is drawn within the boundaries of the widget:
#
# size = (8, 7) margin=2
# internal rect = (3, 3, 2, 1)
# ........
# ........
# ..BBBB..
# ..B B..
# ..BBBB..
# ........
# ........
#
l = b = m
r = self.size[0] - m
t = self.size[1] - m
pos = np.array([
[l, b], [l+w, b+w],
[r, b], [r-w, b+w],
[r, t], [r-w, t-w],
[l, t], [l+w, t-w],
], dtype=np.float32)
faces = np.array([
[0, 2, 1],
[1, 2, 3],
[2, 4, 3],
[3, 5, 4],
[4, 5, 6],
[5, 7, 6],
[6, 0, 7],
[7, 0, 1],
[5, 3, 1],
[1, 5, 7],
], dtype=np.int32)
start = 8 if self._border_color.is_blank else 0
stop = 8 if self._bgcolor.is_blank else 10
face_colors = None
if self._face_colors is not None:
face_colors = self._face_colors[start:stop]
self._mesh.set_data(vertices=pos, faces=faces[start:stop],
face_colors=face_colors)
# picking mesh covers the entire area
self._picking_mesh.set_data(vertices=pos[::2]) | 0.002104 |
def rsdl_s(self, Yprev, Y):
"""Compute dual residual vector.
Overriding this method is required if methods :meth:`cnst_A`,
:meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not
overridden.
"""
return self.rho * self.cnst_AT(Yprev - Y) | 0.006897 |
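A note for context (standard ADMM, not stated in the snippet itself): for the constraint Ax + By = c, the dual residual after iteration k+1 is s = rho * A^T B (y_{k+1} - y_k). The default above corresponds to the common case B = -I, where A^T B (y_{k+1} - y_k) = A^T (y_k - y_{k+1}), i.e. exactly rho * cnst_AT(Yprev - Y). Classes whose cnst_B is not -I must override this method, which is why cnst_B appears in the docstring's list of related methods.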
def print_table(table, name=None, fmt=None):
"""
Pretty print a pandas DataFrame.
Uses HTML output if running inside Jupyter Notebook, otherwise
formatted text output.
Parameters
----------
table : pd.Series or pd.DataFrame
Table to pretty-print.
name : str, optional
Table name to display in upper left corner.
fmt : str, optional
Formatter to use for displaying table elements.
E.g. '{0:.2f}%' for displaying 100 as '100.00%'.
Restores original setting after displaying.
"""
if isinstance(table, pd.Series):
table = pd.DataFrame(table)
if isinstance(table, pd.DataFrame):
table.columns.name = name
prev_option = pd.get_option('display.float_format')
if fmt is not None:
pd.set_option('display.float_format', lambda x: fmt.format(x))
display(table)
if fmt is not None:
pd.set_option('display.float_format', prev_option) | 0.00104 |
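An illustrative call (it assumes the IPython `display` import this helper relies on; outside a notebook `display` falls back to a plain-text rendering):

import pandas as pd

df = pd.DataFrame({'returns': [12.34, 56.78]}, index=['strat_a', 'strat_b'])
print_table(df, name='Example', fmt='{0:.2f}%')  # renders 12.34 as '12.34%'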
def capture(returns, factor_returns, period=DAILY):
"""
Compute capture ratio.
Parameters
----------
returns : pd.Series or np.ndarray
Returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns : pd.Series or np.ndarray
Noncumulative returns of the factor to which beta is
computed. Usually a benchmark such as the market.
- This is in the same style as returns.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Value ignored if `annualization` parameter is specified.
Defaults are::
'monthly':12
'weekly': 52
'daily': 252
Returns
-------
capture_ratio : float
Note
----
See http://www.investopedia.com/terms/u/up-market-capture-ratio.asp for
details.
"""
return (annual_return(returns, period=period) /
annual_return(factor_returns, period=period)) | 0.000963 |
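A worked example under the daily default: constant daily returns that compound to roughly 12% and 10% a year give a capture ratio of about 1.2, i.e. the strategy captured about 120% of the benchmark's move:

import numpy as np

returns = np.full(252, 0.12 / 252)         # compounds to ~12.7% annualized
factor_returns = np.full(252, 0.10 / 252)  # compounds to ~10.5% annualized
print(capture(returns, factor_returns))    # ~1.21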
def compile(self, optimizer, loss, metrics=None):
"""
Args:
optimizer (tf.train.Optimizer):
loss, metrics: string or list of strings
"""
if isinstance(loss, six.string_types):
loss = [loss]
if metrics is None:
metrics = []
if isinstance(metrics, six.string_types):
metrics = [metrics]
self._stats_to_inference = loss + metrics + [TOTAL_LOSS_NAME]
setup_keras_trainer(
self.trainer, get_model=self.get_model,
input_signature=self.input_signature,
target_signature=self.target_signature,
input=self.input,
optimizer=optimizer,
loss=loss,
metrics=metrics) | 0.002628 |
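A hedged usage sketch: the constructor arguments are inferred from the attributes this method reads (`get_model`, `input_signature`, `target_signature`, `input`, `trainer`), and the class name itself is an assumption about the surrounding wrapper:

# names inferred from the attributes used above; verify against the
# actual wrapper class before relying on this
model = KerasModel(
    get_model=build_keras_model,        # returns a tf.keras.Model
    input_signature=input_signature,
    target_signature=target_signature,
    input=dataflow_input,
    trainer=trainer)
model.compile(
    optimizer=tf.train.AdamOptimizer(1e-3),
    loss='categorical_crossentropy',
    metrics='accuracy')                 # a single string is accepted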
def _get_or_create_user(self, force_populate=False):
"""
Loads the User model object from the database or creates it if it
doesn't exist. Also populates the fields, subject to
AUTH_LDAP_ALWAYS_UPDATE_USER.
"""
save_user = False
username = self.backend.ldap_to_django_username(self._username)
self._user, built = self.backend.get_or_build_user(username, self)
self._user.ldap_user = self
self._user.ldap_username = self._username
should_populate = force_populate or self.settings.ALWAYS_UPDATE_USER or built
if built:
if self.settings.NO_NEW_USERS:
raise self.AuthenticationFailed(
"user does not satisfy AUTH_LDAP_NO_NEW_USERS"
)
logger.debug("Creating Django user {}".format(username))
self._user.set_unusable_password()
save_user = True
if should_populate:
logger.debug("Populating Django user {}".format(username))
self._populate_user()
save_user = True
# Give the client a chance to finish populating the user just
# before saving.
populate_user.send(self.backend.__class__, user=self._user, ldap_user=self)
if save_user:
self._user.save()
# This has to wait until we're sure the user has a pk.
if self.settings.MIRROR_GROUPS or self.settings.MIRROR_GROUPS_EXCEPT:
self._normalize_mirror_settings()
self._mirror_groups() | 0.002549 |
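The `populate_user` signal dispatched above is the supported hook for last-minute changes before the save; a minimal receiver (the group name is illustrative, and `group_names` assumes group settings are configured):

from django.dispatch import receiver
from django_auth_ldap.backend import populate_user

@receiver(populate_user)
def set_staff_flag(sender, user, ldap_user, **kwargs):
    # runs inside _get_or_create_user(), just before user.save()
    user.is_staff = 'staffers' in ldap_user.group_names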