text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def has_ndarray_int_columns(features, X):
""" Checks if numeric feature columns exist in ndarray """
_, ncols = X.shape
if not all(d.isdigit() for d in features if isinstance(d, str)) or not isinstance(X, np.ndarray):
return False
ndarray_columns = np.arange(0, ncols)
feature_cols = np.unique([int(d) for d in features])
return all(np.in1d(feature_cols, ndarray_columns)) | 0.00495 |
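A minimal usage sketch with made-up data; it only assumes has_ndarray_int_columns from above is in scope.
import numpy as np

X = np.arange(12).reshape(4, 3)                 # 4 samples, 3 numeric columns
print(has_ndarray_int_columns(['0', '2'], X))   # True: columns 0 and 2 exist
print(has_ndarray_int_columns(['0', '5'], X))   # False: column 5 is out of range
print(has_ndarray_int_columns(['a'], X))        # False: non-digit feature name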
def remove(self, node, dirty=True):
"""Remove the given child node.
Args:
node (gkeepapi.Node): Node to remove.
dirty (bool): Whether this node should be marked dirty.
"""
if node.id in self._children:
self._children[node.id].parent = None
del self._children[node.id]
if dirty:
self.touch() | 0.005115 |
def reduce(self, agg=operator.add, acc=None):
"""
Submit all tasks and reduce the results
"""
return self.submit_all().reduce(agg, acc) | 0.011976 |
def save(self):
""" Exports all user attributes to the user's configuration and writes configuration
Saves the values for each attribute stored in User.configVars
into the user's configuration. The password is automatically
encoded and salted to prevent saving it as plaintext. The
session is pickled, encoded, and compressed to take up
less space in the configuration file. All other attributes
are saved in plain text. Writes the changes to the configuration file.
"""
# Export each stored attribute to the configuration
for prop in dir(self):
if getattr(self, prop) is None: continue
if prop not in self.configVars: continue
# Special handling for some attributes
if prop == "session":
pic = pickle.dumps(getattr(self, prop).cookies)
comp = zlib.compress(pic)
enc = base64.b64encode(comp)
self.config[prop] = enc.decode()
continue
if prop == "password" and not self.savePassword: continue
if prop == "password":
s = hashlib.md5(self.username.encode()).hexdigest()
p = base64.b64encode(getattr(self, prop).encode()) + s.encode()
self.config[prop] = p.decode()
continue
self.config[prop] = str(getattr(self, prop))
if 'password' in self.config and not self.savePassword: del self.config.password
self.config.write()
self.__loadConfig() | 0.009852 |
def dump(doc, output_stream=None):
"""
Dump a :class:`.Doc` object into a JSON-encoded text string.
The output will be sent to :data:`sys.stdout` unless an alternative
text stream is given.
To dump to :data:`sys.stdout` just do:
>>> import panflute as pf
>>> doc = pf.Doc(Para(Str('a'))) # Create sample document
>>> pf.dump(doc)
To dump to file:
>>> with open('some-document.json', 'w', encoding='utf-8') as f:
>>> pf.dump(doc, f)
To dump to a string:
>>> import io
>>> with io.StringIO() as f:
>>> pf.dump(doc, f)
>>> contents = f.getvalue()
:param doc: document, usually created with :func:`.load`
:type doc: :class:`.Doc`
:param output_stream: text stream used as output
(default is :data:`sys.stdout`)
"""
assert type(doc) == Doc, "panflute.dump needs input of type panflute.Doc"
if output_stream is None:
sys.stdout = codecs.getwriter("utf-8")(sys.stdout) if py2 else codecs.getwriter("utf-8")(sys.stdout.detach())
output_stream = sys.stdout
# Switch to legacy JSON output; eg: {'t': 'Space', 'c': []}
if doc.api_version is None:
# Switch .to_json() to legacy
Citation.backup = Citation.to_json
Citation.to_json = Citation.to_json_legacy
# Switch ._slots_to_json() to legacy
for E in [Table, OrderedList, Quoted, Math]:
E.backup = E._slots_to_json
E._slots_to_json = E._slots_to_json_legacy
# Switch .to_json() to method of base class
for E in EMPTY_ELEMENTS:
E.backup = E.to_json
E.to_json = Element.to_json
json_serializer = lambda elem: elem.to_json()
output_stream.write(json.dumps(
obj=doc,
default=json_serializer, # Serializer
check_circular=False,
separators=(',', ':'), # Compact separators, like Pandoc
ensure_ascii=False # For Pandoc compat
))
# Undo legacy changes
if doc.api_version is None:
Citation.to_json = Citation.backup
for E in [Table, OrderedList, Quoted, Math]:
E._slots_to_json = E.backup
for E in EMPTY_ELEMENTS:
E.to_json = E.backup | 0.001325 |
def from_files(cls, secrets=None, storage=None, scopes=None, no_webserver=False):
"""Return a spreadsheet collection making OAauth 2.0 credentials.
Args:
secrets (str): location of secrets file (default: ``%r``)
storage (str): location of storage file (default: ``%r``)
scopes: scope URL(s) or ``'read'`` or ``'write'`` (default: ``%r``)
no_webserver (bool): URL/code prompt instead of webbrowser auth
Returns:
Sheets: new Sheets instance with OAuth 2.0 credentials
"""
creds = oauth2.get_credentials(scopes, secrets, storage, no_webserver)
return cls(creds) | 0.004491 |
def add_axes_and_nodes(self):
"""
Adds the axes (i.e. 2 or 3 axes, not to be confused with matplotlib
axes) and the nodes that belong to each axis.
"""
for i, (group, nodelist) in enumerate(self.nodes.items()):
theta = self.group_theta(group)
if self.has_edge_within_group(group):
theta = theta - self.minor_angle
self.plot_nodes(nodelist, theta, group)
theta = theta + 2 * self.minor_angle
self.plot_nodes(nodelist, theta, group)
else:
self.plot_nodes(nodelist, theta, group) | 0.004608 |
def get_child_tiers_for(self, id_tier):
"""Give all child tiers for a tier.
:param str id_tier: Name of the tier.
:returns: List of all children
:raises KeyError: If the tier is non existent.
"""
self.tiers[id_tier]  # raises KeyError if the tier does not exist
return [m for m in self.tiers if 'PARENT_REF' in self.tiers[m][2] and
self.tiers[m][2]['PARENT_REF'] == id_tier] | 0.004988 |
def make_simple_merged_vcf_with_no_combinations(self, ref_seq):
'''Does a simple merging of all variants in this cluster.
Assumes one ALT in each variant. Uses the ALT for each
variant, making one new vcf_record that has all the variants
put together'''
if len(self) <= 1:
return
merged_vcf_record = self.vcf_records[0]
for i in range(1, len(self.vcf_records), 1):
if self.vcf_records[i].intersects(merged_vcf_record):
return
else:
merged_vcf_record = merged_vcf_record.merge(self.vcf_records[i], ref_seq)
self.vcf_records = [merged_vcf_record] | 0.004418 |
def status_line(self):
"""
Returns the first line of response, including http version, status
and a phrase (OK).
"""
if not self.phrase:
self.phrase = HttpStatus(self.status_code).phrase
return "{} {} {}".format("HTTP/1.1", self.status_code, self.phrase) | 0.006369 |
def RdatabasesBM(host=rbiomart_host):
"""
Lists BioMart databases through a RPY2 connection.
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
print(biomaRt.listMarts(host=host)) | 0.00361 |
def removed(self):
"""
Returns list of removed ``FileNode`` objects.
"""
if not self.parents:
return []
return RemovedFileNodesGenerator([n for n in
self._get_paths_for_status('deleted')], self) | 0.010791 |
def init_blackhole(self):
"""redirects stdout/stderr to devnull if necessary"""
if self.no_stdout or self.no_stderr:
blackhole = open(os.devnull, 'w')
if self.no_stdout:
sys.stdout = sys.__stdout__ = blackhole
if self.no_stderr:
sys.stderr = sys.__stderr__ = blackhole | 0.005682 |
def _get_matching_segments(self, zf, name):
"""
Return a generator yielding each of the segments whose names
match name.
"""
for n in zf.namelist():
if n.startswith(name):
yield zf.read(n) | 0.043902 |
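A hedged standalone sketch of the same prefix-match-and-read idea against an in-memory zip; the member names here are made up.
import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('part-0', b'alpha')
    zf.writestr('part-1', b'beta')
    zf.writestr('other', b'gamma')

with zipfile.ZipFile(buf) as zf:
    # same logic as _get_matching_segments, expressed as a list comprehension
    segments = [zf.read(n) for n in zf.namelist() if n.startswith('part')]
print(segments)  # [b'alpha', b'beta']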
def enable_global_typechecked_decorator(flag = True, retrospective = True):
"""Enables or disables global typechecking mode via decorators.
See flag global_typechecked_decorator.
In contrast to setting the flag directly, this function provides
a retrospective option. If retrospective is true, this will also
affect already imported modules, not only future imports.
Does not work if checking_enabled is false.
Does not work reliably if checking_enabled has ever been set to
false during current run.
"""
global global_typechecked_decorator
global_typechecked_decorator = flag
if import_hook_enabled:
_install_import_hook()
if global_typechecked_decorator and retrospective:
_catch_up_global_typechecked_decorator()
return global_typechecked_decorator | 0.006075 |
def generate_menu(self, ass, text, path=None, level=0):
"""
Generates a menu based on the ass parameter.
"""
menu = self.create_menu()
for index, sub in enumerate(sorted(ass[1], key=lambda y: y[0].fullname.lower())):
if index != 0:
text += "|"
text += "- " + sub[0].fullname
new_path = list(path)
if level == 0:
new_path.append(ass[0].name)
new_path.append(sub[0].name)
menu_item = self.menu_item(sub, new_path)
if sub[1]:
# If assistant has subassistants
(sub_menu, txt) = self.generate_menu(sub, text, new_path, level=level + 1)
menu_item.set_submenu(sub_menu)
menu.append(menu_item)
return menu, text | 0.004796 |
def _plot_residuals_to_ax(
data_all, model_ML, ax, e_unit=u.eV, sed=True, errorbar_opts={}
):
"""Function to compute and plot residuals in units of the uncertainty"""
if "group" not in data_all.keys():
data_all["group"] = np.zeros(len(data_all))
groups = np.unique(data_all["group"])
MLf_unit, MLsedf = sed_conversion(model_ML[0], model_ML[1].unit, sed)
MLene = model_ML[0].to(e_unit)
MLflux = (model_ML[1] * MLsedf).to(MLf_unit)
ax.axhline(0, color="k", lw=1, ls="--")
interp = False
if data_all["energy"].size != MLene.size or not np.allclose(
data_all["energy"].value, MLene.value
):
interp = True
from scipy.interpolate import interp1d
modelfunc = interp1d(MLene.value, MLflux.value, bounds_error=False)
for g in groups:
groupidx = np.where(data_all["group"] == g)
data = data_all[groupidx]
notul = ~data["ul"]
df_unit, dsedf = sed_conversion(data["energy"], data["flux"].unit, sed)
ene = data["energy"].to(e_unit)
xerr = u.Quantity((data["energy_error_lo"], data["energy_error_hi"]))
flux = (data["flux"] * dsedf).to(df_unit)
dflux = (data["flux_error_lo"] + data["flux_error_hi"]) / 2.0
dflux = (dflux * dsedf).to(df_unit)[notul]
if interp:
difference = flux[notul] - modelfunc(ene[notul]) * flux.unit
else:
difference = flux[notul] - MLflux[groupidx][notul]
# wrap around color and marker cycles
color = color_cycle[int(g) % len(color_cycle)]
marker = marker_cycle[int(g) % len(marker_cycle)]
opts = dict(
zorder=100,
marker=marker,
ls="",
elinewidth=2,
capsize=0,
mec=color,
mew=0.1,
ms=6,
color=color,
)
opts.update(errorbar_opts)
ax.errorbar(
ene[notul].value,
(difference / dflux).decompose().value,
yerr=(dflux / dflux).decompose().value,
xerr=xerr[:, notul].to(e_unit).value,
**opts
)
from matplotlib.ticker import MaxNLocator
ax.yaxis.set_major_locator(
MaxNLocator(5, integer=True, prune="upper", symmetric=True)
)
ax.set_ylabel(r"$\Delta\sigma$")
ax.set_xscale("log") | 0.000424 |
def __multi_arity_dispatch_fn( # pylint: disable=too-many-arguments,too-many-locals
ctx: GeneratorContext,
name: str,
arity_map: Mapping[int, str],
default_name: Optional[str] = None,
max_fixed_arity: Optional[int] = None,
meta_node: Optional[MetaNode] = None,
is_async: bool = False,
) -> GeneratedPyAST:
"""Return the Python AST nodes for a argument-length dispatch function
for multi-arity functions.
def fn(*args):
nargs = len(args)
method = __fn_dispatch_map.get(nargs)
if method:
return method(*args)
# Only if default
if nargs >= max_fixed_arity:
return default(*args)
raise RuntimeError
"""
dispatch_map_name = f"{name}_dispatch_map"
dispatch_keys, dispatch_vals = [], []
for k, v in arity_map.items():
dispatch_keys.append(ast.Num(k))
dispatch_vals.append(ast.Name(id=v, ctx=ast.Load()))
# Async functions should return await, otherwise just return
handle_return = __handle_async_return if is_async else __handle_return
nargs_name = genname("nargs")
method_name = genname("method")
body = [
ast.Assign(
targets=[ast.Name(id=nargs_name, ctx=ast.Store())],
value=ast.Call(
func=ast.Name(id="len", ctx=ast.Load()),
args=[ast.Name(id=_MULTI_ARITY_ARG_NAME, ctx=ast.Load())],
keywords=[],
),
),
ast.Assign(
targets=[ast.Name(id=method_name, ctx=ast.Store())],
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id=dispatch_map_name, ctx=ast.Load()),
attr="get",
ctx=ast.Load(),
),
args=[ast.Name(id=nargs_name, ctx=ast.Load())],
keywords=[],
),
),
ast.If(
test=ast.Compare(
left=ast.NameConstant(None),
ops=[ast.IsNot()],
comparators=[ast.Name(id=method_name, ctx=ast.Load())],
),
body=[
handle_return(
ast.Call(
func=ast.Name(id=method_name, ctx=ast.Load()),
args=[
ast.Starred(
value=ast.Name(
id=_MULTI_ARITY_ARG_NAME, ctx=ast.Load()
),
ctx=ast.Load(),
)
],
keywords=[],
)
)
],
orelse=[]
if default_name is None
else [
ast.If(
test=ast.Compare(
left=ast.Name(id=nargs_name, ctx=ast.Load()),
ops=[ast.GtE()],
comparators=[ast.Num(max_fixed_arity)],
),
body=[
handle_return(
ast.Call(
func=ast.Name(id=default_name, ctx=ast.Load()),
args=[
ast.Starred(
value=ast.Name(
id=_MULTI_ARITY_ARG_NAME, ctx=ast.Load()
),
ctx=ast.Load(),
)
],
keywords=[],
)
)
],
orelse=[],
)
],
),
ast.Raise(
exc=ast.Call(
func=_load_attr("basilisp.lang.runtime.RuntimeException"),
args=[
ast.Str(f"Wrong number of args passed to function: {name}"),
ast.Name(id=nargs_name, ctx=ast.Load()),
],
keywords=[],
),
cause=None,
),
]
py_fn_node = ast.AsyncFunctionDef if is_async else ast.FunctionDef
meta_deps, meta_decorators = __fn_meta(ctx, meta_node)
return GeneratedPyAST(
node=ast.Name(id=name, ctx=ast.Load()),
dependencies=chain(
[
ast.Assign(
targets=[ast.Name(id=dispatch_map_name, ctx=ast.Store())],
value=ast.Dict(keys=dispatch_keys, values=dispatch_vals),
)
],
meta_deps,
[
py_fn_node(
name=name,
args=ast.arguments(
args=[],
kwarg=None,
vararg=ast.arg(arg=_MULTI_ARITY_ARG_NAME, annotation=None),
kwonlyargs=[],
defaults=[],
kw_defaults=[],
),
body=body,
decorator_list=list(chain(meta_decorators, [_BASILISP_FN_FN_NAME])),
returns=None,
)
],
),
) | 0.001133 |
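For orientation, a hand-written Python equivalent of the dispatch code this function generates; the helper name and the lambdas below are illustrative, not part of the compiler.
def _make_dispatch(arity_map, default=None, max_fixed_arity=None, name="fn"):
    def fn(*args):
        nargs = len(args)
        method = arity_map.get(nargs)
        if method is not None:
            return method(*args)
        if default is not None and max_fixed_arity is not None and nargs >= max_fixed_arity:
            return default(*args)
        raise RuntimeError("Wrong number of args passed to function: " + name, nargs)
    return fn

add = _make_dispatch({1: lambda x: x, 2: lambda x, y: x + y})
print(add(2, 3))   # 5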
def subtask(*args, **kwargs):
'''Decorator which prints out the name of the decorated function on
execution.
'''
depth = kwargs.get('depth', 2)
prefix = kwargs.get('prefix', '\n' + '#' * depth + ' ')
tail = kwargs.get('tail', '\n')
doc1 = kwargs.get('doc1', False)
color = kwargs.get('color', cyan)
def real_decorator(func):
if doc1:
return print_full_name(color=color, prefix=prefix,
tail=tail)(print_doc1(func))
return print_full_name(color=color, prefix=prefix, tail=tail)(func)
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator | 0.001279 |
def all_stats(self):
"""Compute stats for all results.
:return: :class:`results.AllStats <results.AllStats>` object
:rtype: results.AllStats
"""
schema = AllStatsSchema()
resp = self.service.post(self.base, params={'stats': 'all'})
return self.service.decode(schema, resp) | 0.006079 |
def VORPS(cpu, dest, src, src2):
"""
Performs a bitwise logical OR operation on the source operand (second operand) and second source operand (third operand)
and stores the result in the destination operand (first operand).
"""
res = dest.write(src.read() | src2.read()) | 0.009646 |
def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False):
"""
Executes a sequence of (statement, parameters) tuples concurrently. Each
``parameters`` item must be a sequence or :const:`None`.
The `concurrency` parameter controls how many statements will be executed
concurrently. When :attr:`.Cluster.protocol_version` is set to 1 or 2,
it is recommended that this be kept below 100 times the number of
core connections per host times the number of connected hosts (see
:meth:`.Cluster.set_core_connections_per_host`). If that amount is exceeded,
the event loop thread may attempt to block on new connection creation,
substantially impacting throughput. If :attr:`~.Cluster.protocol_version`
is 3 or higher, you can safely experiment with higher levels of concurrency.
If `raise_on_first_error` is left as :const:`True`, execution will stop
after the first failed statement and the corresponding exception will be
raised.
`results_generator` controls how the results are returned.
* If :const:`False`, the results are returned only after all requests have completed.
* If :const:`True`, a generator expression is returned. Using a generator results in a constrained
memory footprint when the results set will be large -- results are yielded
as they return instead of materializing the entire list at once. The trade for lower memory
footprint is marginal CPU overhead (more thread coordination and sorting out-of-order results
on-the-fly).
A sequence of ``ExecutionResult(success, result_or_exc)`` namedtuples is returned
in the same order that the statements were passed in. If ``success`` is :const:`False`,
there was an error executing the statement, and ``result_or_exc`` will be
an :class:`Exception`. If ``success`` is :const:`True`, ``result_or_exc``
will be the query result.
Example usage::
select_statement = session.prepare("SELECT * FROM users WHERE id=?")
statements_and_params = []
for user_id in user_ids:
params = (user_id, )
statements_and_params.append((select_statement, params))
results = execute_concurrent(
session, statements_and_params, raise_on_first_error=False)
for (success, result) in results:
if not success:
handle_error(result) # result will be an Exception
else:
process_user(result[0]) # result will be a list of rows
Note: in the case that `generators` are used, it is important to ensure the consumers do not
block or attempt further synchronous requests, because no further IO will be processed until
the consumer returns. This may also produce a deadlock in the IO event thread.
"""
if concurrency <= 0:
raise ValueError("concurrency must be greater than 0")
if not statements_and_parameters:
return []
executor = ConcurrentExecutorGenResults(session, statements_and_parameters) if results_generator else ConcurrentExecutorListResults(session, statements_and_parameters)
return executor.execute(concurrency, raise_on_first_error) | 0.004608 |
def _rate_limit(self):
"""Pulls in and enforces the latest rate limits for the specified user"""
self.limits_set = True
for product in self.account_information():
self.limits[product['id']] = {'interval': timedelta(seconds=60 / float(product['per_minute_limit']))} | 0.013333 |
def classes(self, values):
"""Classes setter."""
if isinstance(values, dict):
if self.__data is not None and len(self.__data) != len(values):
raise ValueError(
'number of samples do not match the previously assigned data')
elif set(self.keys) != set(list(values)):
raise ValueError('sample ids do not match the previously assigned ids.')
else:
self.__classes = values
else:
raise ValueError('classes input must be a dictionary!') | 0.00703 |
def distutils_old_autosemver_case(metadata, attr, value):
"""DEPRECATED"""
metadata = distutils_default_case(metadata, attr, value)
create_changelog(bugtracker_url=getattr(metadata, 'bugtracker_url', ''))
return metadata | 0.004237 |
def _ll_pre_transform(self, train_tfm:List[Callable], valid_tfm:List[Callable]):
"Call `train_tfm` and `valid_tfm` after opening image, before converting from `PIL.Image`"
self.train.x.after_open = compose(train_tfm)
self.valid.x.after_open = compose(valid_tfm)
return self | 0.017301 |
def inversion_psf_shape_tag_from_inversion_psf_shape(inversion_psf_shape):
"""Generate an inversion psf shape tag, to customize phase names based on size of the inversion PSF that the \
original PSF is trimmed to for faster run times.
This changes the phase name 'phase_name' as follows:
inversion_psf_shape = None -> phase_name
inversion_psf_shape = (2, 2) -> phase_name_inv_psf_2x2
"""
if inversion_psf_shape is None:
return ''
else:
y = str(inversion_psf_shape[0])
x = str(inversion_psf_shape[1])
return ('_inv_psf_' + y + 'x' + x) | 0.003003 |
def get_address_transactions(self, account_id, address_id, **params):
"""https://developers.coinbase.com/api/v2#list-address39s-transactions"""
response = self._get(
'v2',
'accounts',
account_id,
'addresses',
address_id,
'transactions',
params=params)
return self._make_api_object(response, Transaction) | 0.007299 |
def show(self):
"""Shows the new colors on the pixels themselves if they haven't already
been autowritten.
The colors may or may not be showing after this function returns because
it may be done asynchronously."""
if self.brightness > 0.99:
neopixel_write(self.pin, self.buf)
else:
neopixel_write(self.pin, bytearray([int(i * self.brightness) for i in self.buf])) | 0.011468 |
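The brightness scaling step in isolation, with a made-up buffer and no hardware dependency.
buf = bytearray([255, 128, 0])
brightness = 0.5
scaled = bytearray(int(i * brightness) for i in buf)
print(scaled)   # bytearray(b'\x7f@\x00')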
def make(assembly, samples):
""" Make phylip and nexus formats. This is hackish since I'm recycling the
code whole-hog from pyrad V3. Probably could be good to go back through
and clean up the conversion code some time.
"""
## get the longest name
longname = max([len(i) for i in assembly.samples.keys()])
names = [i.name for i in samples]
partitions = makephy(assembly, samples, longname)
makenex(assembly, names, longname, partitions) | 0.008403 |
def process(self, argument_list):
"""
:param argument_list: list of str, input from user
:return: dict:
{"cleaned_arg_name": "value"}
"""
arg_index = 0
for a in argument_list:
opt_and_val = a.split("=", 1)
opt_name = opt_and_val[0]
try:
# option
argument = self.options[opt_name]
except KeyError:
# argument
try:
argument = self.arguments[arg_index]
except IndexError:
logger.error("option/argument %r not specified", a)
raise NoSuchOptionOrArgument("No such option or argument: %r" % opt_name)
logger.info("argument found: %s", argument)
safe_arg_name = normalize_arg_name(argument.name) # so we can access names-with-dashes
logger.info("argument is available under name %r", safe_arg_name)
if isinstance(argument, Argument):
arg_index += 1
value = (a, )
else:
try:
value = (opt_and_val[1], )
except IndexError:
value = tuple()
arg_val = argument.action(*value)
logger.info("argument %r has value %r", safe_arg_name, arg_val)
self.given_arguments[safe_arg_name] = arg_val
return self.given_arguments | 0.002736 |
def issuer(self, value):
"""
An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object
of the issuer.
"""
is_oscrypto = isinstance(value, asymmetric.Certificate)
if not is_oscrypto and not isinstance(value, x509.Certificate):
raise TypeError(_pretty_message(
'''
issuer must be an instance of asn1crypto.x509.Certificate or
oscrypto.asymmetric.Certificate, not %s
''',
_type_name(value)
))
if is_oscrypto:
value = value.asn1
self._issuer = value | 0.004666 |
def example_list(a, args):
""" list topics and cluster metadata """
if len(args) == 0:
what = "all"
else:
what = args[0]
md = a.list_topics(timeout=10)
print("Cluster {} metadata (response from broker {}):".format(md.cluster_id, md.orig_broker_name))
if what in ("all", "brokers"):
print(" {} brokers:".format(len(md.brokers)))
for b in iter(md.brokers.values()):
if b.id == md.controller_id:
print(" {} (controller)".format(b))
else:
print(" {}".format(b))
if what not in ("all", "topics"):
return
print(" {} topics:".format(len(md.topics)))
for t in iter(md.topics.values()):
if t.error is not None:
errstr = ": {}".format(t.error)
else:
errstr = ""
print(" \"{}\" with {} partition(s){}".format(t, len(t.partitions), errstr))
for p in iter(t.partitions.values()):
if p.error is not None:
errstr = ": {}".format(p.error)
else:
errstr = ""
print(" partition {} leader: {}, replicas: {}, isrs: {}".format(
p.id, p.leader, p.replicas, p.isrs, errstr)) | 0.002423 |
def _format_name_map(self, lon, lat):
''' Return the name of the map in the good format '''
if self.ppd in [4, 16, 64, 128]:
lolaname = '_'.join(['LDEM', str(self.ppd)])
elif self.ppd in [512]:
lolaname = '_'.join(
['LDEM', str(self.ppd), lat[0], lat[1], lon[0], lon[1]])
return lolaname | 0.005556 |
def create_osd_keyring(conn, cluster, key):
"""
Run on osd node, writes the bootstrap key if not there yet.
"""
logger = conn.logger
path = '/var/lib/ceph/bootstrap-osd/{cluster}.keyring'.format(
cluster=cluster,
)
if not conn.remote_module.path_exists(path):
logger.warning('osd keyring does not exist yet, creating one')
conn.remote_module.write_keyring(path, key) | 0.002392 |
def _scan_constraint_match(self, minimum_version, maximum_version, jdk):
"""Finds a cached version matching the specified constraints
:param Revision minimum_version: minimum jvm version to look for (eg, 1.7).
:param Revision maximum_version: maximum jvm version to look for (eg, 1.7.9999).
:param bool jdk: whether the found java distribution is required to have a jdk.
:return: the Distribution, or None if no matching distribution is in the cache.
:rtype: :class:`pants.java.distribution.Distribution`
"""
for dist in self._cache.values():
if minimum_version and dist.version < minimum_version:
continue
if maximum_version and dist.version > maximum_version:
continue
if jdk and not dist.jdk:
continue
return dist | 0.010025 |
def get_cli_returns(
self,
jid,
minions,
timeout=None,
tgt='*',
tgt_type='glob',
verbose=False,
show_jid=False,
**kwargs):
'''
Starts a watcher looking at the return data for a specified JID
:returns: all of the information for the JID
'''
if verbose:
msg = 'Executing job with jid {0}'.format(jid)
print(msg)
print('-' * len(msg) + '\n')
elif show_jid:
print('jid: {0}'.format(jid))
if timeout is None:
timeout = self.opts['timeout']
fret = {}
# make sure the minions is a set (since we do set operations on it)
minions = set(minions)
found = set()
# start this before the cache lookup-- in case new stuff comes in
event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout)
# get the info from the cache
ret = self.get_cache_returns(jid)
if ret != {}:
found.update(set(ret))
yield ret
# if you have all the returns, stop
if len(found.intersection(minions)) >= len(minions):
return
# otherwise, get them from the event system
for event in event_iter:
if event != {}:
found.update(set(event))
yield event
if len(found.intersection(minions)) >= len(minions):
self._clean_up_subscriptions(jid)
return
def add_to_cluster(self, name, **attrs):
"""Add attributes to a cluster.
"""
cluster = self.get_cluster(name=name)
attrs_ = cluster['cluster']
attrs_.update(**attrs) | 0.009756 |
def att_pos_mocap_send(self, time_usec, q, x, y, z, force_mavlink1=False):
'''
Motion capture attitude and position
time_usec : Timestamp (micros since boot or Unix epoch) (uint64_t)
q : Attitude quaternion (w, x, y, z order, zero-rotation is 1, 0, 0, 0) (float)
x : X position in meters (NED) (float)
y : Y position in meters (NED) (float)
z : Z position in meters (NED) (float)
'''
return self.send(self.att_pos_mocap_encode(time_usec, q, x, y, z), force_mavlink1=force_mavlink1) | 0.006766 |
def close(self):
"""
Closes the connection to this hypervisor (but leave it running).
"""
yield from self.send("hypervisor close")
self._writer.close()
self._reader, self._writer = None, None
def users_changed_handler(stream):
"""
Sends connected client list of currently active users in the chatroom
"""
while True:
yield from stream.get()
# Get list list of current active users
users = [
{'username': username, 'uuid': uuid_str}
for username, uuid_str in ws_connections.values()
]
# Make packet with list of new users (sorted by username)
packet = {
'type': 'users-changed',
'value': sorted(users, key=lambda i: i['username'])
}
logger.debug(packet)
yield from fanout_message(ws_connections.keys(), packet) | 0.001484 |
def _parse_eloss(line, lines):
"""Parse Energy [eV] eloss_xx eloss_zz"""
split_line = line.split()
energy = float(split_line[0])
eloss_xx = float(split_line[1])
eloss_zz = float(split_line[2])
return {"energy": energy, "eloss_xx": eloss_xx, "eloss_zz": eloss_zz} | 0.003367 |
def pre(self):
"""
Pre-order search of the tree rooted at this node.
(First visit current node, then visit children.)
:rtype: generator of :class:`~aeneas.tree.Tree`
"""
yield self
for node in self.children:
for v in node.pre:
yield v | 0.00627 |
def get_page_full(self, page_id):
""" Get full page info and full html code """
try:
result = self._request('/getpagefull/',
{'pageid': page_id})
return TildaPage(**result)
except NetworkError:
return [] | 0.006711 |
def adjustSizeConstraint(self):
"""
Adjusts the min/max size based on the current tab.
"""
widget = self.currentWidget()
if not widget:
return
offw = 4
offh = 4
#if self.tabBar().isVisible():
# offh += 20 # tab bar height
minw = min(widget.minimumWidth() + offw, MAX_INT)
minh = min(widget.minimumHeight() + offh, MAX_INT)
maxw = min(widget.maximumWidth() + offw, MAX_INT)
maxh = min(widget.maximumHeight() + offh, MAX_INT)
self.setMinimumSize(minw, minh)
self.setMaximumSize(maxw, maxh)
self.setSizePolicy(widget.sizePolicy()) | 0.009777 |
def resize_to_shape(data, shape, zoom=None, mode='nearest', order=0):
"""
Function resize input data to specific shape.
:param data: input 3d array-like data
:param shape: shape of output data
:param zoom: zoom is used for back compatibility
:mode: default is 'nearest'
"""
# @TODO remove old code in except part
try:
# print('before the exception')
# raise Exception('test without skimage')
# print('after the exception')
import skimage
import skimage.transform
# Now we need reshape seeds and segmentation to original size
segm_orig_scale = skimage.transform.resize(
data, shape, order=0,
preserve_range=True,
mode="constant",
)
segmentation = segm_orig_scale
logger.debug('resize to orig with skimage')
except:
import scipy
import scipy.ndimage
dtype = data.dtype
if zoom is None:
zoom = shape / np.asarray(data.shape).astype(np.double)
segm_orig_scale = scipy.ndimage.zoom(
data,
1.0 / zoom,
mode=mode,
order=order
).astype(dtype)
logger.debug('resize to orig with scipy.ndimage')
# @TODO remove the hack that crops to the same size
# essentially this is solved, but it could be done more elegantly inside zoom
# unfortunately there is probably a bug there
# print('d3d ', self.data3d.shape)
# print('s orig scale shape ', segm_orig_scale.shape)
shp = [
np.min([segm_orig_scale.shape[0], shape[0]]),
np.min([segm_orig_scale.shape[1], shape[1]]),
np.min([segm_orig_scale.shape[2], shape[2]]),
]
# self.data3d = self.data3d[0:shp[0], 0:shp[1], 0:shp[2]]
# import ipdb; ipdb.set_trace()  # BREAKPOINT
segmentation = np.zeros(shape, dtype=dtype)
segmentation[
0:shp[0],
0:shp[1],
0:shp[2]] = segm_orig_scale[0:shp[0], 0:shp[1], 0:shp[2]]
del segm_orig_scale
return segmentation | 0.000973 |
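A minimal call sketch with synthetic data, assuming numpy (np), the module logger, and either skimage or scipy are importable as used above.
import numpy as np

data = np.random.randint(0, 5, size=(10, 12, 8)).astype(np.uint8)
resized = resize_to_shape(data, shape=(20, 24, 16))
print(resized.shape)   # (20, 24, 16)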
def reference_journal(self, index):
"""Return the reference journal name."""
# TODO Change the column name 'Journal' to another?
ref_type = self.reference_type(index)
if ref_type == "journalArticle":
return self.reference_data(index)["publicationTitle"]
else:
return "({})".format(ref_type) | 0.005634 |
def elapsed():
"""
Displays the elapsed time since the step started running.
"""
environ.abort_thread()
step = _cd.project.get_internal_project().current_step
r = _get_report()
r.append_body(render.elapsed_time(step.elapsed_time))
result = '[ELAPSED]: {}\n'.format(timedelta(seconds=step.elapsed_time))
r.stdout_interceptor.write_source(result) | 0.002625 |
def set_minimum_level(self, level=0, stdoutFlag=True, fileFlag=True):
"""
Set the minimum logging level. All levels below the minimum will be ignored at logging.
:Parameters:
#. level (None, number, str): The minimum level of logging.
If None, minimum level checking is left out.
If str, it must be a defined logtype and therefore the minimum level would be the level of this logtype.
#. stdoutFlag (boolean): Whether to apply this minimum level to standard output logging.
#. fileFlag (boolean): Whether to apply this minimum level to file logging.
"""
# check flags
assert isinstance(stdoutFlag, bool), "stdoutFlag must be boolean"
assert isinstance(fileFlag, bool), "fileFlag must be boolean"
if not (stdoutFlag or fileFlag):
return
# check level
if level is not None:
if isinstance(level, basestring):
level = str(level)
assert level in self.__logTypeStdoutFlags.keys(), "level '%s' given as string, is not defined logType" %level
level = self.__logTypeLevels[level]
assert _is_number(level), "level must be a number"
level = float(level)
if stdoutFlag:
if self.__stdoutMaxLevel is not None:
assert level<=self.__stdoutMaxLevel, "stdoutMinLevel must be smaller or equal to stdoutMaxLevel %s"%self.__stdoutMaxLevel
if fileFlag:
if self.__fileMaxLevel is not None:
assert level<=self.__fileMaxLevel, "fileMinLevel must be smaller or equal to fileMaxLevel %s"%self.__fileMaxLevel
# set flags
if stdoutFlag:
self.__stdoutMinLevel = level
self.__update_stdout_flags()
if fileFlag:
self.__fileMinLevel = level
self.__update_file_flags() | 0.007216 |
def store(self, name, value, atype, new_name=None, multiplier=None, allowed_values=None):
''' store a config value in a dictionary, these values are used to populate a transfer spec
validation -- check type, check allowed values and rename if required '''
if value is not None:
_bad_type = (not isinstance(value, atype))
if not _bad_type:
# special case
_bad_type = (isinstance(value, bool) and atype == int)
if _bad_type:
# could be a special value
if allowed_values and value in allowed_values:
allowed_values = None
else:
raise ValueError("%s should be value of type (%s)" % (name, atype.__name__))
if allowed_values:
if isinstance(value, str):
if value not in allowed_values:
raise ValueError("%s can be %s" % (name, allowed_values))
elif isinstance(value, int):
if isinstance(allowed_values[0], int):
if value < allowed_values[0]:
raise ValueError("%s must be >= %d" % (name, allowed_values[0]))
_val = value if not multiplier else (multiplier * value)
_name = name if not new_name else new_name
self._dict[_name] = _val | 0.005682 |
def add_header(self, name, value):
'''Attach an email header to send with the message.
:param name: The name of the header value.
:param value: The header value.
'''
if self.headers is None:
self.headers = []
self.headers.append(dict(Name=name, Value=value)) | 0.00627 |
def write_bytes(self, addr, buf):
"""Write many bytes to the specified device. buf is a bytearray"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
self._select_device(addr)
self._device.write(buf) | 0.010909 |
def _get_nodal_planes_from_ndk_string(self, ndk_string):
"""
Reads the nodal plane information (represented by 5th line [57:] of the
tensor representation) and returns an instance of the GCMTNodalPlanes
class
"""
planes = GCMTNodalPlanes()
planes.nodal_plane_1 = {'strike': float(ndk_string[0:3]),
'dip': float(ndk_string[3:6]),
'rake': float(ndk_string[6:11])}
planes.nodal_plane_2 = {'strike': float(ndk_string[11:15]),
'dip': float(ndk_string[15:18]),
'rake': float(ndk_string[18:])}
return planes | 0.002857 |
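A standalone sketch of the same fixed-width slicing on a made-up 23-character nodal-plane string; the real method wraps these two dicts in a GCMTNodalPlanes instance.
ndk_string = "295 30   90 115 60   88"
plane_1 = {'strike': float(ndk_string[0:3]),
           'dip': float(ndk_string[3:6]),
           'rake': float(ndk_string[6:11])}
plane_2 = {'strike': float(ndk_string[11:15]),
           'dip': float(ndk_string[15:18]),
           'rake': float(ndk_string[18:])}
print(plane_1)   # {'strike': 295.0, 'dip': 30.0, 'rake': 90.0}
print(plane_2)   # {'strike': 115.0, 'dip': 60.0, 'rake': 88.0}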
def build_tensor_serving_input_receiver_fn(shape, dtype=tf.float32,
batch_size=1):
"""Returns a input_receiver_fn that can be used during serving.
This expects examples to come through as float tensors, and simply
wraps them as TensorServingInputReceivers.
Arguably, this should live in tf.estimator.export. Testing here first.
Args:
shape: list representing target size of a single example.
dtype: the expected datatype for the input example
batch_size: number of input tensors that will be passed for prediction
Returns:
A function that itself returns a TensorServingInputReceiver.
"""
def serving_input_receiver_fn():
# Prep a placeholder where the input example will be fed in
features = tf.placeholder(
dtype=dtype, shape=[batch_size] + shape, name='input_tensor')
return tf.estimator.export.TensorServingInputReceiver(
features=features, receiver_tensors=features)
return serving_input_receiver_fn | 0.003945 |
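A hedged usage sketch, assuming TensorFlow 1.x graph mode (the tf.placeholder call above implies TF 1.x); the shape here is made up.
serving_fn = build_tensor_serving_input_receiver_fn(shape=[28, 28, 1], batch_size=1)
receiver = serving_fn()
print(receiver.features.shape)   # (1, 28, 28, 1)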
def merge_leading_dims(array_or_tensor, n_dims=2):
"""Merge the first dimensions of a tensor.
Args:
array_or_tensor: Tensor to have its first dimensions merged. Can also
be an array or numerical value, which will be converted to a tensor
for batch application, if needed.
n_dims: Number of dimensions to merge.
Returns:
Either the input value converted to a Tensor, with the requested dimensions
merged, or the unmodified input value if the input has fewer than `n_dims`
dimensions.
Raises:
ValueError: If the rank of `array_or_tensor` is not well-defined.
"""
tensor = tf.convert_to_tensor(array_or_tensor)
tensor_shape_static = tensor.get_shape()
# Check if the rank of the input tensor is well-defined.
if tensor_shape_static.dims is None:
raise ValueError("Can't merge leading dimensions of tensor of unknown "
"rank.")
tensor_shape_list = tensor_shape_static.as_list()
# We can only merge the n_dims leading dimensions if the rank of the given
# tensor is sufficiently large.
if n_dims > len(tensor_shape_list):
return array_or_tensor
if tensor_shape_static.is_fully_defined():
new_shape = (
[np.prod(tensor_shape_list[:n_dims])] + tensor_shape_list[n_dims:])
return tf.reshape(tensor, new_shape)
# Shape can't be inferred statically.
tensor_shape = tf.shape(tensor)
new_first_dim = tf.reduce_prod(tensor_shape[:n_dims], keepdims=True)
other_dims = tensor_shape[n_dims:]
new_size = tf.concat([new_first_dim, other_dims], 0)
result = tf.reshape(tensor, new_size)
if all(value is not None for value in tensor_shape_list[:n_dims]):
merged_leading_size = np.prod(tensor_shape_list[:n_dims])
else:
merged_leading_size = None
# We need to set the result size of this, as otherwise we won't be able to
# pass to e.g. a Linear. Here we need to know at least the rank of the tensor.
result.set_shape([merged_leading_size] + tensor_shape_list[n_dims:])
return result | 0.011917 |
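A small shape sketch, assuming TensorFlow is imported as tf as in the function above.
x = tf.zeros([2, 3, 4, 5])
merged = merge_leading_dims(x, n_dims=2)
print(merged.get_shape().as_list())      # [6, 4, 5] -- the two leading dims are merged
untouched = merge_leading_dims(tf.zeros([7]), n_dims=2)
print(untouched.get_shape().as_list())   # [7] -- rank < n_dims, returned unchanged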
def filter(self, **kwargs):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
kwargs: valid FILTER_VALIDATOR arguments.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.filter(has_funder='true', has_license='true')
>>> for item in query:
... print(item['title'])
...
['Design of smiling-face-shaped band-notched UWB antenna']
['Phase I clinical and pharmacokinetic study of PM01183 (a tetrahydroisoquinoline, Lurbinectedin) in combination with gemcitabine in patients with advanced solid tumors']
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
for fltr, value in kwargs.items():
decoded_fltr = fltr.replace('__', '.').replace('_', '-')
if decoded_fltr not in self.FILTER_VALIDATOR.keys():
raise UrlSyntaxError(
'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
str(decoded_fltr),
', '.join(self.FILTER_VALIDATOR.keys())
)
)
if self.FILTER_VALIDATOR[decoded_fltr] is not None:
self.FILTER_VALIDATOR[decoded_fltr](str(value))
if 'filter' not in request_params:
request_params['filter'] = decoded_fltr + ':' + str(value)
else:
request_params['filter'] += ',' + decoded_fltr + ':' + str(value)
return self.__class__(request_url, request_params, context, self.etiquette) | 0.002914 |
def next_media_partname(self, ext):
"""Return |PackURI| instance for next available media partname.
Partname is first available, starting at sequence number 1. Empty
sequence numbers are reused. *ext* is used as the extension on the
returned partname.
"""
def first_available_media_idx():
media_idxs = sorted([
part.partname.idx for part in self.iter_parts()
if part.partname.startswith('/ppt/media/media')
])
for i, media_idx in enumerate(media_idxs):
idx = i + 1
if idx < media_idx:
return idx
return len(media_idxs)+1
idx = first_available_media_idx()
return PackURI('/ppt/media/media%d.%s' % (idx, ext)) | 0.002484 |
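The index-reuse rule from first_available_media_idx in isolation, with made-up index lists: the first gap in the sorted sequence is reused, otherwise the next number after the last one.
def first_available_idx(media_idxs):
    for i, media_idx in enumerate(sorted(media_idxs)):
        idx = i + 1
        if idx < media_idx:
            return idx
    return len(media_idxs) + 1

print(first_available_idx([1, 2, 4]))   # 3 -- the gap at 3 is reused
print(first_available_idx([1, 2, 3]))   # 4 -- no gap, so append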
def move_to(self, folder_id):
"""
:param str folder_id: The Calendar ID to where you want to move the event to.
Moves an event to a different folder (calendar). ::
event = service.calendar().get_event(id='KEY HERE')
event.move_to(folder_id='NEW CALENDAR KEY HERE')
"""
if not folder_id:
raise TypeError(u"You can't move an event to a non-existant folder")
if not isinstance(folder_id, BASESTRING_TYPES):
raise TypeError(u"folder_id must be a string")
if not self.id:
raise TypeError(u"You can't move an event that hasn't been created yet.")
self.refresh_change_key()
response_xml = self.service.send(soap_request.move_event(self, folder_id))
new_id, new_change_key = self._parse_id_and_change_key_from_response(response_xml)
if not new_id:
raise ValueError(u"MoveItem returned success but requested item not moved")
self._id = new_id
self._change_key = new_change_key
self.calendar_id = folder_id
return self | 0.007921 |
def withdraw(self, uuid, organization, from_date=None, to_date=None):
"""Withdraw a unique identity from an organization.
This method removes all the enrollments between the unique identity,
identified by <uuid>, and <organization>. Both entities must exist
on the registry before being deleted.
When a period of time is given using either <from_date> and <to_date>
parameters, it will remove those enrollments which their periods fall
between these two parameters. Default values for these dates
are '1900-01-01' and '2100-01-01'.
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
"""
# Empty or None values for uuid and organizations are not allowed,
# so do nothing
if not uuid or not organization:
return CMD_SUCCESS
try:
api.delete_enrollment(self.db, uuid, organization, from_date, to_date)
except (NotFoundError, InvalidValueError) as e:
self.error(str(e))
return e.code
return CMD_SUCCESS | 0.002451 |
def dict_cat(net, define_cat_colors=False):
'''
make a dictionary of node-category associations
'''
# print('---------------------------------')
# print('---- dict_cat: before setting cat colors')
# print('---------------------------------\n')
# print(define_cat_colors)
# print(net.viz['cat_colors'])
net.persistent_cat = True
for inst_rc in ['row', 'col']:
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
for inst_name_cat in all_cats:
dict_cat = {}
tmp_cats = net.dat['node_info'][inst_rc][inst_name_cat]
tmp_nodes = net.dat['nodes'][inst_rc]
for i in range(len(tmp_cats)):
inst_cat = tmp_cats[i]
inst_node = tmp_nodes[i]
if inst_cat not in dict_cat:
dict_cat[inst_cat] = []
dict_cat[inst_cat].append(inst_node)
tmp_name = 'dict_' + inst_name_cat.replace('-', '_')
net.dat['node_info'][inst_rc][tmp_name] = dict_cat
# merge with old cat_colors by default
cat_colors = net.viz['cat_colors']
if define_cat_colors == True:
cat_number = 0
for inst_rc in ['row', 'col']:
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
for cat_index in all_cats:
if cat_index not in cat_colors[inst_rc]:
cat_colors[inst_rc][cat_index] = {}
cat_names = sorted(list(set(net.dat['node_info'][inst_rc][cat_index])))
# loop through each category name and assign a color
for tmp_name in cat_names:
# using the same rules as the front-end to define cat_colors
inst_color = get_cat_color(cat_number + cat_names.index(tmp_name))
check_name = tmp_name
# check if category is string type and non-numeric
try:
float(check_name)
is_string_cat = False
except:
is_string_cat = True
if is_string_cat == True:
# check for default non-color
if ': ' in check_name:
check_name = check_name.split(': ')[1]
# if check_name == 'False' or check_name == 'false':
if 'False' in check_name or 'false' in check_name:
inst_color = '#eee'
if 'Not ' in check_name:
inst_color = '#eee'
# print('cat_colors')
# print('----------')
# print(cat_colors[inst_rc][cat_index])
# do not overwrite old colors
if tmp_name not in cat_colors[inst_rc][cat_index] and is_string_cat:
cat_colors[inst_rc][cat_index][tmp_name] = inst_color
# print('overwrite: ' + tmp_name + ' -> ' + str(inst_color))
cat_number = cat_number + 1
net.viz['cat_colors'] = cat_colors | 0.01492 |
def scan_build_files(project_tree, base_relpath, build_ignore_patterns=None):
"""Looks for all BUILD files
:param project_tree: Project tree to scan in.
:type project_tree: :class:`pants.base.project_tree.ProjectTree`
:param base_relpath: Directory under root_dir to scan.
:param build_ignore_patterns: .gitignore like patterns to exclude from BUILD files scan.
:type build_ignore_patterns: pathspec.pathspec.PathSpec
"""
if base_relpath and os.path.isabs(base_relpath):
raise BuildFile.BadPathError('base_relpath parameter ({}) should be a relative path.'
.format(base_relpath))
if base_relpath and not project_tree.isdir(base_relpath):
raise BuildFile.BadPathError('Can only scan directories and {0} is not a valid dir.'
.format(base_relpath))
if build_ignore_patterns and not isinstance(build_ignore_patterns, PathSpec):
raise TypeError("build_ignore_patterns should be pathspec.pathspec.PathSpec instance, "
"instead {} was given.".format(type(build_ignore_patterns)))
build_files = set()
for root, dirs, files in project_tree.walk(base_relpath or '', topdown=True):
excluded_dirs = list(build_ignore_patterns.match_files('{}/'.format(os.path.join(root, dirname))
for dirname in dirs))
for subdir in excluded_dirs:
# Remove trailing '/' from paths which were added to indicate that paths are paths to directories.
dirs.remove(fast_relpath(subdir, root)[:-1])
for filename in files:
if BuildFile._is_buildfile_name(filename):
build_files.add(os.path.join(root, filename))
return BuildFile._build_files_from_paths(project_tree, build_files, build_ignore_patterns) | 0.010332 |
def render(self, *args, **kwargs):
"""Override of the rendering so that if the link have no text in it, the href is used inside the <a> tag"""
if not self.childs and "href" in self.attrs:
return self.clone()(self.attrs["href"]).render(*args, **kwargs)
return super().render(*args, **kwargs) | 0.009202 |
def save(self, filename=None, format=None, path=None,
width=None, height=None, units='in',
dpi=None, limitsize=True, verbose=True, **kwargs):
"""
Save a ggplot object as an image file
Parameters
----------
filename : str, optional
File name to write the plot to. If not specified, a name
like “plotnine-save-<hash>.<format>” is used.
format : str
Image format to use, automatically extract from
file name extension.
path : str
Path to save plot to (if you just want to set path and
not filename).
width : number, optional
Width (defaults to value set by the theme). If specified
the `height` must also be given.
height : number, optional
Height (defaults to value set by the theme). If specified
the `width` must also be given.
units : str
Units for width and height when either one is explicitly
specified (in, cm, or mm).
dpi : float
DPI to use for raster graphics. If None, defaults to using
the `dpi` of theme, if none is set then a `dpi` of 100.
limitsize : bool
If ``True`` (the default), ggsave will not save images
larger than 50x50 inches, to prevent the common error
of specifying dimensions in pixels.
verbose : bool
If ``True``, print the saving information.
kwargs : dict
Additional arguments to pass to matplotlib `savefig()`.
"""
fig_kwargs = {'bbox_inches': 'tight', # 'tight' is a good default
'format': format}
fig_kwargs.update(kwargs)
figure = [None] # nonlocal
# filename, depends on the object
if filename is None:
ext = format if format else 'pdf'
filename = self._save_filename(ext)
if path:
filename = os.path.join(path, filename)
# Preserve the users object
self = deepcopy(self)
# theme
self.theme = self.theme or theme_get()
# The figure size should be known by the theme
if width is not None and height is not None:
width = to_inches(width, units)
height = to_inches(height, units)
self += theme(figure_size=(width, height))
elif (width is None and height is not None or
width is not None and height is None):
raise PlotnineError(
"You must specify both width and height")
width, height = self.theme.themeables.property('figure_size')
if limitsize and (width > 25 or height > 25):
raise PlotnineError(
"Dimensions (width={}, height={}) exceed 25 inches "
"(height and width are specified in inches/cm/mm, "
"not pixels). If you are sure you want these "
"dimensions, use 'limitsize=False'.".format(width, height))
if dpi is None:
try:
self.theme.themeables.property('dpi')
except KeyError:
self.theme = self.theme + theme(dpi=100)
else:
self.theme = self.theme + theme(dpi=dpi)
if verbose:
warn("Saving {0} x {1} {2} image.".format(
from_inches(width, units),
from_inches(height, units), units), PlotnineWarning)
warn('Filename: {}'.format(filename), PlotnineWarning)
# Helper function so that we can clean up when it fails
def _save():
fig = figure[0] = self.draw()
# savefig ignores the figure face & edge colors
facecolor = fig.get_facecolor()
edgecolor = fig.get_edgecolor()
if edgecolor:
fig_kwargs['facecolor'] = facecolor
if edgecolor:
fig_kwargs['edgecolor'] = edgecolor
fig_kwargs['frameon'] = True
fig.savefig(filename, **fig_kwargs)
try:
_save()
except Exception as err:
figure[0] and plt.close(figure[0])
raise err
else:
figure[0] and plt.close(figure[0]) | 0.000933 |
def from_xy(cls, x_array, y_array):
""" Create a dataset from two arrays of data.
:note: infering the dimensions for the first elements of each array.
"""
if len(x_array) == 0:
raise ValueError("data array is empty.")
dim_x, dim_y = len(x_array[0]), len(y_array[0])
dataset = cls(dim_x, dim_y)
for x, y in zip(x_array, y_array):
assert len(x) == dim_x and len(y) == dim_y
dataset.add_xy(x, y)
return dataset | 0.005859 |
def get_variogram_points(self):
"""Returns both the lags and the variogram function evaluated at each
of them.
The evaluation of the variogram function and the lags are produced
internally. This method is convenient when the user wants to access to
the lags and the resulting variogram (according to the model provided)
for further analysis.
Returns
-------
(tuple) tuple containing:
lags (array) - the lags at which the variogram was evaluated
variogram (array) - the variogram function evaluated at the lags
"""
return self.lags, self.variogram_function(self.variogram_model_parameters, self.lags) | 0.004121 |
def _calculate_python_sources(self, targets):
"""Generate a set of source files from the given targets."""
python_eval_targets = filter(self.is_non_synthetic_python_target, targets)
sources = set()
for target in python_eval_targets:
sources.update(
source for source in target.sources_relative_to_buildroot()
if os.path.splitext(source)[1] == self._PYTHON_SOURCE_EXTENSION
)
return list(sources) | 0.004515 |
def _get_mirror_urls(self, mirrors=None, main_mirror_url=None):
"""Retrieves a list of URLs from the main mirror DNS entry
unless a list of mirror URLs are passed.
"""
if not mirrors:
mirrors = get_mirrors(main_mirror_url)
# Should this be made "less random"? E.g. netselect like?
random.shuffle(mirrors)
mirror_urls = set()
for mirror_url in mirrors:
# Make sure we have a valid URL
if not ("http://" or "https://" or "file://") in mirror_url:
mirror_url = "http://%s" % mirror_url
if not mirror_url.endswith("/simple"):
mirror_url = "%s/simple/" % mirror_url
mirror_urls.add(mirror_url)
return list(mirror_urls) | 0.002541 |
def get_magnitude_term(self, C, rup):
"""
Returns the magnitude scaling term in equation 3
"""
b0, stress_drop = self._get_sof_terms(C, rup.rake)
if rup.mag <= C["m1"]:
return b0
else:
# Calculate moment (equation 5)
m_0 = 10.0 ** (1.5 * rup.mag + 16.05)
# Get stress-drop scaling (equation 6)
if rup.mag > C["m2"]:
stress_drop += (C["b2"] * (C["m2"] - self.CONSTANTS["mstar"]) +
(C["b3"] * (rup.mag - C["m2"])))
else:
stress_drop += (C["b2"] * (rup.mag - self.CONSTANTS["mstar"]))
stress_drop = np.exp(stress_drop)
# Get corner frequency (equation 4)
f0 = 4.9 * 1.0E6 * 3.2 * ((stress_drop / m_0) ** (1. / 3.))
return 1. / f0 | 0.002331 |
def deep_compare(self, other, settings):
"""
Compares each field of the name one at a time to see if they match.
Each name field has context-specific comparison logic.
:param Name other: other Name for comparison
:return bool: whether the two names are compatible
"""
if not self._is_compatible_with(other):
return False
first, middle, last = self._compare_components(other, settings)
return first and middle and last | 0.00396 |
def peep_port(paths):
"""Convert a peep requirements file to one compatble with pip-8 hashing.
Loses comments and tromps on URLs, so the result will need a little manual
massaging, but the hard part--the hash conversion--is done for you.
"""
if not paths:
print('Please specify one or more requirements files so I have '
'something to port.\n')
return COMMAND_LINE_ERROR
comes_from = None
for req in chain.from_iterable(
_parse_requirements(path, package_finder(argv)) for path in paths):
req_path, req_line = path_and_line(req)
hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
for hash in hashes_above(req_path, req_line)]
if req_path != comes_from:
print()
print('# from %s' % req_path)
print()
comes_from = req_path
if not hashes:
print(req.req)
else:
print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
for hash in hashes:
print(' \\')
print(' --hash=sha256:%s' % hash, end='')
print() | 0.002479 |
def _parse_single_video(self, example_proto):
"""Parses single video from the input tfrecords.
Args:
example_proto: tfExample proto with a single video.
Returns:
dict with all frames, positions and actions.
"""
context_features = {
"game_duration_loops": tf.io.FixedLenFeature([1], tf.int64),
"game_duration_seconds": tf.io.FixedLenFeature([1], tf.float32),
"n_steps": tf.io.FixedLenFeature([1], tf.int64),
"screen_size": tf.io.FixedLenFeature([2], tf.int64),
}
sequence_features = {
"rgb_screen": tf.io.FixedLenSequenceFeature([], tf.string),
}
_, seq_feat = tf.io.parse_single_sequence_example(
example_proto,
context_features=context_features,
sequence_features=sequence_features)
video_frames = tf.map_fn(
tf.image.decode_png, seq_feat["rgb_screen"], dtype=tf.uint8)
return video_frames | 0.001083 |
def get_forward_returns_columns(columns):
"""
Utility that detects and returns the columns that are forward returns
"""
pattern = re.compile(r"^(\d+([Dhms]|ms|us|ns))+$", re.IGNORECASE)
valid_columns = [(pattern.match(col) is not None) for col in columns]
return columns[valid_columns] | 0.003236 |
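A small illustrative call with made-up column names; pandas is imported here and get_forward_returns_columns from above is assumed to be in scope.
import pandas as pd

cols = pd.Index(['1D', '5D', '21D', 'factor', '30m'])
print(list(get_forward_returns_columns(cols)))   # ['1D', '5D', '21D', '30m']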
def write_files(self):
"""
write all data out into er_* and pmag_* files as appropriate
"""
warnings = self.validate_data()
print('-I- Writing all saved data to files')
if self.measurements:
self.write_measurements_file()
for dtype in ['specimen', 'sample', 'site']:
if self.data_lists[dtype][0]:
do_pmag = dtype in self.incl_pmag_data
self.write_magic_file(dtype, do_er=True, do_pmag=do_pmag)
if not do_pmag:
pmag_file = os.path.join(self.WD, 'pmag_' + dtype + 's.txt')
if os.path.isfile(pmag_file):
os.remove(pmag_file)
if self.locations:
self.write_magic_file('location', do_er=True, do_pmag=False)
self.write_age_file()
if self.results:
self.write_result_file()
if warnings:
print('-W- ' + str(warnings))
return False, warnings
return True, None | 0.002899 |
def scan(self, cursor=0, match=None, count=None):
"""
Incrementally return lists of key names. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` allows for hint the minimum number of returns
"""
f = Future()
if self.keyspace is None:
with self.pipe as pipe:
res = pipe.scan(cursor=cursor, match=match, count=count)
def cb():
f.set((res[0], [self.keyparse.decode(v) for v in res[1]]))
pipe.on_execute(cb)
return f
if match is None:
match = '*'
match = "%s{%s}" % (self.keyspace, match)
pattern = re.compile(r'^%s\{(.*)\}$' % self.keyspace)
with self.pipe as pipe:
res = pipe.scan(cursor=cursor, match=match, count=count)
def cb():
keys = []
for k in res[1]:
k = self.keyparse.decode(k)
m = pattern.match(k)
if m:
keys.append(m.group(1))
f.set((res[0], keys))
pipe.on_execute(cb)
return f | 0.00161 |
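A standalone sketch of the keyspace wrapping and unwrapping used above, with a made-up keyspace name that contains no regex metacharacters.
import re

keyspace = 'users'
match = '*'
wrapped = "%s{%s}" % (keyspace, match)            # 'users{*}' is the pattern sent to SCAN
pattern = re.compile(r'^%s\{(.*)\}$' % keyspace)  # strips the keyspace wrapper from results
print(pattern.match('users{alice}').group(1))     # alice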
def simultaneous_nlsq_fit(xs, ys, dys, func, params_inits, verbose=False,
**kwargs):
"""Do a simultaneous nonlinear least-squares fit
Input:
------
`xs`: tuple of abscissa vectors (1d numpy ndarrays)
`ys`: tuple of ordinate vectors (1d numpy ndarrays)
`dys`: tuple of the errors of ordinate vectors (1d numpy ndarrays or Nones)
`func`: fitting function (the same for all the datasets)
`params_init`: tuples of *lists* or *tuples* (not numpy ndarrays!) of the
initial values of the parameters to be fitted. The special value `None`
signifies that the corresponding parameter is the same as in the
previous dataset. Of course, none of the parameters of the first dataset
can be None.
`verbose`: if various messages useful for debugging should be printed on
stdout.
additional keyword arguments get forwarded to nlsq_fit()
Output:
-------
`p`: tuple of a list of fitted parameters
`dp`: tuple of a list of errors of the fitted parameters
`statdict`: statistics dictionary. This is of the same form as in
`nlsq_fit` except that func_value is a sequence of one-dimensional
np.ndarrays containing the best-fitting function values for each curve.
"""
if not isinstance(xs, collections.Sequence) or \
not isinstance(ys, collections.Sequence) or \
not isinstance(dys, collections.Sequence) or \
not isinstance(params_inits, collections.Sequence):
raise ValueError('Parameters `xs`, `ys`, `dys` and `params_inits` should be tuples or lists.')
Ndata = len(xs)
if len(ys) != Ndata or len(dys) != Ndata or len(params_inits) != Ndata:
raise ValueError('Parameters `xs`, `ys`, `dys` and `params_inits` should have the same length.')
    if not all([isinstance(x, collections.abc.Sequence) for x in params_inits]):
raise ValueError('Elements of `params_inits` should be tuples or Python lists.')
Ns = set([len(x) for x in params_inits])
if len(Ns) != 1:
raise ValueError('Elements of `params_inits` should have the same length.')
Npar = Ns.pop()
for i in range(Ndata):
if dys[i] is None:
dys[i] = np.ones(len(xs[i]), np.double) * np.nan
# concatenate the x, y and dy vectors
xcat = np.concatenate(xs)
ycat = np.concatenate(ys)
dycat = np.concatenate(dys)
# find the start and end indices for each dataset in the concatenated datasets.
lens = [len(x) for x in xs]
starts = [int(sum(lens[:i])) for i in range(len(lens))]
ends = [int(sum(lens[:i + 1])) for i in range(len(lens))]
# flatten the initial parameter list. A single list is needed, where the
# constrained parameters occur only once. Of course, we have to do some
# bookkeeping to be able to find the needed parameters for each sub-range
# later during the fit.
paramcat = [] # this will be the concatenated list of parameters
param_indices = [] # this will have the same structure as params_inits (i.e.
# a tuple of tuples of ints). Each tuple corresponds to a dataset.
# Each integer number in each tuple holds
# the index of the corresponding fit parameter in the
# concatenated parameter list.
for j in range(Ndata): # for each dataset
param_indices.append([])
jorig = j
for i in range(Npar):
j = jorig
            while (j >= 0) and (params_inits[j][i] is None):
j = j - 1
if j < 0:
raise ValueError('None of the parameters in the very first dataset should be `None`.')
if jorig == j: # not constrained parameter
paramcat.append(params_inits[j][i])
param_indices[jorig].append(len(paramcat) - 1)
else:
param_indices[jorig].append(param_indices[j][i])
if verbose:
print("Number of datasets for simultaneous fitting:", Ndata)
print("Total number of data points:", len(xcat))
print("Number of parameters in each dataset:", Npar)
print("Total number of parameters:", Ndata * Npar)
print("Number of independent parameters:", len(paramcat))
# the flattened function
def func_flat(x, *params):
y = []
for j in range(Ndata):
if verbose > 1:
print("Simultaneous fitting: evaluating function for dataset #", j, "/", Ndata)
pars = [params[i] for i in param_indices[j]]
y.append(func(x[starts[j]:ends[j]], *pars))
return np.concatenate(tuple(y))
# Now we reduced the problem to a single least-squares fit. Carry it out and
# interpret the results.
pflat, dpflat, statdictflat = nlsq_fit(xcat, ycat, dycat, func_flat, paramcat, verbose, **kwargs)
for n in ['func_value', 'R2', 'Chi2', 'Chi2_reduced', 'DoF', 'Covariance', 'Correlation_coeffs']:
statdictflat[n + '_global'] = statdictflat[n]
statdictflat[n] = []
p = []
dp = []
for j in range(Ndata): # unpack the results
p.append([pflat[i] for i in param_indices[j]])
dp.append([dpflat[i] for i in param_indices[j]])
statdictflat['func_value'].append(statdictflat['func_value_global'][starts[j]:ends[j]])
if np.isfinite(dys[j]).all():
statdictflat['Chi2'].append((((statdictflat['func_value'][-1] - ys[j]) / dys[j]) ** 2).sum())
sstot = np.sum((ys[j] - np.mean(ys[j])) ** 2 / dys[j] ** 2)
else:
statdictflat['Chi2'].append(((statdictflat['func_value'][-1] - ys[j]) ** 2).sum())
sstot = np.sum((ys[j] - np.mean(ys[j])) ** 2)
sserr = statdictflat['Chi2'][-1]
statdictflat['R2'].append(1 - sserr / sstot)
        statdictflat['DoF'].append(len(xs[j]) - len(p[-1]))
statdictflat['Covariance'].append(slice_covarmatrix(statdictflat['Covariance_global'], param_indices[j]))
statdictflat['Correlation_coeffs'].append(slice_covarmatrix(statdictflat['Correlation_coeffs_global'], param_indices[j]))
statdictflat['Chi2_reduced'].append(statdictflat['Chi2'][-1] / statdictflat['DoF'][-1])
return p, dp, statdictflat | 0.004358 |
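The trickiest part of the routine above is the parameter-flattening bookkeeping. Below is a self-contained sketch of just that step, with hypothetical initial parameters for two datasets; None means "share the value fitted for the previous dataset" (the full routine above additionally raises if the chain of Nones bottoms out):

params_inits = [[1.0, 2.0, 3.0],      # dataset 0: all parameters free
                [None, 5.0, None]]    # dataset 1: p0 and p2 tied to dataset 0

paramcat = []        # flattened list of independent parameters
param_indices = []   # per-dataset indices into paramcat
for j, pars in enumerate(params_inits):
    param_indices.append([])
    for i, p in enumerate(pars):
        k = j
        while k >= 0 and params_inits[k][i] is None:
            k -= 1
        if k == j:                       # free parameter: append it
            paramcat.append(p)
            param_indices[j].append(len(paramcat) - 1)
        else:                            # constrained: reuse the earlier index
            param_indices[j].append(param_indices[k][i])

print(paramcat)       # [1.0, 2.0, 3.0, 5.0]
print(param_indices)  # [[0, 1, 2], [0, 3, 2]]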
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
C = self.COEFFS[imt]
# clip rhypo at 10 (this is the minimum distance used in
# deriving the equation), see page 22, this avoids singularity
# in mean value equation
rhypo = dists.rhypo.copy()
rhypo[rhypo < 10] = 10
# convert magnitude from Mblg to Mw
mag = rup.mag * 0.98 - 0.39 if rup.mag <= 5.5 else \
2.715 - 0.277 * rup.mag + 0.127 * rup.mag * rup.mag
# functional form as explained in 'Youngs_fit_to_AB95lookup.doc'
f1 = np.minimum(np.log(rhypo), np.log(70.))
f2 = np.maximum(np.log(rhypo / 130.), 0)
mean = (
C['c1'] + C['c2'] * mag + C['c3'] * mag ** 2 +
(C['c4'] + C['c5'] * mag) * f1 +
(C['c6'] + C['c7'] * mag) * f2 +
C['c8'] * rhypo
)
stddevs = self._get_stddevs(stddev_types, dists.rhypo.shape[0])
return mean, stddevs | 0.002584 |
def complete_url(self, url):
""" Completes a given URL with this instance's URL base. """
if self.base_url:
return urlparse.urljoin(self.base_url, url)
else:
return url | 0.015625 |
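The join behaviour comes straight from the standard library; on Python 3 the same function lives in urllib.parse (the snippet above uses it under the urlparse name). A quick illustration with a hypothetical base URL:

import urllib.parse as urlparse

base_url = 'https://api.example.com/v1/'
print(urlparse.urljoin(base_url, 'users/42'))   # https://api.example.com/v1/users/42
print(urlparse.urljoin(base_url, '/health'))    # https://api.example.com/health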
def append_panel(panels, size_x, size_y, max_col=12):
"""
    Appends a panel to the list of panels, finding the highest free place (leftmost first) for the new panel.
:param panels:
:param size_x:
:param size_y:
:param max_col:
    :return: a new panel, or None if it is not possible to place a panel with the given size_x
"""
bottom_lines = bottoms(panels)
shape = find_shape(bottom_lines, max_col)
lines = longest_lines(shape)
line = find_place(lines, size_x)
if not line:
return
panel = {
'col': line['col'],
'row': line['row'],
'size_x': size_x,
'size_y': size_y,
}
panels.append(panel)
return panel | 0.004373 |
def _get_json(value):
"""Convert the given value to a JSON object."""
if hasattr(value, 'replace'):
value = value.replace('\n', ' ')
try:
return json.loads(value)
except json.JSONDecodeError:
# Escape double quotes.
if hasattr(value, 'replace'):
value = value.replace('"', '\\"')
# try putting the value into a string
return json.loads('"{}"'.format(value)) | 0.002309 |
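A standalone sketch of the fallback trick used above: try to parse the value as JSON, and if that fails, escape the quotes and wrap the value so it round-trips as a JSON string:

import json

raw = 'say "hi"\non two lines'
value = raw.replace('\n', ' ')
try:
    result = json.loads(value)
except json.JSONDecodeError:
    result = json.loads('"{}"'.format(value.replace('"', '\\"')))
print(result)   # say "hi" on two lines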
def mock_method(self, interface, dbus_method, in_signature, *args, **kwargs):
'''Master mock method.
This gets "instantiated" in AddMethod(). Execute the code snippet of
the method and return the "ret" variable if it was set.
'''
# print('mock_method', dbus_method, self, in_signature, args, kwargs, file=sys.stderr)
# convert types of arguments according to signature, using
# MethodCallMessage.append(); this will also provide type/length
# checks, except for the case of an empty signature
if in_signature == '' and len(args) > 0:
raise TypeError('Fewer items found in D-Bus signature than in Python arguments')
m = dbus.connection.MethodCallMessage('a.b', '/a', 'a.b', 'a')
m.append(signature=in_signature, *args)
args = m.get_args_list()
self.log(dbus_method + self.format_args(args))
self.call_log.append((int(time.time()), str(dbus_method), args))
self.MethodCalled(dbus_method, args)
# The code may be a Python 3 string to interpret, or may be a function
# object (if AddMethod was called from within Python itself, rather than
# over D-Bus).
code = self.methods[interface][dbus_method][2]
if code and isinstance(code, types.FunctionType):
return code(self, *args)
elif code:
loc = locals().copy()
exec(code, globals(), loc)
if 'ret' in loc:
return loc['ret'] | 0.0033 |
    def iter_errors(self):
        """Lazily yields each ValidationError for the received data dict.
"""
# Deprecate
warnings.warn(
'Property "package.iter_errors" is deprecated.',
UserWarning)
return self.profile.iter_errors(self.to_dict()) | 0.006757 |
def getuname(self, uid):
"""
Get the username of a given uid.
"""
uid = int(uid)
try:
return self.uidsmap[uid]
except KeyError:
pass
try:
name = pwd.getpwuid(uid)[0]
except (KeyError, AttributeError):
name = "uid=%d" % uid
self.uidsmap[uid] = name
return name | 0.005141 |
def add_mapped_chain_ids(self, mapped_chains):
"""Add chains by ID into the mapped_chains attribute
Args:
mapped_chains (str, list): Chain ID or list of IDs
"""
mapped_chains = ssbio.utils.force_list(mapped_chains)
for c in mapped_chains:
if c not in self.mapped_chains:
self.mapped_chains.append(c)
log.debug('{}: added to list of mapped chains'.format(c))
else:
log.debug('{}: chain already in list of mapped chains, not adding'.format(c)) | 0.005282 |
def _check_inputs(self):
"""Check the inputs to ensure they are valid.
Returns
-------
status : bool
True if all inputs are valid, False if one is not.
"""
valid_detector = True
valid_filter = True
valid_date = True
        # Determine if the submitted detector is valid
if self.detector not in self._valid_detectors:
msg = ('{} is not a valid detector option.\n'
'Please choose one of the following:\n{}\n'
'{}'.format(self.detector,
'\n'.join(self._valid_detectors),
self._msg_div))
LOG.error(msg)
valid_detector = False
# Determine if the submitted filter is valid
if (self.filt is not None and valid_detector and
self.filt not in self.valid_filters[self.detector]):
msg = ('{} is not a valid filter for {}\n'
'Please choose one of the following:\n{}\n'
'{}'.format(self.filt, self.detector,
'\n'.join(self.valid_filters[self.detector]),
self._msg_div))
LOG.error(msg)
valid_filter = False
# Determine if the submitted date is valid
date_check = self._check_date()
if date_check is not None:
LOG.error('{}\n{}'.format(date_check, self._msg_div))
valid_date = False
if not valid_detector or not valid_filter or not valid_date:
return False
return True | 0.001235 |
def run_task(task, workspace):
"""
Runs the task and updates the workspace with results.
Parameters
----------
task - dict
Task Description
Examples:
{'task': task_func, 'inputs': ['a', 'b'], 'outputs': 'c'}
{'task': task_func, 'inputs': '*', 'outputs': '*'}
{'task': task_func, 'inputs': ['*','a'], 'outputs': 'b'}
Returns a new workspace with results
"""
data = copy.copy(workspace)
task = validate_task(task)
# Prepare input to task
inputs = [input_parser(key, data) for key in task.inputs]
if inspect.isgeneratorfunction(task.fn):
# Multiple output task
        # Assuming the number of outputs equals the number of yielded values
data.update(zip(task.outputs, task.fn(*inputs)))
else:
# Single output task
results = task.fn(*inputs)
if task.outputs[0] != '*':
results = {task.outputs[0]: results}
elif not isinstance(results, dict):
raise TypeError('Result should be a dict for output type *')
data.update(results)
return data | 0.000913 |
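A toy, self-contained sketch of the pattern run_task implements; validate_task and input_parser belong to the surrounding module and are not shown, so this version inlines trivial stand-ins:

import copy

def add(a, b):                     # single-output task function
    return a + b

def stats(a, b):                   # generator task -> multiple named outputs
    yield a + b
    yield a * b

workspace = {'a': 2, 'b': 5}
data = copy.copy(workspace)

# {'task': add, 'inputs': ['a', 'b'], 'outputs': 'c'}
data['c'] = add(data['a'], data['b'])

# {'task': stats, 'inputs': ['a', 'b'], 'outputs': ['sum', 'prod']}
data.update(zip(['sum', 'prod'], stats(data['a'], data['b'])))
print(data)   # {'a': 2, 'b': 5, 'c': 7, 'sum': 7, 'prod': 10}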
def increment(cls, name):
"""Call this method to increment the named counter. This is atomic on
the database.
:param name:
Name for a previously created ``Counter`` object
"""
with transaction.atomic():
counter = Counter.objects.select_for_update().get(name=name)
counter.value += 1
counter.save()
return counter.value | 0.007177 |
def _set_ethernet(self, v, load=False):
"""
Setter method for ethernet, mapped from YANG variable /interface/ethernet/logical_interface/ethernet (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ethernet is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ethernet() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("instance_id",ethernet.ethernet, yang_name="ethernet", rest_name="ethernet", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id', extensions={u'tailf-common': {u'info': u'Configure an Ethernet Logical Interface on this main interface', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_phy_lif', u'cli-mode-name': u'conf-if-eth-lif-$(instance-id)'}}), is_container='list', yang_name="ethernet", rest_name="ethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure an Ethernet Logical Interface on this main interface', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_phy_lif', u'cli-mode-name': u'conf-if-eth-lif-$(instance-id)'}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ethernet must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("instance_id",ethernet.ethernet, yang_name="ethernet", rest_name="ethernet", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id', extensions={u'tailf-common': {u'info': u'Configure an Ethernet Logical Interface on this main interface', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_phy_lif', u'cli-mode-name': u'conf-if-eth-lif-$(instance-id)'}}), is_container='list', yang_name="ethernet", rest_name="ethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure an Ethernet Logical Interface on this main interface', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_phy_lif', u'cli-mode-name': u'conf-if-eth-lif-$(instance-id)'}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='list', is_config=True)""",
})
self.__ethernet = t
if hasattr(self, '_set'):
self._set() | 0.003439 |
def refresh_signal_handler(self, signo, frame):
"""
This callback is called when SIGUSR1 signal is received.
It updates outputs of all modules by calling their `run` method.
Interval modules are updated in separate threads if their interval is
above a certain treshold value.
This treshold is computed by :func:`compute_treshold_interval` class
method.
The reasoning is that modules with larger intervals also usually take
longer to refresh their output and that their output is not required in
'real time'.
This also prevents possible lag when updating all modules in a row.
"""
if signo != signal.SIGUSR1:
return
for module in self.modules:
if hasattr(module, "interval"):
if module.interval > self.treshold_interval:
thread = Thread(target=module.run)
thread.start()
else:
module.run()
else:
module.run()
self.async_refresh() | 0.001823 |
def alexa(self) -> list:
"""Returns list of Amazon Alexa compatible states of the RichMessage
instance nested controls.
Returns:
alexa_controls: Amazon Alexa representation of RichMessage instance nested
controls.
"""
alexa_controls = [control.alexa() for control in self.controls]
return alexa_controls | 0.007895 |
def attribute_changed(self, node, column):
"""
Calls :meth:`QAbstractItemModel.dataChanged` with given Node attribute index.
:param node: Node.
:type node: AbstractCompositeNode or GraphModelNode
:param column: Attribute column.
:type column: int
:return: Method success.
:rtype: bool
"""
index = self.get_attribute_index(node, column)
if index is not None:
self.dataChanged.emit(index, index)
return True
else:
return False | 0.005376 |
def register_service(email, password, organisation_id, name=None,
service_type=None, accounts_url=None,
location=None, config=None):
"""Register a service with the accounts service
\b
EMAIL: a user's email
PASSWORD: a user's password
ORGANISATION_ID: ID of the service's parent organisation
"""
accounts_url = accounts_url or getattr(options, 'url_accounts', None)
name = name or getattr(options, 'name', None)
service_type = service_type or getattr(options, 'service_type', None)
location = location or ('https://localhost:' + str(getattr(options, 'port')))
config = config or 'config'
if not accounts_url:
raise click.ClickException(click.style('accounts_url not defined',
fg='red'))
if not name:
raise click.ClickException(click.style('service name not defined',
fg='red'))
if not service_type:
raise click.ClickException(click.style('service type not defined',
fg='red'))
try:
client = _get_accounts_client(accounts_url, email, password)
service_id = _create_service(client, organisation_id, name, location, service_type)
client_secret = _get_client_secret(client, service_id)
_update_local_conf(config, service_id, client_secret)
except httpclient.HTTPError as exc:
try:
msg = exc.response.body
except AttributeError:
msg = exc.message
raise click.ClickException(click.style(msg, fg='red'))
except socket.error as exc:
raise click.ClickException(click.style(exc.strerror, fg='red'))
click.echo(click.style('\n{} service registered\n'.format(name),
fg='green')) | 0.001609 |
def _dot_to_dec(ip, check=True):
"""Dotted decimal notation to decimal conversion."""
if check and not is_dot(ip):
raise ValueError('_dot_to_dec: invalid IP: "%s"' % ip)
octets = str(ip).split('.')
dec = 0
dec |= int(octets[0]) << 24
dec |= int(octets[1]) << 16
dec |= int(octets[2]) << 8
dec |= int(octets[3])
return dec | 0.00274 |
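A quick worked example of the shifting above (the is_dot validity check is skipped here):

octets = '192.168.1.10'.split('.')        # ['192', '168', '1', '10']
dec = (int(octets[0]) << 24) | (int(octets[1]) << 16) | \
      (int(octets[2]) << 8) | int(octets[3])
print(dec)                                 # 3232235786
print(dec == 192 * 256**3 + 168 * 256**2 + 1 * 256 + 10)   # True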
def open_file(self, store=current_store, use_seek=False):
"""Opens the file-like object which is a context manager
        (that means it can be used in a :keyword:`with` statement).
If ``use_seek`` is :const:`True` (though :const:`False` by default)
        it guarantees the returned file-like object is also seekable
(provides :meth:`~file.seek()` method).
:param store: the storage which contains image files.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:returns: the file-like object of the image, which is a context
manager (plus, also seekable only if ``use_seek``
is :const:`True`)
:rtype: :class:`file`,
:class:`~sqlalchemy_imageattach.file.FileProxy`,
file-like object
"""
if not isinstance(store, Store):
raise TypeError('store must be an instance of '
'sqlalchemy_imageattach.store.Store, not ' +
repr(store))
if Session.object_session(self) is None:
try:
file = self.file
except AttributeError:
raise IOError('no stored original image file')
return ReusableFileProxy(file)
return store.open(self, use_seek) | 0.001394 |
def should_audit(instance):
"""Returns True or False to indicate whether the instance
should be audited or not, depending on the project settings."""
# do not audit any model listed in UNREGISTERED_CLASSES
for unregistered_class in UNREGISTERED_CLASSES:
if isinstance(instance, unregistered_class):
return False
# only audit models listed in REGISTERED_CLASSES (if it's set)
if len(REGISTERED_CLASSES) > 0:
for registered_class in REGISTERED_CLASSES:
if isinstance(instance, registered_class):
break
else:
return False
# all good
return True | 0.001534 |
def shift_to_coords(self, pix, fill_value=np.nan):
"""Create a new map that is shifted to the pixel coordinates
``pix``."""
pix_offset = self.get_offsets(pix)
dpix = np.zeros(len(self.shape) - 1)
for i in range(len(self.shape) - 1):
x = self.rebin * (pix[i] - pix_offset[i + 1]
) + (self.rebin - 1.0) / 2.
dpix[i] = x - self._pix_ref[i]
pos = [pix_offset[i] + self.shape[i] // 2
for i in range(self.data.ndim)]
s0, s1 = utils.overlap_slices(self.shape_out, self.shape, pos)
k = np.zeros(self.data.shape)
for i in range(k.shape[0]):
k[i] = shift(self._data_spline[i], dpix, cval=np.nan,
order=2, prefilter=False)
for i in range(1, len(self.shape)):
k = utils.sum_bins(k, i, self.rebin)
k0 = np.ones(self.shape_out) * fill_value
if k[s1].size == 0 or k0[s0].size == 0:
return k0
k0[s0] = k[s1]
return k0 | 0.001907 |
def parse_sra(path_to_config):
"""
    Parses a genetorrent config file.
    Returns a flat list of sample ids, one per non-blank line.
"""
samples = []
with open(path_to_config, 'r') as f:
for line in f.readlines():
if not line.isspace():
samples.append(line.strip())
return samples | 0.005115 |
def list_functions(mod_name):
"""Lists all functions declared in a module.
http://stackoverflow.com/a/1107150/3004221
Args:
mod_name: the module name
Returns:
A list of functions declared in that module.
"""
mod = sys.modules[mod_name]
return [func.__name__ for func in mod.__dict__.values()
if is_mod_function(mod, func)] | 0.002632 |
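is_mod_function is defined elsewhere in that module; a self-contained sketch of the same idea, using a hypothetical helper built on inspect:

import inspect
import sys

def _is_mod_function(mod, func):
    # Hypothetical stand-in: a function object actually defined in `mod`.
    return inspect.isfunction(func) and inspect.getmodule(func) == mod

def greet():
    pass

mod = sys.modules[__name__]
print([f.__name__ for f in mod.__dict__.values() if _is_mod_function(mod, f)])
# ['_is_mod_function', 'greet']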
def _perform_emulated_reset(self):
"""! @brief Emulate a software reset by writing registers.
All core registers are written to reset values. This includes setting the initial PC and SP
to values read from the vector table, which is assumed to be located at the based of the
boot memory region.
If the memory map does not provide a boot region, then the current value of the VTOR register
is reused, as it should at least point to a valid vector table.
The current value of DEMCR.VC_CORERESET determines whether the core will be resumed or
left halted.
Note that this reset method will not set DHCSR.S_RESET_ST or DFSR.VCATCH.
"""
# Halt the core before making changes.
self.halt()
bootMemory = self.memory_map.get_boot_memory()
if bootMemory is None:
# Reuse current VTOR value if we don't know the boot memory region.
vectorBase = self.read32(self.VTOR)
else:
vectorBase = bootMemory.start
# Read initial SP and PC.
initialSp = self.read32(vectorBase)
initialPc = self.read32(vectorBase + 4)
# Init core registers.
regList = ['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12',
'psp', 'msp', 'lr', 'pc', 'xpsr', 'cfbp']
valueList = [0] * 13 + \
[
0, # PSP
initialSp, # MSP
0xffffffff, # LR
initialPc, # PC
0x01000000, # XPSR
0, # CFBP
]
if self.has_fpu:
regList += [('s%d' % n) for n in range(32)] + ['fpscr']
valueList += [0] * 33
self.write_core_registers_raw(regList, valueList)
# "Reset" SCS registers.
data = [
(self.ICSR_PENDSVCLR | self.ICSR_PENDSTCLR), # ICSR
vectorBase, # VTOR
(self.NVIC_AIRCR_VECTKEY | self.NVIC_AIRCR_VECTCLRACTIVE), # AIRCR
0, # SCR
0, # CCR
0, # SHPR1
0, # SHPR2
0, # SHPR3
0, # SHCSR
0, # CFSR
]
self.write_memory_block32(self.ICSR, data)
self.write32(self.CPACR, 0)
if self.has_fpu:
data = [
0, # FPCCR
0, # FPCAR
0, # FPDSCR
]
self.write_memory_block32(self.FPCCR, data)
# "Reset" SysTick.
self.write_memory_block32(self.SYSTICK_CSR, [0] * 3)
# "Reset" NVIC registers.
numregs = (self.read32(self.ICTR) & 0xf) + 1
self.write_memory_block32(self.NVIC_ICER0, [0xffffffff] * numregs)
self.write_memory_block32(self.NVIC_ICPR0, [0xffffffff] * numregs)
self.write_memory_block32(self.NVIC_IPR0, [0xffffffff] * (numregs * 8)) | 0.007869 |
def _len_lcs(x, y):
"""
Returns the length of the Longest Common Subsequence between sequences x
and y.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
:param x: sequence of words
:param y: sequence of words
:returns integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = _get_index_of_lcs(x, y)
return table[n, m] | 0.002513 |
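_lcs and _get_index_of_lcs are defined elsewhere in the module (the table is indexed as table[n, m]); a standalone dynamic-programming sketch of the same length computation:

def lcs_length(x, y):
    # table[i][j] = length of the LCS of x[:i] and y[:j]
    n, m = len(x), len(y)
    table = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if x[i - 1] == y[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[n][m]

print(lcs_length('the cat sat here'.split(), 'the dog sat there'.split()))   # 2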
def _get_links(network_id, template_id=None):
"""
Get all the links in a network
"""
extras = {'types':[], 'attributes':[]}
link_qry = db.DBSession.query(Link).filter(
Link.network_id==network_id,
Link.status=='A').options(
noload('network')
)
if template_id is not None:
link_qry = link_qry.filter(ResourceType.link_id==Link.id,
TemplateType.id==ResourceType.type_id,
TemplateType.template_id==template_id)
link_res = db.DBSession.execute(link_qry.statement).fetchall()
links = []
for l in link_res:
links.append(JSONObject(l, extras=extras))
return links | 0.010551 |