text (string, 78–104k chars) | score (float64, 0–0.18)
---|---
def defaults_scope(**kwargs):
"""Creates a scope for the defaults that are used in a `with` block.
Note: `defaults_scope` supports nesting where later defaults can be
overridden. Also, an explicitly given keyword argument on a method always
takes precedence.
In addition to setting defaults for some methods, this also can control:
* `summary_collections`: Choose which collection to place summaries in or
disable with `None`.
* `trainable_variables`: Boolean indicating if variables are trainable.
* `variable_collections`: Default collections in which to place variables;
`tf.GraphKeys.GLOBAL_VARIABLES` is always included.
Args:
**kwargs: The defaults.
Yields:
Doesn't really yield, instead this creates a Context Manager for use in a
`with` statement.
Raises:
ValueError: if a collection type is accidentally supplied a string.
"""
_assert_value_not_string('summary_collections', kwargs)
_assert_value_not_string('variable_collections', kwargs)
_check_defaults(kwargs)
global _defaults
old_defaults = _defaults
_defaults = chain_dict.ChainDict(_defaults)
_defaults.update(kwargs)
# Special logic to support summary_collections.
# This is added here because introducing more scopes would add more confusion
# than overloading this one a bit.
books = bookkeeper.for_default_graph()
if 'summary_collections' in _defaults:
books.summary_collections = _defaults['summary_collections']
else:
books.reset_summary_collections()
try:
yield _defaults
finally:
_defaults = old_defaults | 0.010759 |
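A minimal usage sketch for the scope above (hedged: it assumes the decorated context-manager form exposed as `prettytensor.defaults_scope`, and that `activation_fn` and `l2loss` are defaults accepted by the wrapped layer methods):
import tensorflow as tf
import prettytensor as pt

images = tf.placeholder(tf.float32, [None, 784])
# Layers built inside the block pick up these defaults unless a call site
# overrides them explicitly (explicit keyword arguments always win).
with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
    logits = (pt.wrap(images)
              .fully_connected(100)                      # relu + l2loss defaults
              .fully_connected(10, activation_fn=None))  # explicit override wins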
def extract_all(self, directory=".", members=None):
"""Extract all member from the archive to the specified working
directory.
"""
if self.handle:
self.handle.extractall(path=directory, members=members) | 0.00813 |
def merged_pex(cls, path, pex_info, interpreter, pexes, interpreter_constraints=None):
"""Yields a pex builder at path with the given pexes already merged.
:rtype: :class:`pex.pex_builder.PEXBuilder`
"""
pex_paths = [pex.path() for pex in pexes if pex]
if pex_paths:
pex_info = pex_info.copy()
pex_info.merge_pex_path(':'.join(pex_paths))
with safe_concurrent_creation(path) as safe_path:
builder = PEXBuilder(safe_path, interpreter, pex_info=pex_info)
if interpreter_constraints:
for constraint in interpreter_constraints:
builder.add_interpreter_constraint(constraint)
yield builder | 0.012214 |
def module_broadcast(m, broadcast_fn, *args, **kwargs):
""" Call given function in all submodules with given parameters """
apply_leaf(m, lambda x: module_apply_broadcast(x, broadcast_fn, args, kwargs)) | 0.009524 |
def trackjobs(func, results, spacer):
"""
Blocks and prints progress for just the func being requested from a list
of submitted engine jobs. Returns whether any of the jobs failed.
func = str
results = dict of asyncs
"""
## TODO: try to insert a better way to break on KBD here.
LOGGER.info("inside trackjobs of %s", func)
## get just the jobs from results that are relevant to this func
asyncs = [(i, results[i]) for i in results if i.split("-", 2)[0] == func]
## progress bar
start = time.time()
while 1:
## how many of this func have finished so far
ready = [i[1].ready() for i in asyncs]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
printstr = " {} | {} | s3 |".format(PRINTSTR[func], elapsed)
progressbar(len(ready), sum(ready), printstr, spacer=spacer)
time.sleep(0.1)
if len(ready) == sum(ready):
print("")
break
sfails = []
errmsgs = []
for job in asyncs:
if not job[1].successful():
sfails.append(job[0])
errmsgs.append(job[1].result())
return func, sfails, errmsgs | 0.004241 |
def create(gandi, fqdn, name, type, value, ttl):
"""Create new record entry for a domain.
multiple value parameters can be provided.
"""
domains = gandi.dns.list()
domains = [domain['fqdn'] for domain in domains]
if fqdn not in domains:
gandi.echo('Sorry domain %s does not exist' % fqdn)
gandi.echo('Please use one of the following: %s' % ', '.join(domains))
return
result = gandi.dns.add_record(fqdn, name, type, value, ttl)
gandi.echo(result['message']) | 0.001946 |
def shape_list(x):
"""Return list of dims, statically where possible."""
x = tf.convert_to_tensor(x)
# If unknown rank, return dynamic shape
if x.get_shape().dims is None:
return tf.shape(x)
static = x.get_shape().as_list()
shape = tf.shape(x)
ret = []
for i, dim in enumerate(static):
if dim is None:
dim = shape[i]
ret.append(dim)
return ret | 0.028796 |
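A quick illustration of the mixed static/dynamic result, assuming TensorFlow 1.x graph mode (which the `get_shape()` calls above and the placeholder below imply):
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
dims = shape_list(x)
# dims[0] is a dynamic tf.Tensor (batch size unknown until run time),
# dims[1] is the plain Python int 128, usable directly in shape arithmetic.
flat = tf.reshape(x, [dims[0], dims[1] // 2, 2])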
def render(source, target, context, owner='root', group='root',
perms=0o444, templates_dir=None, encoding='UTF-8',
template_loader=None, config_template=None):
"""
Render a template.
The `source` path, if not absolute, is relative to the `templates_dir`.
The `target` path should be absolute. It can also be `None`, in which
case no file will be written.
The context should be a dict containing the values to be replaced in the
template.
config_template may be provided to render from a provided template instead
of loading from a file.
The `owner`, `group`, and `perms` options will be passed to `write_file`.
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
The rendered template will be written to the file as well as being returned
as a string.
Note: Using this requires python-jinja2 or python3-jinja2; if it is not
installed, calling this will attempt to use charmhelpers.fetch.apt_install
to install it.
"""
try:
from jinja2 import FileSystemLoader, Environment, exceptions
except ImportError:
try:
from charmhelpers.fetch import apt_install
except ImportError:
hookenv.log('Could not import jinja2, and could not import '
'charmhelpers.fetch to install it',
level=hookenv.ERROR)
raise
if sys.version_info.major == 2:
apt_install('python-jinja2', fatal=True)
else:
apt_install('python3-jinja2', fatal=True)
from jinja2 import FileSystemLoader, Environment, exceptions
if template_loader:
template_env = Environment(loader=template_loader)
else:
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
template_env = Environment(loader=FileSystemLoader(templates_dir))
# load from a string if provided explicitly
if config_template is not None:
template = template_env.from_string(config_template)
else:
try:
template = template_env.get_template(source)
except exceptions.TemplateNotFound as e:
hookenv.log('Could not load template %s from %s.' %
(source, templates_dir),
level=hookenv.ERROR)
raise e
content = template.render(context)
if target is not None:
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
# This is a terrible default directory permission, as the file
# or its siblings will often contain secrets.
host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
host.write_file(target, content.encode(encoding), owner, group, perms)
return content | 0.000693 |
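A hedged usage sketch for `render` in a charm hook: the template name, target path, and context keys below are made up for illustration, and `templates/nginx.conf.j2` is assumed to exist in the charm's `templates` folder.
content = render(
    source='nginx.conf.j2',
    target='/etc/nginx/nginx.conf',
    context={'worker_processes': 4, 'listen_port': 80},
    owner='root', group='root', perms=0o644,
)
# `content` holds the rendered text; the same text was also written to the target file.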
def square(left, top, length, filled=False, thickness=1):
"""Returns a generator that produces (x, y) tuples for a square.
This function is an alias for the rectangle() function, with `length` passed for both the
`width` and `height` parameters.
The `left` and `top` arguments are the x and y coordinates for the topleft corner of the square.
If `filled` is `True`, the interior points are also returned.
NOTE: The `thickness` argument is not yet implemented.
>>> list(square(0, 0, 5))
[(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (4, 1), (4, 2), (4, 3), (4, 4), (3, 4), (2, 4), (1, 4), (0, 4), (0, 3), (0, 2), (0, 1)]
>>> drawPoints(square(0, 0, 5))
OOOOO
O,,,O
O,,,O
O,,,O
OOOOO
>>> drawPoints(square(0, 0, 5, filled=True))
OOOOO
OOOOO
OOOOO
OOOOO
OOOOO
"""
if thickness != 1:
raise NotImplementedError('The pybresenham module is under development and the filled, thickness, and endcap parameters are not implemented. You can contribute at https://github.com/asweigart/pybresenham')
return rectangle(left, top, length, length, filled, thickness) | 0.004355 |
def dashboard(request):
"""Shows the latest results for each source"""
sources = (models.Source.objects.all().prefetch_related('metric_set')
.order_by('name'))
metrics = SortedDict([(src, src.metric_set.all()) for src in sources])
no_source_metrics = models.Metric.objects.filter(source__isnull=True)
if no_source_metrics:
metrics[''] = no_source_metrics
if request.META.get('HTTP_X_PJAX', False):
parent_template = 'pjax.html'
else:
parent_template = 'base.html'
return render(request, 'metrics/dashboard.html', {
'source_metrics': metrics,
'parent_template': parent_template
}) | 0.001433 |
def diffusion_stencil_2d(epsilon=1.0, theta=0.0, type='FE'):
"""Rotated Anisotropic diffusion in 2d of the form.
-div Q A Q^T grad u
Q = [cos(theta) -sin(theta)]
[sin(theta) cos(theta)]
A = [1 0 ]
[0 eps ]
Parameters
----------
epsilon : float, optional
Anisotropic diffusion coefficient: -div A grad u,
where A = [1 0; 0 epsilon]. The default is isotropic, epsilon=1.0
theta : float, optional
Rotation angle `theta` in radians defines -div Q A Q^T grad,
where Q = [cos(`theta`) -sin(`theta`); sin(`theta`) cos(`theta`)].
type : {'FE','FD'}
Specifies the discretization as Q1 finite element (FE) or 2nd order
finite difference (FD). The default is 'FE'.
Returns
-------
stencil : numpy array
A 3x3 diffusion stencil
See Also
--------
stencil_grid, poisson
Notes
-----
Not all combinations are supported.
Examples
--------
>>> import scipy as sp
>>> from pyamg.gallery.diffusion import diffusion_stencil_2d
>>> sten = diffusion_stencil_2d(epsilon=0.0001,theta=sp.pi/6,type='FD')
>>> print(sten)
[[-0.2164847 -0.750025 0.2164847]
[-0.250075 2.0002 -0.250075 ]
[ 0.2164847 -0.750025 -0.2164847]]
"""
eps = float(epsilon) # for brevity
theta = float(theta)
C = np.cos(theta)
S = np.sin(theta)
CS = C*S
CC = C**2
SS = S**2
if(type == 'FE'):
"""FE approximation to::
- (eps c^2 + s^2) u_xx +
-2(eps - 1) c s u_xy +
- ( c^2 + eps s^2) u_yy
[ -c^2*eps-s^2+3*c*s*(eps-1)-c^2-s^2*eps,
2*c^2*eps+2*s^2-4*c^2-4*s^2*eps,
-c^2*eps-s^2-3*c*s*(eps-1)-c^2-s^2*eps]
[-4*c^2*eps-4*s^2+2*c^2+2*s^2*eps,
8*c^2*eps+8*s^2+8*c^2+8*s^2*eps,
-4*c^2*eps-4*s^2+2*c^2+2*s^2*eps]
[-c^2*eps-s^2-3*c*s*(eps-1)-c^2-s^2*eps,
2*c^2*eps+2*s^2-4*c^2-4*s^2*eps,
-c^2*eps-s^2+3*c*s*(eps-1)-c^2-s^2*eps]
c = cos(theta)
s = sin(theta)
"""
a = (-1*eps - 1)*CC + (-1*eps - 1)*SS + (3*eps - 3)*CS
b = (2*eps - 4)*CC + (-4*eps + 2)*SS
c = (-1*eps - 1)*CC + (-1*eps - 1)*SS + (-3*eps + 3)*CS
d = (-4*eps + 2)*CC + (2*eps - 4)*SS
e = (8*eps + 8)*CC + (8*eps + 8)*SS
stencil = np.array([[a, b, c],
[d, e, d],
[c, b, a]]) / 6.0
elif type == 'FD':
"""FD approximation to:
- (eps c^2 + s^2) u_xx +
-2(eps - 1) c s u_xy +
- ( c^2 + eps s^2) u_yy
c = cos(theta)
s = sin(theta)
A = [ 1/2(eps - 1) c s -(c^2 + eps s^2) -1/2(eps - 1) c s ]
[ ]
[ -(eps c^2 + s^2) 2 (eps + 1) -(eps c^2 + s^2) ]
[ ]
[ -1/2(eps - 1) c s -(c^2 + eps s^2) 1/2(eps - 1) c s ]
"""
a = 0.5*(eps - 1)*CS
b = -(eps*SS + CC)
c = -a
d = -(eps*CC + SS)
e = 2.0*(eps + 1)
stencil = np.array([[a, b, c],
[d, e, d],
[c, b, a]])
return stencil | 0.00029 |
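As a sanity check (a worked example that follows directly from the formulas above): with `epsilon=1.0` and `theta=0.0` the FE branch gives a = b = c = d = -2/6 and e = 16/6, i.e. the standard Q1 finite-element Laplacian.
sten = diffusion_stencil_2d(epsilon=1.0, theta=0.0, type='FE')
# Expected, up to floating point:
# [[-1/3, -1/3, -1/3],
#  [-1/3,  8/3, -1/3],
#  [-1/3, -1/3, -1/3]]
print(sten)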
def view_rest_retry(self, url=None):
"""
View current rest retry settings in the `requests.Session()` object
**Parameters:**
- **url:** URL to use to determine retry methods for. Defaults to 'https://'
**Returns:** Dict describing the current retry/backoff configuration for the matched adapter.
"""
if url is None:
url = "https://"
return vars(self._session.get_adapter(url).max_retries) | 0.006993 |
def _get_options(ret=None):
'''
Get the redis options from salt.
'''
attrs = {'host': 'host',
'port': 'port',
'unix_socket_path': 'unix_socket_path',
'db': 'db',
'password': 'password',
'cluster_mode': 'cluster_mode',
'startup_nodes': 'cluster.startup_nodes',
'skip_full_coverage_check': 'cluster.skip_full_coverage_check',
}
if salt.utils.platform.is_proxy():
return {
'host': __opts__.get('redis.host', 'salt'),
'port': __opts__.get('redis.port', 6379),
'unix_socket_path': __opts__.get('redis.unix_socket_path', None),
'db': __opts__.get('redis.db', '0'),
'password': __opts__.get('redis.password', ''),
'cluster_mode': __opts__.get('redis.cluster_mode', False),
'startup_nodes': __opts__.get('redis.cluster.startup_nodes', {}),
'skip_full_coverage_check': __opts__.get('redis.cluster.skip_full_coverage_check', False)
}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__)
return _options | 0.001426 |
def remove(image):
"""Remove an image to the GUI img library."""
path = os.path.join(IMG_DIR, image)
if os.path.isfile(path):
os.remove(path) | 0.006211 |
def traceplot(trace: sample_types, labels: List[Union[str, Tuple[str, str]]] = None, ax: Any = None,
x0: int = 0) -> Any:
"""
Plot samples values.
:param trace: result of MCMC run
:param labels: labels of vertices to be plotted. if None, all vertices are plotted.
:param ax: Matplotlib axes
:param x0: index of first data point, used for sample stream plots
"""
if labels is None:
labels = list(trace.keys())
if ax is None:
_, ax = plt.subplots(len(labels), 1, squeeze=False)
for index, label in enumerate(labels):
data = [sample for sample in trace[label]]
ax[index][0].set_title(label)
ax[index][0].plot(__integer_xaxis(ax[index][0], x0, len(data)), data)
__pause_for_crude_animation()
return ax | 0.003717 |
def redraw_tiles(self, surface):
""" redraw the visible portion of the buffer -- it is slow.
"""
# TODO/BUG: Redraw animated tiles correctly. They are getting reset here
logger.warning('pyscroll buffer redraw')
self._clear_surface(self._buffer)
self._tile_queue = self.data.get_tile_images_by_rect(self._tile_view)
self._flush_tile_queue(surface) | 0.007444 |
def TableArgsMeta(table_args):
'''Declarative metaclass automatically adding (merging) __table_args__ to
mapped classes. Example:
Meta = TableArgsMeta({
'mysql_engine': 'InnoDB',
'mysql_default charset': 'utf8',
})
Base = declarative_base(name='Base', metaclass=Meta)
class MyClass(Base):
…
is equivalent to
Base = declarative_base(name='Base')
class MyClass(Base):
__table_args__ = {
'mysql_engine': 'InnoDB',
'mysql_default charset': 'utf8',
}
…
'''
class _TableArgsMeta(declarative.DeclarativeMeta):
def __init__(cls, name, bases, dict_):
if ( # Do not extend base class
'_decl_class_registry' not in cls.__dict__ and
# Missing __tablename__ or equal to None means single table
# inheritance — no table for it (columns go to table of
# base class)
cls.__dict__.get('__tablename__') and
# Abstract class — no table for it (columns go to table[s]
# of subclass[es])
not cls.__dict__.get('__abstract__', False)):
ta = getattr(cls, '__table_args__', {})
if isinstance(ta, dict):
ta = dict(table_args, **ta)
cls.__table_args__ = ta
else:
assert isinstance(ta, tuple)
if ta and isinstance(ta[-1], dict):
tad = dict(table_args, **ta[-1])
ta = ta[:-1]
else:
tad = dict(table_args)
cls.__table_args__ = ta + (tad,)
super(_TableArgsMeta, cls).__init__(name, bases, dict_)
return _TableArgsMeta | 0.00105 |
def import_(module_name, name):
"""Imports an object by a relative module path::
Profiler = import_('profiling.profiler', 'Profiler')
"""
module = importlib.import_module(module_name, __package__)
return getattr(module, name) | 0.004 |
def get_listing_calendar(self, listing_id, starting_date=datetime.datetime.now(), calendar_months=6):
"""
Get host availability calendar for a given listing
"""
params = {
'_format': 'host_calendar_detailed'
}
starting_date_str = starting_date.strftime("%Y-%m-%d")
# approximate each calendar month as 30 days
ending_date_str = (
starting_date + datetime.timedelta(days=30 * calendar_months)).strftime("%Y-%m-%d")
r = self._session.get(API_URL + "/calendars/{}/{}/{}".format(
str(listing_id), starting_date_str, ending_date_str), params=params)
r.raise_for_status()
return r.json() | 0.006279 |
def compact_name(self, hashsize=6):
"""Compact representation of all simulation parameters
"""
# this can be made more robust for ID > 9 (double digit)
s = self.compact_name_core(hashsize, t_max=True)
s += "_ID%d-%d" % (self.ID, self.EID)
return s | 0.00678 |
def random_model(ctx, model_class_name):
"""
Get a random model identifier by class name. For example::
# db/fixtures/Category.yml
{% for i in range(0, 10) %}
category{{ i }}:
name: {{ faker.name() }}
{% endfor %}
# db/fixtures/Post.yml
a_blog_post:
category: {{ random_model('Category') }}
Will render to something like the following::
# db/fixtures/Post.yml (rendered)
a blog_post:
category: "Category(category7)"
:param ctx: The context variables of the current template (passed automatically)
:param model_class_name: The class name of the model to get.
"""
model_identifiers = ctx['model_identifiers'][model_class_name]
if not model_identifiers:
return 'None'
idx = random.randrange(0, len(model_identifiers))
return '"%s(%s)"' % (model_class_name, model_identifiers[idx]) | 0.002151 |
def get(self, media_id):
"""
Fetch permanent media material.
For details see
http://mp.weixin.qq.com/wiki/4/b3546879f07623cb30df9ca0e420a5d0.html
:param media_id: media_id of the material
:return: for news (article) material, returns the list of articles; for other types, returns the material content
"""
def _processor(res):
if isinstance(res, dict):
if 'news_item' in res:
# news (article) material
return res['news_item']
return res
res = self._post(
'material/get_material',
data={
'media_id': media_id
},
result_processor=_processor
)
return res | 0.003145 |
def construct_operation_validators(api_path, path_definition, operation_definition, context):
"""
- consumes (did the request conform to the content types this api consumes)
- produces (did the response conform to the content types this endpoint produces)
- parameters (did the parameters of this request validate)
TODO: move path parameter validation to here, because each operation
can override any of the path level parameters.
- schemes (was the request scheme correct)
- security: TODO since security isn't yet implemented.
"""
validators = {}
# sanity check
assert 'context' not in operation_definition
assert 'api_path' not in operation_definition
assert 'path_definition' not in operation_definition
for key in operation_definition.keys():
if key not in validator_mapping:
# TODO: is this the right thing to do?
continue
validators[key] = validator_mapping[key](
context=context,
api_path=api_path,
path_definition=path_definition,
**operation_definition
)
# Global defaults
if 'consumes' in context and 'consumes' not in validators:
validators['consumes'] = validator_mapping['consumes'](**context)
if 'parameters' in path_definition and 'parameters' not in validators:
validators['parameters'] = validator_mapping['parameters'](
context=context,
api_path=api_path,
path_definition=path_definition,
parameters=path_definition['parameters'],
**operation_definition
)
return validators | 0.001805 |
def get_function_help(function: str, bel_spec: BELSpec):
"""Get function_help given function name
This will get the function summary template (argument summary in signature)
and the argument help listing.
"""
function_long = bel_spec["functions"]["to_long"].get(function)
function_help = []
if function_long:
for signature in bel_spec["functions"]["signatures"][function_long]["signatures"]:
function_help.append(
{
"function_summary": signature["argument_summary"],
"argument_help": signature["argument_help_listing"],
"description": bel_spec["functions"]["info"][function_long]["description"],
}
)
return function_help | 0.003846 |
def index_open(self, index):
'''
Opens the specified index.
http://www.elasticsearch.org/guide/reference/api/admin-indices-open-close.html
> ElasticSearch().index_open('my_index')
'''
request = self.session
url = 'http://%s:%s/%s/_open' % (self.host, self.port, index)
response = request.post(url,None)
return response | 0.007653 |
def write_file(self, molecule, inpfile):
"""
Write an ADF input file.
Parameters
----------
molecule : Molecule
The molecule for this task.
inpfile : str
The name where the input file will be saved.
"""
mol_blocks = []
atom_block = AdfKey("Atoms", options=["cartesian"])
for site in molecule:
atom_block.add_subkey(AdfKey(str(site.specie), list(site.coords)))
mol_blocks.append(atom_block)
if molecule.charge != 0:
netq = molecule.charge
ab = molecule.spin_multiplicity - 1
charge_block = AdfKey("Charge", [netq, ab])
mol_blocks.append(charge_block)
if ab != 0:
unres_block = AdfKey("Unrestricted")
mol_blocks.append(unres_block)
with open(inpfile, "w+") as f:
for block in mol_blocks:
f.write(str(block) + "\n")
f.write(str(self.task) + "\n")
f.write("END INPUT") | 0.001899 |
def parseExtensionArgs(self, ax_args):
"""Given attribute exchange arguments, populate this FetchRequest.
@param ax_args: Attribute Exchange arguments from the request.
As returned from L{Message.getArgs<openid.message.Message.getArgs>}.
@type ax_args: dict
@raises KeyError: if the message is not consistent in its use
of namespace aliases.
@raises NotAXMessage: If ax_args does not include an Attribute Exchange
mode.
@raises AXError: If the data to be parsed does not follow the
attribute exchange specification. At least when
'if_available' or 'required' is not specified for a
particular attribute type.
"""
# Raises an exception if the mode is not the expected value
self._checkMode(ax_args)
aliases = NamespaceMap()
for key, value in ax_args.items():
if key.startswith('type.'):
alias = key[5:]
type_uri = value
aliases.addAlias(type_uri, alias)
count_key = 'count.' + alias
count_s = ax_args.get(count_key)
if count_s:
try:
count = int(count_s)
if count <= 0:
raise AXError(
"Count %r must be greater than zero, got %r" %
(count_key, count_s, ))
except ValueError:
if count_s != UNLIMITED_VALUES:
raise AXError("Invalid count value for %r: %r" %
(count_key, count_s, ))
count = count_s
else:
count = 1
self.add(AttrInfo(type_uri, alias=alias, count=count))
required = toTypeURIs(aliases, ax_args.get('required'))
for type_uri in required:
self.requested_attributes[type_uri].required = True
if_available = toTypeURIs(aliases, ax_args.get('if_available'))
all_type_uris = required + if_available
for type_uri in aliases.iterNamespaceURIs():
if type_uri not in all_type_uris:
raise AXError('Type URI %r was in the request but not '
'present in "required" or "if_available"' %
(type_uri, ))
self.update_url = ax_args.get('update_url') | 0.001191 |
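A sketch of the attribute-exchange argument layout this parser expects, assuming the enclosing class is the python-openid `FetchRequest` (the `email` alias and axschema URI are illustrative):
ax_args = {
    'mode': 'fetch_request',
    'type.email': 'http://axschema.org/contact/email',
    'count.email': '1',
    'required': 'email',
}
req = FetchRequest()
req.parseExtensionArgs(ax_args)
# req.requested_attributes now maps the email type URI to an AttrInfo
# with required=True and count=1.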
def delete(self, obj):
"""
Returns
object: full copy of new obj
"""
full = deepcopy(obj)
frag = full
parts, last = self.parts[:-1], self.parts[-1]
for part in parts:
if isinstance(frag, dict):
frag = frag[part]
elif isinstance(frag, (list, tuple)):
frag = frag[int(part)]
if isinstance(frag, dict):
frag.pop(last)
elif isinstance(frag, list):
if last == '-':
frag.pop()
else:
frag.pop(int(last))
return full | 0.0032 |
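A hedged usage sketch: the class owning `delete` is not shown here, so assume a JSON-pointer-like object whose `parts` were parsed from a path such as `/a/b/1`.
doc = {"a": {"b": [10, 20, 30]}}
# pointer.parts == ['a', 'b', '1'] (hypothetical construction, e.g. Pointer('/a/b/1'))
new_doc = pointer.delete(doc)
# `doc` is left untouched (a deep copy is edited); new_doc == {"a": {"b": [10, 30]}}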
def solve(
solver, mzn, *dzn_files, data=None, include=None, stdlib_dir=None,
globals_dir=None, allow_multiple_assignments=False, output_mode='item',
timeout=None, two_pass=None, pre_passes=None, output_objective=False,
non_unique=False, all_solutions=False, num_solutions=None,
free_search=False, parallel=None, seed=None, **kwargs
):
"""Flatten and solve a MiniZinc program.
Parameters
----------
solver : Solver
The ``Solver`` instance to use.
mzn : str
The path to the minizinc model file.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : list of str
Additional data as a list of strings containing dzn variables
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
output_mode : {'item', 'dzn', 'json'}
The desired output format. The default is ``'item'`` which outputs a
stream of strings as returned by the ``solns2out`` tool, formatted
according to the output statement of the MiniZinc model. The ``'dzn'``
and ``'json'`` formats output a stream of strings formatted in dzn and
json respectively.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If True, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for the solving.
seed : int
The random number generator seed to pass to the solver.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Object wrapping the executed process.
"""
args = _solve_args(
solver, timeout=timeout, two_pass=two_pass, pre_passes=pre_passes,
output_objective=output_objective, non_unique=non_unique,
all_solutions=all_solutions, num_solutions=num_solutions,
free_search=free_search, parallel=parallel, seed=seed, **kwargs
)
args += _flattening_args(
mzn, *dzn_files, data=data, stdlib_dir=stdlib_dir,
globals_dir=globals_dir, output_mode=output_mode, include=include,
allow_multiple_assignments=allow_multiple_assignments
)
input = mzn if args[-1] == '-' else None
t0 = _time()
try:
proc = _run_minizinc_proc(*args, input=input)
except RuntimeError as err:
raise MiniZincError(mzn, args) from err
solve_time = _time() - t0
logger.info('Solving completed in {:>3.2f} sec'.format(solve_time))
return proc | 0.000229 |
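A hedged usage sketch, assuming pymzn-style `Solver` objects (e.g. a bundled `pymzn.gecode` instance) and hypothetical `model.mzn`/`data.dzn` files:
import pymzn

proc = solve(pymzn.gecode, 'model.mzn', 'data.dzn',
             all_solutions=True, timeout=30, output_mode='dzn')
# `proc` wraps the executed minizinc process; its output stream contains
# one dzn-formatted string per solution.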
def interactive(renderer):
"""
Parse user input, dump to stdout, rinse and repeat.
Python REPL style.
"""
_import_readline()
_print_heading(renderer)
contents = []
more = False
while True:
try:
prompt, more = ('... ', True) if more else ('>>> ', True)
contents.append(input(prompt) + '\n')
except EOFError:
print('\n' + mistletoe.markdown(contents, renderer), end='')
more = False
contents = []
except KeyboardInterrupt:
print('\nExiting.')
break | 0.001695 |
def _check_for_duplicates(xs, attr, check_fn=None):
"""Identify and raise errors on duplicate items.
"""
dups = []
for key, vals in itertools.groupby(x[attr] for x in xs):
if len(list(vals)) > 1:
dups.append(key)
if len(dups) > 0:
psamples = []
for x in xs:
if x[attr] in dups:
psamples.append(x)
# option to skip problem based on custom input function.
if check_fn and check_fn(psamples):
return
descrs = [x["description"] for x in psamples]
raise ValueError("Duplicate '%s' found in input sample configuration.\n"
"Required to be unique for a project: %s\n"
"Problem found in these samples: %s" % (attr, dups, descrs)) | 0.003759 |
def verify(institute_id, case_name, variant_id, variant_category, order):
"""Start procedure to validate variant using other techniques."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
variant_obj = store.variant(variant_id)
user_obj = store.user(current_user.email)
comment = request.form.get('verification_comment')
try:
controllers.variant_verification(store=store, mail=mail, institute_obj=institute_obj, case_obj=case_obj, user_obj=user_obj, comment=comment,
variant_obj=variant_obj, sender=current_app.config['MAIL_USERNAME'], variant_url=request.referrer, order=order, url_builder=url_for)
except controllers.MissingVerificationRecipientError:
flash('No verification recipients added to institute.', 'danger')
return redirect(request.referrer) | 0.006944 |
def download_url(job, url, work_dir='.', name=None, s3_key_path=None, cghub_key_path=None):
"""
Downloads URL, can pass in file://, http://, s3://, or ftp://, gnos://cghub/analysisID, or gnos:///analysisID
If downloading S3 URLs, the S3AM binary must be on the PATH
:param toil.job.Job job: Toil job that is calling this function
:param str url: URL to download from
:param str work_dir: Directory to download file to
:param str name: Name of output file, if None, basename of URL is used
:param str s3_key_path: Path to 32-byte encryption key if url points to S3 file that uses SSE-C
:param str cghub_key_path: Path to cghub key used to download from CGHub.
:return: Path to the downloaded file
:rtype: str
"""
file_path = os.path.join(work_dir, name) if name else os.path.join(work_dir, os.path.basename(url))
if cghub_key_path:
_download_with_genetorrent(job, url, file_path, cghub_key_path)
elif urlparse(url).scheme == 's3':
_s3am_with_retry(job, num_cores=1, file_path=file_path, s3_url=url, mode='download', s3_key_path=s3_key_path)
elif urlparse(url).scheme == 'file':
shutil.copy(urlparse(url).path, file_path)
else:
subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path])
assert os.path.exists(file_path)
return file_path | 0.005084 |
def set(self, field, clusters, value, add_to_stack=True):
"""Set the value of one of several clusters."""
# Add the field if it doesn't exist.
if field not in self._fields:
self.add_field(field)
assert field in self._fields
clusters = _as_list(clusters)
for cluster in clusters:
if cluster not in self._data:
self._data[cluster] = {}
self._data[cluster][field] = value
up = UpdateInfo(description='metadata_' + field,
metadata_changed=clusters,
metadata_value=value,
)
undo_state = self.emit('request_undo_state', up)
if add_to_stack:
self._undo_stack.add((clusters, field, value, up, undo_state))
self.emit('cluster', up)
return up | 0.002315 |
def hue(p):
"""
Returns the hue of a pixel.
Gray pixels have hue 0.
:param p: A tuple of (R,G,B) values
:return: A hue value between 0 and 360
"""
min_c = min(p)
max_c = max(p)
d = float(max_c - min_c)
if d == 0:
return 0
if max_c == p[0]:
h = (p[1] - p[2]) / d
elif max_c == p[1]:
h = 2 + (p[2] - p[0]) / d
else:
h = 4 + (p[0] - p[1]) / d
h *= 60
if h < 0:
h += 360
return h | 0.001996 |
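A few worked values, straightforward to verify by hand against the branches above:
assert hue((255, 0, 0)) == 0      # red
assert hue((0, 255, 0)) == 120    # green
assert hue((0, 0, 255)) == 240    # blue
assert hue((128, 128, 128)) == 0  # gray: d == 0 short-circuits to 0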
def set_query_on_table_metaclass(model: object, session: Session):
"""
Ensures that the given database model (`DeclarativeMeta`) has a `query` property through
which the user can easily query the corresponding database table.
Database object models derived from Flask-SQLAlchemy's `database.Model` have this property
set up by default, but when using plain SQLAlchemy this may not be the case; this method
fixes that.
Arguments:
model (DeclarativeMeta): The database model object whose `query` property should be
set up if it's not set up already.
session (Session): The session to use to set up the `query` property on `model`.
"""
if not hasattr(model, "query"):
model.query = session.query(model) | 0.008055 |
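A hedged end-to-end sketch with plain SQLAlchemy (the model and table names are illustrative):
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

set_query_on_table_metaclass(User, session)
first_user = User.query.first()  # Flask-SQLAlchemy-style access on a plain SQLAlchemy model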
def rename_fmapm(bids_base, basename):
'''
Rename magnitude fieldmap file to BIDS specification
'''
files = dict()
for ext in ['nii.gz', 'json']:
for echo in [1, 2]:
fname = '{0}_e{1}.{2}'.format(basename, echo, ext)
src = os.path.join(bids_base, 'fmap', fname)
if os.path.exists(src):
dst = src.replace(
'magnitude_e{0}'.format(echo),
'magnitude{0}'.format(echo)
)
logger.debug('renaming %s to %s', src, dst)
os.rename(src, dst)
files[ext] = dst
return files | 0.001546 |
def do_execute_direct(self, code: str, silent: bool = False) -> [str, dict]:
"""
This is the main method that takes code from the Jupyter cell and submits it to the SAS server
:param code: code from the cell
:param silent:
:return: str with either the log or list
"""
if not code.strip():
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
if self.mva is None:
self._allow_stdin = True
self._start_sas()
if self.lst_len < 0:
self._get_lst_len()
if code.startswith('Obfuscated SAS Code'):
logger.debug("decoding string")
tmp1 = code.split()
decode = base64.b64decode(tmp1[-1])
code = decode.decode('utf-8')
if code.startswith('showSASLog_11092015') == False and code.startswith("CompleteshowSASLog_11092015") == False:
logger.debug("code type: " + str(type(code)))
logger.debug("code length: " + str(len(code)))
logger.debug("code string: " + code)
if code.startswith("/*SASKernelTest*/"):
res = self.mva.submit(code, "text")
else:
res = self.mva.submit(code, prompt=self.promptDict)
self.promptDict = {}
if res['LOG'].find("SAS process has terminated unexpectedly") > -1:
print(res['LOG'], '\n' "Restarting SAS session on your behalf")
self.do_shutdown(True)
return res['LOG']
output = res['LST']
log = res['LOG']
return self._which_display(log, output)
elif code.startswith("CompleteshowSASLog_11092015") == True and code.startswith('showSASLog_11092015') == False:
full_log = highlight(self.mva.saslog(), SASLogLexer(),
HtmlFormatter(full=True, style=SASLogStyle, lineseparator="<br>",
title="Full SAS Log"))
return full_log.replace('\n', ' ')
else:
return self.cachedlog.replace('\n', ' ') | 0.004564 |
def t_fold_end(self, t):
r'\n+\ *'
column = find_column(t)
indent = self.indent_stack[-1]
if column < indent:
rollback_lexpos(t)
if column <= indent:
t.lexer.pop_state()
t.type = 'B_FOLD_END'
if column > indent:
t.type = 'SCALAR'
return t | 0.005848 |
def role_assignment_path(cls, project, incident, role_assignment):
"""Return a fully-qualified role_assignment string."""
return google.api_core.path_template.expand(
"projects/{project}/incidents/{incident}/roleAssignments/{role_assignment}",
project=project,
incident=incident,
role_assignment=role_assignment,
) | 0.007772 |
def parse_info(wininfo_name, egginfo_name):
"""Extract metadata from filenames.
Extracts the 4 metadataitems needed (name, version, pyversion, arch) from
the installer filename and the name of the egg-info directory embedded in
the zipfile (if any).
The egginfo filename has the format::
name-ver(-pyver)(-arch).egg-info
The installer filename has the format::
name-ver.arch(-pyver).exe
Some things to note:
1. The installer filename is not definitive. An installer can be renamed
and work perfectly well as an installer. So more reliable data should
be used whenever possible.
2. The egg-info data should be preferred for the name and version, because
these come straight from the distutils metadata, and are mandatory.
3. The pyver from the egg-info data should be ignored, as it is
constructed from the version of Python used to build the installer,
which is irrelevant - the installer filename is correct here (even to
the point that when it's not there, any version is implied).
4. The architecture must be taken from the installer filename, as it is
not included in the egg-info data.
5. Architecture-neutral installers still have an architecture because the
installer format itself (being executable) is architecture-specific. We
should therefore ignore the architecture if the content is pure-python.
"""
egginfo = None
if egginfo_name:
egginfo = egg_info_re.search(egginfo_name)
if not egginfo:
raise ValueError("Egg info filename %s is not valid" %
(egginfo_name,))
# Parse the wininst filename
# 1. Distribution name (up to the first '-')
w_name, sep, rest = wininfo_name.partition('-')
if not sep:
raise ValueError("Installer filename %s is not valid" %
(wininfo_name,))
# Strip '.exe'
rest = rest[:-4]
# 2. Python version (from the last '-', must start with 'py')
rest2, sep, w_pyver = rest.rpartition('-')
if sep and w_pyver.startswith('py'):
rest = rest2
w_pyver = w_pyver.replace('.', '')
else:
# Not version specific - use py2.py3. While it is possible that
# pure-Python code is not compatible with both Python 2 and 3, there
# is no way of knowing from the wininst format, so we assume the best
# here (the user can always manually rename the wheel to be more
# restrictive if needed).
w_pyver = 'py2.py3'
# 3. Version and architecture
w_ver, sep, w_arch = rest.rpartition('.')
if not sep:
raise ValueError("Installer filename %s is not valid" %
(wininfo_name,))
if egginfo:
w_name = egginfo.group('name')
w_ver = egginfo.group('ver')
return dict(name=w_name, ver=w_ver, arch=w_arch, pyver=w_pyver) | 0.00172 |
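A worked example with only the installer filename (a hypothetical `mypkg` installer; passing `None` for the egg-info avoids depending on the `egg_info_re` pattern, which is defined elsewhere in the module):
info = parse_info("mypkg-1.2.0.win-amd64-py2.7.exe", None)
# The version/arch split happens at the last '.', the pyver at the last '-':
# {'name': 'mypkg', 'ver': '1.2.0', 'arch': 'win-amd64', 'pyver': 'py27'}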
def remote_file_size(self, remote_cmd="", remote_file=None):
"""Get the file size of the remote file."""
if remote_file is None:
if self.direction == "put":
remote_file = self.dest_file
elif self.direction == "get":
remote_file = self.source_file
remote_cmd = 'system "ls -l {}/{}"'.format(self.file_system, remote_file)
remote_out = self.ssh_ctl_chan.send_command(remote_cmd)
for line in remote_out.splitlines():
if remote_file in line:
file_size = line.split()[4]
break
if "Error opening" in remote_out or "No such file or directory" in remote_out:
raise IOError("Unable to find file on remote system")
else:
return int(file_size) | 0.004926 |
def shift_right(self, times=1):
"""
Finds Location shifted right by 1
:rtype: Location
"""
try:
return Location(self._rank, self._file + times)
except IndexError as e:
raise IndexError(e) | 0.007692 |
def set_user_name(self, uid, name):
"""Set user name
:param uid: user number [1:16]
:param name: username (limit of 16 bytes)
"""
data = [uid]
if len(name) > 16:
raise Exception('name must be less than or = 16 chars')
name = name.ljust(16, "\x00")
data.extend([ord(x) for x in name])
self.xraw_command(netfn=0x06, command=0x45, data=data)
return True | 0.004525 |
def _ParsePathSpecification(
self, knowledge_base, searcher, file_system, path_specification,
path_separator):
"""Parses a file system for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess
the file system.
file_system (dfvfs.FileSystem): file system to be preprocessed.
path_specification (dfvfs.PathSpec): path specification that contains
the artifact value data.
path_separator (str): path segment separator.
Raises:
PreProcessFail: if the preprocessing fails.
"""
try:
file_entry = searcher.GetFileEntryByPathSpec(path_specification)
except IOError as exception:
relative_path = searcher.GetRelativePath(path_specification)
if path_separator != file_system.PATH_SEPARATOR:
relative_path_segments = file_system.SplitPath(relative_path)
relative_path = '{0:s}{1:s}'.format(
path_separator, path_separator.join(relative_path_segments))
raise errors.PreProcessFail((
'Unable to retrieve file entry: {0:s} with error: '
'{1!s}').format(relative_path, exception))
if file_entry:
self._ParseFileEntry(knowledge_base, file_entry) | 0.004515 |
def original(self):
"""(:class:`Image`) The original image. It could be :const:`None`
if there are no stored images yet.
"""
images = self.query._original_images(**self.identity_map)
if images:
return images[0] | 0.007576 |
def get_xyz(self, list_of_names=None):
"""Get xyz coordinates for these electrodes
Parameters
----------
list_of_names : list of str
list of electrode names to use
Returns
-------
list of tuples of 3 floats (x, y, z)
list of xyz coordinates for all the electrodes
TODO
----
coordinate system of electrodes
"""
if list_of_names is not None:
filter_lambda = lambda x: x['name'] in list_of_names
else:
filter_lambda = None
return self.electrodes.get(filter_lambda=filter_lambda,
map_lambda=lambda e: (float(e['x']),
float(e['y']),
float(e['z']))) | 0.003505 |
def save_state(self):
"""Store the options into the user's stored session info.
.. versionadded: 4.3.0
"""
settings = QtCore.QSettings()
settings.setValue(
'inasafe/useDefaultTemplates',
self.default_template_radio.isChecked())
settings.setValue(
'inasafe/lastTemplate',
self.template_combo.itemData(self.template_combo.currentIndex()))
settings.setValue(
'inasafe/lastCustomTemplate', self.template_path.text()) | 0.003781 |
def get_part_filenames(num_parts=None, start_num=0):
"""Get numbered PART.html filenames."""
if num_parts is None:
num_parts = get_num_part_files()
return ['PART{0}.html'.format(i) for i in range(start_num+1, num_parts+1)] | 0.004132 |
def from_xx(cls, xx):
"""
Create a new Language instance from a ISO639 string
:param xx: ISO639 as string
:return: Language instance with instance.xx() == xx if xx is valid else instance of UnknownLanguage
"""
xx = str(xx).lower()
if xx == 'unknown':
return UnknownLanguage(xx)
try:
return cls._from_xyz('ISO639', xx)
except NotALanguageException:
log.warning('Unknown ISO639: {}'.format(xx))
return UnknownLanguage(xx) | 0.005566 |
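A hedged usage sketch, assuming the enclosing class is called `Language` and that `'en'` is a known ISO639 code in its tables:
lang = Language.from_xx('en')   # valid code -> Language instance, lang.xx() == 'en'
unk = Language.from_xx('zz')    # unrecognised code -> UnknownLanguage, with a warning logged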
def from_lab(l, a, b, alpha=1.0, wref=_DEFAULT_WREF):
"""Create a new instance based on the specifed CIE-LAB values.
Parameters:
:l:
The L component [0...100]
:a:
The a component [-1...1]
:b:
The b component [-1...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.from_lab(66.951823, 0.43084105, 0.73969231)
Color(1.0, 0.5, -0.0, 1.0)
>>> Color.from_lab(66.951823, 0.41165967, 0.67282012, wref=WHITE_REFERENCE['std_D50'])
Color(1.0, 0.5, -0.0, 1.0)
>>> Color.from_lab(66.951823, 0.43084105, 0.73969231, 0.5)
Color(1.0, 0.5, -0.0, 0.5)
>>> Color.from_lab(66.951823, 0.41165967, 0.67282012, 0.5, WHITE_REFERENCE['std_D50'])
Color(1.0, 0.5, -0.0, 0.5)
"""
return Color(xyz_to_rgb(*lab_to_xyz(l, a, b, wref)), 'rgb', alpha, wref) | 0.004132 |
def is_filter_selected(self, selection_id, value):
"""
Compares whether the 'selection_id' parameter value saved in the
cookie is the same value as the "value" parameter.
:param selection_id: a string as a dashboard_cookie key.
:param value: The value to compare against the value from
dashboard_cookie key.
:return: Boolean.
"""
selected = self.dashboard_cookie.get(selection_id)
return selected == value | 0.004115 |
def sonify_clicks(audio, clicks, out_file, fs, offset=0):
"""Sonifies the estimated times into the output file.
Parameters
----------
audio: np.array
Audio samples of the input track.
clicks: np.array
Click positions in seconds.
out_file: str
Path to the output file.
fs: int
Sample rate.
offset: float
Offset of the clicks with respect to the audio.
"""
# Generate clicks (this should be done by mir_eval, but its
# latest release is not compatible with latest numpy)
times = clicks + offset
# 1 kHz tone, 100ms
click = np.sin(2 * np.pi * np.arange(fs * .1) * 1000 / (1. * fs))
# Exponential decay
click *= np.exp(-np.arange(fs * .1) / (fs * .01))
length = int(times.max() * fs + click.shape[0] + 1)
audio_clicks = mir_eval.sonify.clicks(times, fs, length=length)
# Create array to store the audio plus the clicks
out_audio = np.zeros(max(len(audio), len(audio_clicks)))
# Assign the audio and the clicks
out_audio[:len(audio)] = audio
out_audio[:len(audio_clicks)] += audio_clicks
# Write to file
scipy.io.wavfile.write(out_file, fs, out_audio) | 0.00084 |
def stop(name, call=None):
'''
Stop a node
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Stopping node %s', name)
instance_id = _get_node(name)['instanceId']
__utils__['cloud.fire_event'](
'event',
'stopping instance',
'salt/cloud/{0}/stopping'.format(name),
args={'name': name, 'instance_id': instance_id},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
params = {'Action': 'StopInstances',
'InstanceId.1': instance_id}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return result | 0.001164 |
def apply_T6(word):
'''If a VVV-sequence contains a long vowel, there is a syllable boundary
between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an],
[mää.yt.te].'''
WORD = word
offset = 0
for vvv in vvv_sequences(WORD):
seq = vvv.group(2)
j = 2 if is_long(seq[:2]) else 1 if is_long(seq[1:]) else 0
if j:
i = vvv.start(2) + j + offset
WORD = WORD[:i] + '.' + WORD[i:]
offset += 1
RULE = ' T6' if word != WORD else ''
return WORD, RULE | 0.001828 |
def get_anki_phrases_english(limit=None):
""" Return all the English phrases in the Anki translation flashcards
>>> len(get_anki_phrases_english(limit=100)) > 700
True
"""
texts = set()
for lang in ANKI_LANGUAGES:
df = get_data(lang)
phrases = df.eng.str.strip().values
texts = texts.union(set(phrases))
if limit and len(texts) >= limit:
break
return sorted(texts) | 0.004566 |
def diff(args):
"""
%prog diff afasta bfasta
print out whether the records in two fasta files are the same
"""
from jcvi.utils.table import banner
p = OptionParser(diff.__doc__)
p.add_option("--ignore_case", default=False, action="store_true",
help="ignore case when comparing sequences [default: %default]")
p.add_option("--ignore_N", default=False, action="store_true",
help="ignore N and X's when comparing sequences [default: %default]")
p.add_option("--ignore_stop", default=False, action="store_true",
help="ignore stop codon when comparing sequences [default: %default]")
p.add_option("--rc", default=False, action="store_true",
help="also consider reverse complement [default: %default]")
p.add_option("--quiet", default=False, action="store_true",
help="don't output comparison details [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
afasta, bfasta = args
afastan = len(Fasta(afasta))
bfastan = len(Fasta(bfasta))
if afastan == bfastan:
print(green("Two sets contain the same number of sequences ({0}, {1})".\
format(afastan, bfastan)))
else:
print(red("Two sets contain different number of sequences ({0}, {1})".\
format(afastan, bfastan)))
ah = SeqIO.parse(afasta, "fasta")
bh = SeqIO.parse(bfasta, "fasta")
problem_ids = []
for arec, brec in zip(ah, bh):
if opts.ignore_stop:
arec.seq = arec.seq.rstrip("*")
brec.seq = brec.seq.rstrip("*")
asize, bsize = len(arec), len(brec)
if not opts.quiet:
print(banner(str(arec), [str(brec)]))
if asize == bsize:
print(green("Two sequence size match (%d)" % asize))
else:
print(red("Two sequence size do not match (%d, %d)" % (asize, bsize)))
# print out the first place the two sequences diff
fd = print_first_difference(arec, brec, ignore_case=opts.ignore_case,
ignore_N=opts.ignore_N, rc=opts.rc, report_match=not opts.quiet)
if not fd:
logging.error("Two sets of sequences differ at `{0}`".format(arec.id))
problem_ids.append("\t".join(str(x) for x in (arec.id, asize, bsize,
abs(asize - bsize))))
if problem_ids:
print(red("A total of {0} records mismatch.".format(len(problem_ids))))
fw = must_open("Problems.ids", "w")
print("\n".join(problem_ids), file=fw) | 0.007266 |
def check_pointers(parser, codedir=None, mfilter=None, recursive=False):
"""Checks the modules in the specified code parser to see if they
have common, but subtle, pointer bugs in:
1. subroutines with a parameter of intent(out) and user-derived type
*must* set *all* members of that parameter or they will have an
*undefined* status.
2. pointer-type arrays that are neither nullified nor set to a valid target
may still return 'T' when passed to `associated`. Best practice is to nullify
pointer arrays in user-derived types as the default value on those types.
:arg parser: [fortpy.code.CodeParser] with the modules to search *already loaded*.
:arg codedir: specify the full path to the library whose modules should be searched,
just another way to filter which modules are generating the warnings.
:arg mfilter: filter to apply to module names; can use the wildcard standard
from bash.
"""
from fnmatch import fnmatch
from fortpy.msg import std, set_verbosity, info
set_verbosity(0)
W1 = " {} '{}' does not set the value of members '{}' in parameter '{}'."
W2 = " Type '{}' does not nullify members '{}' on creation."
offenders = {}
for (modname, module) in parser.modules.items():
if not recursive and codedir is not None and not codedir.lower() in module.filepath.lower():
continue
if mfilter is not None and not fnmatch(module.name.lower(), mfilter.lower()):
continue
#Test the first condition above for all subroutines in the module; also handle
#the recursively defined subroutines.
hprinted = False
for xname, xvalue in module.executables.items():
oparams, pmembers = _exec_check_pointers(xvalue)
if len(oparams) > 0:
if not hprinted:
info("Best practice suggestions: {}".format(module.filepath))
hprinted = True
for oparam in oparams:
plist = ', '.join([p.name for p in pmembers[oparam]])
std(W1.format(type(xvalue).__name__, xname, plist, oparam), 0)
offenders[xvalue.full_name] = (oparams, pmembers)
for tname, tvalue in module.types.items():
result = _type_check_pointers(tvalue)
if len(result) > 0:
if not hprinted:
info("Best practice suggestions: {}".format(module.filepath))
hprinted = True
plist = ', '.join([p.name for p in result])
std(W2.format(tname, plist), 0)
offenders[xvalue.full_name] = result
return offenders | 0.005816 |
def parse_url_or_log(url, encoding='utf-8'):
'''Parse and return a URLInfo.
This function logs a warning if the URL cannot be parsed and returns
None.
'''
try:
url_info = URLInfo.parse(url, encoding=encoding)
except ValueError as error:
_logger.warning(__(
_('Unable to parse URL ‘{url}’: {error}.'),
url=wpull.string.printable_str(url), error=error))
else:
return url_info | 0.002222 |
def deleteICM(uuid: str):
""" Deletes an ICM"""
_metadata = ICMMetadata.query.filter_by(id=uuid).first()
db.session.delete(_metadata)
db.session.commit()
return ("", 204) | 0.005263 |
def get_effective_course_roles_in_account(self, account_id):
"""
List all course roles available to an account, for the passed Canvas
account ID, including course roles inherited from parent accounts.
"""
course_roles = []
params = {"show_inherited": "1"}
for role in self.get_roles_in_account(account_id, params):
if role.base_role_type != "AccountMembership":
course_roles.append(role)
return course_roles | 0.004008 |
def _extract_image(self, raw_image):
"""
Extract unequally-sized image bands.
Parameters
----------
raw_image : reference to openjpeg ImageType instance
The image structure initialized with image characteristics.
Returns
-------
list or ndarray
If the JPEG 2000 image has unequally-sized components, they are
extracted into a list, otherwise a numpy array.
"""
ncomps = raw_image.contents.numcomps
# Make a pass thru the image, see if any of the band datatypes or
# dimensions differ.
dtypes, nrows, ncols = [], [], []
for k in range(raw_image.contents.numcomps):
component = raw_image.contents.comps[k]
dtypes.append(self._component2dtype(component))
nrows.append(component.h)
ncols.append(component.w)
is_cube = all(r == nrows[0] and c == ncols[0] and d == dtypes[0]
for r, c, d in zip(nrows, ncols, dtypes))
if is_cube:
image = np.zeros((nrows[0], ncols[0], ncomps), dtypes[0])
else:
image = []
for k in range(raw_image.contents.numcomps):
component = raw_image.contents.comps[k]
self._validate_nonzero_image_size(nrows[k], ncols[k], k)
addr = ctypes.addressof(component.data.contents)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
nelts = nrows[k] * ncols[k]
band = np.ctypeslib.as_array(
(ctypes.c_int32 * nelts).from_address(addr))
if is_cube:
image[:, :, k] = np.reshape(band.astype(dtypes[k]),
(nrows[k], ncols[k]))
else:
image.append(np.reshape(band.astype(dtypes[k]),
(nrows[k], ncols[k])))
if is_cube and image.shape[2] == 1:
# The third dimension has just a single layer. Make the image
# data 2D instead of 3D.
image.shape = image.shape[0:2]
return image | 0.000913 |
def pods(namespace='default', **kwargs):
'''
Return a list of kubernetes pods defined in the namespace
CLI Examples::
salt '*' kubernetes.pods
salt '*' kubernetes.pods namespace=default
'''
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_namespaced_pod(namespace)
return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')]
except (ApiException, HTTPError) as exc:
if isinstance(exc, ApiException) and exc.status == 404:
return None
else:
log.exception(
'Exception when calling '
'CoreV1Api->list_namespaced_pod'
)
raise CommandExecutionError(exc)
finally:
_cleanup(**cfg) | 0.002398 |
def somethingFound(self,data,mode="phonefy"):
'''
Verifies whether something was found. Note that this method had to be rewritten: for Spoj we need to look for text which APPEARS instead of looking for text that does NOT appear.
:param data: Data where the self.notFoundText will be searched.
:param mode: Mode to be executed.
:return: Returns True if exists.
'''
#try:
for text in self.notFoundText[mode]:
if text in data:
# This is the change with regards to the standard behaviour!
return True
return False | 0.014493 |
def is_profile_in_legacy_format(profile):
"""
Is a given profile JSON object in legacy format?
"""
if isinstance(profile, dict):
pass
elif isinstance(profile, (str, unicode)):
try:
profile = json.loads(profile)
except ValueError:
return False
else:
return False
if "@type" in profile:
return False
if "@context" in profile:
return False
is_in_legacy_format = False
if "avatar" in profile:
is_in_legacy_format = True
elif "cover" in profile:
is_in_legacy_format = True
elif "bio" in profile:
is_in_legacy_format = True
elif "twitter" in profile:
is_in_legacy_format = True
elif "facebook" in profile:
is_in_legacy_format = True
return is_in_legacy_format | 0.001205 |
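A short worked example (the profile dicts are illustrative):
legacy = {"avatar": {"url": "https://example.com/a.png"}, "bio": "hello"}
modern = {"@type": "Person", "@context": "http://schema.org"}

assert is_profile_in_legacy_format(legacy) is True
assert is_profile_in_legacy_format(modern) is False
assert is_profile_in_legacy_format('{"twitter": "alice"}') is True  # JSON strings are parsed first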
def flatpages_link_list(request):
"""
Returns a HttpResponse whose content is a Javascript file representing a
list of links to flatpages.
"""
from django.contrib.flatpages.models import FlatPage
link_list = [(page.title, page.url) for page in FlatPage.objects.all()]
return render_to_link_list(link_list) | 0.003003 |
def set_effect(self, effect_name: str):
"""Sets the color change program of the light."""
try:
effect_index = self._light_effect_list.index(effect_name)
except ValueError:
LOG.error("Trying to set unknown light effect")
return False
return self.setValue(key="PROGRAM", channel=self._effect_channel, value=effect_index) | 0.007752 |
def _get_genesplicer(data):
"""
This is a plugin for the Ensembl Variant Effect Predictor (VEP) that
runs GeneSplicer (https://ccb.jhu.edu/software/genesplicer/) to get splice site predictions.
https://github.com/Ensembl/VEP_plugins/blob/master/GeneSplicer.pm
"""
genesplicer_exec = os.path.realpath(config_utils.get_program("genesplicer", data["config"]))
genesplicer_training = tz.get_in(("genome_resources", "variation", "genesplicer"), data)
if genesplicer_exec and os.path.exists(genesplicer_exec) and genesplicer_training and os.path.exists(genesplicer_training) :
return ["--plugin", "GeneSplicer,%s,%s" % (genesplicer_exec,genesplicer_training)]
else:
return [] | 0.01108 |
def get_service(name):
"""
Get the service descriptor for the given service name.
@see: L{start_service}, L{stop_service},
L{pause_service}, L{resume_service}
@type name: str
@param name: Service unique name. You can get this value from the
C{ServiceName} member of the service descriptors returned by
L{get_services} or L{get_active_services}.
@rtype: L{win32.ServiceStatusProcess}
@return: Service status descriptor.
"""
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE
) as hSCManager:
with win32.OpenService(hSCManager, name,
dwDesiredAccess = win32.SERVICE_QUERY_STATUS
) as hService:
try:
return win32.QueryServiceStatusEx(hService)
except AttributeError:
return win32.QueryServiceStatus(hService) | 0.007021 |
def stored_messages_archive(context, num_elements=10):
"""
Renders a list of archived messages for the current user
"""
if "user" in context:
user = context["user"]
if user.is_authenticated():
qs = MessageArchive.objects.select_related("message").filter(user=user)
return {
"messages": qs[:num_elements],
"count": qs.count(),
} | 0.004684 |
def _do_analysis_cross_validation(self):
"""
Find the best model (fit) based on cross-valiation (leave one out)
"""
assert len(self.df) < 15, "Cross-validation is not implemented if your sample contains more than 15 datapoints"
# initialization: first model is the mean, but compute cv correctly.
errors = []
response_term = [Term([LookupFactor(self.y)])]
model_terms = [Term([])] # empty term is the intercept
model_desc = ModelDesc(response_term, model_terms)
for i in self.df.index:
# make new_fit, compute cross-validation and store error
df_ = self.df.drop(i, axis=0)
fit = fm.ols(model_desc, data=df_).fit()
cross_prediction = self._predict(fit=fit, df=self.df.loc[[i], :])
errors.append(cross_prediction['predicted'] - cross_prediction[self.y])
self._list_of_fits = [fm.ols(model_desc, data=self.df).fit()]
self.list_of_cverrors = [np.mean(np.abs(np.array(errors)))]
# try to improve the model until no improvements can be found
all_model_terms_dict = {x: Term([LookupFactor(x)]) for x in self.list_of_x}
while all_model_terms_dict:
# import pdb;pdb.set_trace()
# try each x in all_exog and overwrite if we find a better one
# at the end of iteration (and not earlier), save the best of the iteration
better_model_found = False
best = dict(fit=self._list_of_fits[-1], cverror=self.list_of_cverrors[-1])
for x, term in all_model_terms_dict.items():
model_desc = ModelDesc(response_term, self._list_of_fits[-1].model.formula.rhs_termlist + [term])
# cross_validation, currently only implemented for monthly data
# compute the mean error for a given formula based on leave-one-out.
errors = []
for i in self.df.index:
# make new_fit, compute cross-validation and store error
df_ = self.df.drop(i, axis=0)
fit = fm.ols(model_desc, data=df_).fit()
cross_prediction = self._predict(fit=fit, df=self.df.loc[[i], :])
errors.append(cross_prediction['predicted'] - cross_prediction[self.y])
cverror = np.mean(np.abs(np.array(errors)))
# compare the model with the current fit
if cverror < best['cverror']:
# better model, keep it
# first, reidentify using all the datapoints
best['fit'] = fm.ols(model_desc, data=self.df).fit()
best['cverror'] = cverror
better_model_found = True
best_x = x
if better_model_found:
self._list_of_fits.append(best['fit'])
self.list_of_cverrors.append(best['cverror'])
else:
# if we did not find a better model, exit
break
# next iteration with the found exog removed
all_model_terms_dict.pop(best_x)
self._fit = self._list_of_fits[-1] | 0.003447 |
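A minimal, self-contained sketch of the leave-one-out error measure used in the selection loop above, for a single fixed formula (assumes pandas, numpy and statsmodels; the column names and data are illustrative, not from the original model):

import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

def loo_cv_error(df, formula, response):
    # Mean absolute leave-one-out prediction error for one OLS formula.
    errors = []
    for i in df.index:
        fit = smf.ols(formula, data=df.drop(i, axis=0)).fit()
        predicted = fit.predict(df.loc[[i], :]).iloc[0]
        errors.append(predicted - df.loc[i, response])
    return np.mean(np.abs(np.array(errors)))

# Illustrative usage on a small synthetic dataset.
df = pd.DataFrame({'x1': range(10), 'y': [2.0 * v + 1.0 for v in range(10)]})
print(loo_cv_error(df, 'y ~ x1', 'y'))  # close to 0 for this noiseless linear data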
def scrape_all_files(self):
"""
Generator that yields one by one the return value for self.read_dcm
for each file within this set
"""
try:
for dcmf in self.items:
yield self.read_dcm(dcmf)
except IOError as ioe:
raise IOError('Error reading DICOM file: {}.'.format(dcmf)) from ioe | 0.008152 |
def eparOptionFactory(master, statusBar, param, defaultParam,
doScroll, fieldWidths,
plugIn=None, editedCallbackObj=None,
helpCallbackObj=None, mainGuiObj=None,
defaultsVerb="Default", bg=None, indent=False,
flagging=False, flaggedColor=None):
"""Return EparOption item of appropriate type for the parameter param"""
# Allow passed-in overrides
if plugIn is not None:
eparOption = plugIn
# If there is an enumerated list, regardless of datatype use EnumEparOption
elif param.choice is not None:
eparOption = EnumEparOption
else:
# Use String for types not in the dictionary
eparOption = _eparOptionDict.get(param.type, StringEparOption)
# Create it
eo = eparOption(master, statusBar, param, defaultParam, doScroll,
fieldWidths, defaultsVerb, bg,
indent=indent, helpCallbackObj=helpCallbackObj,
mainGuiObj=mainGuiObj)
eo.setEditedCallbackObj(editedCallbackObj)
eo.setIsFlagging(flagging, False)
if flaggedColor:
eo.setFlaggedColor(flaggedColor)
return eo | 0.000822 |
def ReleaseProcessedFlow(self, flow_obj, cursor=None):
"""Releases a flow that the worker was processing to the database."""
update_query = """
UPDATE flows
LEFT OUTER JOIN (
SELECT client_id, flow_id, needs_processing
FROM flow_requests
WHERE
client_id = %(client_id)s AND
flow_id = %(flow_id)s AND
request_id = %(next_request_to_process)s AND
needs_processing
) AS needs_processing
ON
flows.client_id = needs_processing.client_id AND
flows.flow_id = needs_processing.flow_id
SET
flows.flow = %(flow)s,
flows.processing_on = NULL,
flows.processing_since = NULL,
flows.processing_deadline = NULL,
flows.next_request_to_process = %(next_request_to_process)s,
flows.flow_state = %(flow_state)s,
flows.user_cpu_time_used_micros = %(user_cpu_time_used_micros)s,
flows.system_cpu_time_used_micros = %(system_cpu_time_used_micros)s,
flows.network_bytes_sent = %(network_bytes_sent)s,
flows.num_replies_sent = %(num_replies_sent)s,
flows.last_update = NOW(6)
WHERE
flows.client_id = %(client_id)s AND
flows.flow_id = %(flow_id)s AND (
needs_processing.needs_processing = FALSE OR
needs_processing.needs_processing IS NULL)
"""
clone = flow_obj.Copy()
clone.processing_on = None
clone.processing_since = None
clone.processing_deadline = None
args = {
"client_id":
db_utils.ClientIDToInt(flow_obj.client_id),
"flow":
clone.SerializeToString(),
"flow_id":
db_utils.FlowIDToInt(flow_obj.flow_id),
"flow_state":
int(clone.flow_state),
"network_bytes_sent":
flow_obj.network_bytes_sent,
"next_request_to_process":
flow_obj.next_request_to_process,
"num_replies_sent":
flow_obj.num_replies_sent,
"system_cpu_time_used_micros":
db_utils.SecondsToMicros(flow_obj.cpu_time_used.system_cpu_time),
"user_cpu_time_used_micros":
db_utils.SecondsToMicros(flow_obj.cpu_time_used.user_cpu_time),
}
rows_updated = cursor.execute(update_query, args)
return rows_updated == 1 | 0.000444 |
def _ensure_authed(self, ptype, message):
"""
Checks message type against current auth state.
If server mode, and auth has not succeeded, and the message is of a
post-auth type (channel open or global request) an appropriate error
response Message is crafted and returned to caller for sending.
Otherwise (client mode, authed, or pre-auth message) returns None.
"""
if (
not self.server_mode
or ptype <= HIGHEST_USERAUTH_MESSAGE_ID
or self.is_authenticated()
):
return None
# WELP. We must be dealing with someone trying to do non-auth things
# without being authed. Tell them off, based on message class.
reply = Message()
# Global requests have no details, just failure.
if ptype == MSG_GLOBAL_REQUEST:
reply.add_byte(cMSG_REQUEST_FAILURE)
# Channel opens let us reject w/ a specific type + message.
elif ptype == MSG_CHANNEL_OPEN:
kind = message.get_text() # noqa
chanid = message.get_int()
reply.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
reply.add_int(chanid)
reply.add_int(OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED)
reply.add_string("")
reply.add_string("en")
# NOTE: Post-open channel messages do not need checking; the above will
        # reject attempts to open channels, meaning that even if a malicious
# user tries to send a MSG_CHANNEL_REQUEST, it will simply fall under
# the logic that handles unknown channel IDs (as the channel list will
# be empty.)
return reply | 0.001186 |
def get_default_env(self):
"""
Vanilla Ansible local commands execute with an environment inherited
from WorkerProcess, we must emulate that.
"""
return dict_diff(
old=ansible_mitogen.process.MuxProcess.original_env,
new=os.environ,
) | 0.006536 |
def _get_path_to_jar(cls, coursier_cache_path, pants_jar_path_base, jar_path):
"""
Create the path to the jar that will live in .pants.d
:param coursier_cache_path: coursier cache location
:param pants_jar_path_base: location under pants workdir to store the hardlink to the coursier cache
:param jar_path: path of the jar
:return:
"""
if os.path.abspath(coursier_cache_path) not in os.path.abspath(jar_path):
# Appending the string 'absolute' to the jar_path and joining that is a hack to work around
# python's os.path.join behavior of throwing away all components that come before an
# absolute path. See https://docs.python.org/3.3/library/os.path.html#os.path.join
return os.path.join(pants_jar_path_base, os.path.normpath('absolute/' + jar_path))
else:
return os.path.join(pants_jar_path_base, 'relative', os.path.relpath(jar_path, coursier_cache_path)) | 0.013978 |
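A short illustration of the os.path.join behaviour that the 'absolute/' prefix above works around: joining onto an absolute path discards every earlier component, so the jar path is first rebased as a relative path (POSIX paths below are illustrative):

import os

pants_base = '/pants.d/coursier/hardlinks'
jar_path = '/home/user/.cache/coursier/foo-1.0.jar'

# os.path.join throws away everything before an absolute component:
print(os.path.join(pants_base, jar_path))
# -> /home/user/.cache/coursier/foo-1.0.jar

# Rebasing under 'absolute/' keeps the jar nested below the pants workdir:
print(os.path.join(pants_base, os.path.normpath('absolute/' + jar_path)))
# -> /pants.d/coursier/hardlinks/absolute/home/user/.cache/coursier/foo-1.0.jar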
def convertVectorColumnsToML(dataset, *cols):
"""
Converts vector columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Vector` type to the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of vector columns to be converted.
New vector columns will be ignored. If unspecified, all old
vector columns will be converted excepted nested ones.
:return:
the input dataset with old vector columns converted to the
new vector type
>>> import pyspark
>>> from pyspark.mllib.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols)) | 0.001889 |
def function_call_action(self, text, loc, fun):
"""Code executed after recognising the whole function call"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("FUN_CALL:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#check number of arguments
if len(self.function_arguments) != self.symtab.get_attribute(self.function_call_index):
raise SemanticException("Wrong number of arguments for function '%s'" % fun.name)
#arguments should be pushed to stack in reverse order
self.function_arguments.reverse()
self.codegen.function_call(self.function_call_index, self.function_arguments)
self.codegen.restore_used_registers()
return_type = self.symtab.get_type(self.function_call_index)
#restore previous function call data
self.function_call_index = self.function_call_stack.pop()
self.function_arguments = self.function_arguments_stack.pop()
register = self.codegen.take_register(return_type)
#move result to a new free register, to allow the next function call
self.codegen.move(self.codegen.take_function_register(return_type), register)
return register | 0.010244 |
def get_cluster_config(
cluster_type,
cluster_name=None,
kafka_topology_base_path=None,
):
"""Return the cluster configuration.
Use the local cluster if cluster_name is not specified.
:param cluster_type: the type of the cluster
:type cluster_type: string
:param cluster_name: the name of the cluster
:type cluster_name: string
:param kafka_topology_base_path: base path to look for <cluster_type>.yaml
    :type kafka_topology_base_path: string
:returns: the cluster
:rtype: ClusterConfig
"""
if not kafka_topology_base_path:
config_dirs = get_conf_dirs()
else:
config_dirs = [kafka_topology_base_path]
topology = None
for config_dir in config_dirs:
try:
topology = TopologyConfiguration(
cluster_type,
config_dir,
)
except MissingConfigurationError:
pass
if not topology:
raise MissingConfigurationError(
"No available configuration for type {0}".format(cluster_type),
)
if cluster_name:
return topology.get_cluster_by_name(cluster_name)
else:
return topology.get_local_cluster() | 0.000836 |
def fetch_sender_txs(self):
"""
Fetch all sender txs via JSON-RPC,
and merge them into our block data.
Try backing off (up to 5 times) if we fail
to fetch transactions via JSONRPC
Return True on success
Raise on error
"""
# fetch remaining sender transactions
if len(self.sender_info.keys()) > 0:
sender_txids = self.sender_info.keys()[:]
sender_txid_batches = []
batch_size = 20
for i in xrange(0, len(sender_txids), batch_size ):
sender_txid_batches.append( sender_txids[i:i+batch_size] )
for i in xrange(0, len(sender_txid_batches)):
sender_txid_batch = sender_txid_batches[i]
log.debug("Fetch %s TXs via JSON-RPC (%s-%s of %s)" % (len(sender_txid_batch), i * batch_size, i * batch_size + len(sender_txid_batch), len(sender_txids)))
sender_txs = None
for j in xrange(0, 5):
sender_txs = self.fetch_txs_rpc( self.bitcoind_opts, sender_txid_batch )
if sender_txs is None:
log.error("Failed to fetch transactions; trying again (%s of %s)" % (j+1, 5))
time.sleep(j+1)
continue
break
if sender_txs is None:
raise Exception("Failed to fetch transactions")
# pair back up with nulldata transactions
for sender_txid, sender_tx in sender_txs.items():
assert sender_txid in self.sender_info.keys(), "Unsolicited sender tx %s" % sender_txid
# match sender outputs to the nulldata tx's inputs
for nulldata_input_vout_index in self.sender_info[sender_txid].keys():
if sender_txid != "0000000000000000000000000000000000000000000000000000000000000000":
# regular tx, not coinbase
assert nulldata_input_vout_index < len(sender_tx['outs']), 'Output index {} is out of bounds for {}'.format(nulldata_input_vout_index, sender_txid)
# save sender info
self.add_sender_info(sender_txid, nulldata_input_vout_index, sender_tx['outs'][nulldata_input_vout_index])
else:
# coinbase
self.add_sender_info(sender_txid, nulldata_input_vout_index, sender_tx['outs'][0])
# update accounting
self.num_txs_received += 1
return True | 0.008023 |
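A Python 3 sketch of the batch-and-retry pattern above, separated from the blockchain specifics (the fetch callable and its None-on-failure contract are assumptions mirroring fetch_txs_rpc):

import time

def fetch_in_batches(txids, fetch_batch, batch_size=20, retries=5):
    # Split txids into fixed-size batches and retry each batch with linear backoff.
    results = {}
    for start in range(0, len(txids), batch_size):
        batch = txids[start:start + batch_size]
        for attempt in range(retries):
            fetched = fetch_batch(batch)   # expected to return a dict, or None on failure
            if fetched is not None:
                results.update(fetched)
                break
            time.sleep(attempt + 1)        # back off before the next attempt
        else:
            raise Exception("Failed to fetch transactions")
    return results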
def field2pattern(self, field, **kwargs):
"""Return the dictionary of OpenAPI field attributes for a set of
        :class:`Regexp <marshmallow.validators.Regexp>` validators.
:param Field field: A marshmallow field.
:rtype: dict
"""
regex_validators = (
v
for v in field.validators
if isinstance(getattr(v, "regex", None), RegexType)
)
v = next(regex_validators, None)
attributes = {} if v is None else {"pattern": v.regex.pattern}
if next(regex_validators, None) is not None:
warnings.warn(
"More than one regex validator defined on {} field. Only the "
"first one will be used in the output spec.".format(type(field)),
UserWarning,
)
return attributes | 0.003555 |
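A hedged usage sketch showing where the 'pattern' value comes from: marshmallow's Regexp validator keeps the compiled expression on its .regex attribute (the field and patterns below are illustrative):

import re
from marshmallow import fields, validate

zip_code = fields.String(validate=[validate.Regexp(r"^\d{5}$"), validate.Length(max=5)])
patterns = [v.regex.pattern for v in zip_code.validators
            if isinstance(getattr(v, "regex", None), re.Pattern)]
print(patterns)  # ['^\\d{5}$'] -> rendered as {"pattern": "^\\d{5}$"} in the spec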
def get_operations(self,
indices: Sequence[LogicalIndex],
qubits: Sequence[ops.Qid]
) -> ops.OP_TREE:
"""Gets the logical operations to apply to qubits.""" | 0.021552 |
def find_loader(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions).
This method is deprecated. Use find_spec() instead.
"""
spec = self.find_spec(fullname)
if spec is None:
return None, []
return spec.loader, spec.submodule_search_locations or [] | 0.004988 |
def _make_tensor_fixed(name, data_type, dims, vals, raw=False):
'''
Make a TensorProto with specified arguments. If raw is False, this
function will choose the corresponding proto field to store the
values based on data_type. If raw is True, use "raw_data" proto
field to store the values, and values should be of type bytes in
this case.
'''
tensor = TensorProto()
tensor.data_type = data_type
tensor.name = name
if (data_type == TensorProto.COMPLEX64 or
data_type == TensorProto.COMPLEX128):
vals = split_complex_to_pairs(vals)
if raw:
tensor.raw_data = vals
else:
field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[
mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[data_type]]
getattr(tensor, field).extend(vals)
tensor.dims.extend(dims)
return tensor | 0.001161 |
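A small usage sketch for the helper above (assumes the onnx package and the module's own imports, such as mapping; the tensor name and values are illustrative):

from onnx import TensorProto

# FLOAT values land in the float_data field when raw=False.
t = _make_tensor_fixed('weights', TensorProto.FLOAT, dims=[2, 2],
                       vals=[1.0, 2.0, 3.0, 4.0])
print(list(t.dims))        # [2, 2]
print(list(t.float_data))  # [1.0, 2.0, 3.0, 4.0]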
def add_text_content_type(application, content_type, default_encoding,
dumps, loads):
"""
Add handler for a text content type.
:param tornado.web.Application application: the application to modify
:param str content_type: the content type to add
:param str default_encoding: encoding to use when one is unspecified
:param dumps: function that dumps a dictionary to a string.
``dumps(dict, encoding:str) -> str``
:param loads: function that loads a dictionary from a string.
``loads(str, encoding:str) -> dict``
Note that the ``charset`` parameter is stripped from `content_type`
if it is present.
"""
parsed = headers.parse_content_type(content_type)
parsed.parameters.pop('charset', None)
normalized = str(parsed)
add_transcoder(application,
handlers.TextContentHandler(normalized, dumps, loads,
default_encoding)) | 0.001018 |
def reverse_lookup(self, state, path):
"""
Returns a chrome URL for a given path, given the current package depth
in an error bundle.
State may either be an error bundle or the actual package stack.
"""
# Make sure the path starts with a forward slash.
if not path.startswith('/'):
path = '/%s' % path
# If the state is an error bundle, extract the package stack.
if not isinstance(state, list):
state = state.package_stack
content_paths = self.get_triples(subject='content')
for content_path in content_paths:
chrome_name = content_path['predicate']
if not content_path['object']:
continue
path_location = content_path['object'].split()[0]
if path_location.startswith('jar:'):
if not state:
continue
                # Parse out the JAR and its location within the chrome.
split_jar_url = path_location[4:].split('!', 2)
# Ignore invalid/unsupported JAR URLs.
if len(split_jar_url) != 2:
continue
# Unpack the JAR URL.
jar_path, package_path = split_jar_url
if jar_path != state[0]:
continue
return 'chrome://%s' % self._url_chunk_join(chrome_name,
package_path,
path)
else:
if state:
continue
path_location = '/%s/' % path_location.strip('/')
rel_path = os.path.relpath(path, path_location)
if rel_path.startswith('../') or rel_path == '..':
continue
return 'chrome://%s' % self._url_chunk_join(chrome_name,
rel_path)
return None | 0.000986 |
def order_delete(backend, kitchen, order_id):
"""
Delete one order or all orders in a kitchen
"""
use_kitchen = Backend.get_kitchen_name_soft(kitchen)
print use_kitchen
if use_kitchen is None and order_id is None:
raise click.ClickException('You must specify either a kitchen or an order_id or be in a kitchen directory')
if order_id is not None:
click.secho('%s - Delete an Order using id %s' % (get_datetime(), order_id), fg='green')
check_and_print(DKCloudCommandRunner.delete_one_order(backend.dki, order_id))
else:
click.secho('%s - Delete all orders in Kitchen %s' % (get_datetime(), use_kitchen), fg='green')
check_and_print(DKCloudCommandRunner.delete_all_order(backend.dki, use_kitchen)) | 0.007802 |
def get_im(self, force_update=False):
"""Get the influence map for the model, generating it if necessary.
Parameters
----------
force_update : bool
Whether to generate the influence map when the function is called.
If False, returns the previously generated influence map if
            available. Defaults to False.
Returns
-------
networkx MultiDiGraph object containing the influence map.
The influence map can be rendered as a pdf using the dot layout
program as follows::
im_agraph = nx.nx_agraph.to_agraph(influence_map)
im_agraph.draw('influence_map.pdf', prog='dot')
"""
if self._im and not force_update:
return self._im
if not self.model:
raise Exception("Cannot get influence map if there is no model.")
def add_obs_for_agent(agent):
obj_mps = list(pa.grounded_monomer_patterns(self.model, agent))
if not obj_mps:
logger.debug('No monomer patterns found in model for agent %s, '
'skipping' % agent)
return
obs_list = []
for obj_mp in obj_mps:
obs_name = _monomer_pattern_label(obj_mp) + '_obs'
# Add the observable
obj_obs = Observable(obs_name, obj_mp, _export=False)
obs_list.append(obs_name)
try:
self.model.add_component(obj_obs)
except ComponentDuplicateNameError as e:
pass
return obs_list
# Create observables for all statements to check, and add to model
# Remove any existing observables in the model
self.model.observables = ComponentSet([])
for stmt in self.statements:
# Generate observables for Modification statements
if isinstance(stmt, Modification):
mod_condition_name = modclass_to_modtype[stmt.__class__]
if isinstance(stmt, RemoveModification):
mod_condition_name = modtype_to_inverse[mod_condition_name]
# Add modification to substrate agent
modified_sub = _add_modification_to_agent(stmt.sub,
mod_condition_name, stmt.residue,
stmt.position)
obs_list = add_obs_for_agent(modified_sub)
# Associate this statement with this observable
self.stmt_to_obs[stmt] = obs_list
# Generate observables for Activation/Inhibition statements
elif isinstance(stmt, RegulateActivity):
regulated_obj, polarity = \
_add_activity_to_agent(stmt.obj, stmt.obj_activity,
stmt.is_activation)
obs_list = add_obs_for_agent(regulated_obj)
# Associate this statement with this observable
self.stmt_to_obs[stmt] = obs_list
elif isinstance(stmt, RegulateAmount):
obs_list = add_obs_for_agent(stmt.obj)
self.stmt_to_obs[stmt] = obs_list
elif isinstance(stmt, Influence):
obs_list = add_obs_for_agent(stmt.obj.concept)
self.stmt_to_obs[stmt] = obs_list
# Add observables for each agent
for ag in self.agent_obs:
obs_list = add_obs_for_agent(ag)
self.agent_to_obs[ag] = obs_list
logger.info("Generating influence map")
self._im = self.generate_im(self.model)
#self._im.is_multigraph = lambda: False
# Now, for every rule in the model, check if there are any observables
# downstream; alternatively, for every observable in the model, get a
# list of rules.
# We'll need the dictionary to check if nodes are observables
node_attributes = nx.get_node_attributes(self._im, 'node_type')
for rule in self.model.rules:
obs_list = []
# Get successors of the rule node
for neighb in self._im.neighbors(rule.name):
# Check if the node is an observable
if node_attributes[neighb] != 'variable':
continue
# Get the edge and check the polarity
edge_sign = _get_edge_sign(self._im, (rule.name, neighb))
obs_list.append((neighb, edge_sign))
self.rule_obs_dict[rule.name] = obs_list
return self._im | 0.001518 |
def update_ticket(self, tid, tickets=None):
"""If the customer should be granted an electronic ticket as a result
of a successful payment, the merchant may (at any time) PUT ticket
information to this endpoint. There is an ordered list of tickets; the
merchant may PUT several times to update the list. The PUT overwrites
any existing content, so if adding additional tickets one must remember
to also include the tickets previously issued.
So far the only code type supported is "string", meaning a text code
that is displayed to the customer, however we will add QR code,
barcodes etc. soon. Please contact mCASH about supporting your
barcode.
Arguments:
tickets:
List of tickets to grant customer
"""
arguments = {'tickets': tickets}
return self.do_req('PUT',
self.merchant_api_base_url + '/payment_request/' +
tid + '/ticket/', arguments) | 0.001925 |
def search(query='', keywords=[], registry=None):
''' generator of objects returned by the search endpoint (both modules and
targets).
Query is a full-text search (description, name, keywords), keywords
search only the module/target description keywords lists.
If both parameters are specified the search is the intersection of the
two queries.
'''
registry = registry or Registry_Base_URL
url = '%s/search' % registry
headers = _headersForRegistry(registry)
params = {
'skip': 0,
'limit': 50
}
if len(query):
params['query'] = query
if len(keywords):
params['keywords[]'] = keywords
while True:
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
objects = ordered_json.loads(response.text)
if len(objects):
for o in objects:
yield o
params['skip'] += params['limit']
else:
break | 0.001946 |
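A stripped-down sketch of the skip/limit pagination loop used above (the endpoint and JSON response shape are placeholders, not the real registry API):

import requests

def paged(url, page_size=50):
    # Yield items one page at a time until the endpoint returns an empty list.
    skip = 0
    while True:
        response = requests.get(url, params={'skip': skip, 'limit': page_size})
        response.raise_for_status()
        items = response.json()
        if not items:
            break
        for item in items:
            yield item
        skip += page_size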
def _connect(self):
""" Returns an aggregator connection. """
with self._lock:
if self._aggregator:
try:
return self._pool_connect(self._aggregator)
except PoolConnectionException:
self._aggregator = None
if not len(self._aggregators):
with self._pool_connect(self._primary_aggregator) as conn:
self._update_aggregator_list(conn)
conn.expire()
random.shuffle(self._aggregators)
last_exception = None
for aggregator in self._aggregators:
self.logger.debug('Attempting connection with %s:%s' % (aggregator[0], aggregator[1]))
try:
conn = self._pool_connect(aggregator)
# connection successful!
self._aggregator = aggregator
return conn
except PoolConnectionException as e:
# connection error
last_exception = e
else:
# bad news bears... try again later
self._aggregator = None
self._aggregators = []
raise last_exception | 0.002358 |
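A minimal sketch of the for/else failover idiom used above: because a successful attempt returns from the function, the else suite runs only when every host failed (connect_fn and ConnectionError are illustrative stand-ins for the pool connection call):

def first_reachable(hosts, connect_fn):
    last_exc = None
    for host in hosts:
        try:
            return connect_fn(host)        # success leaves the loop (and function) early
        except ConnectionError as exc:
            last_exc = exc
    else:
        # Runs only when the loop finished without a successful return.
        if last_exc is None:
            raise ConnectionError("no hosts configured")
        raise last_exc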
def num_columns(self):
"""Number of columns displayed."""
if self.term.is_a_tty:
return self.term.width // self.hint_width
return 1 | 0.011976 |
def create(self, username, **kwargs):
"""
Create a user in Keycloak
http://www.keycloak.org/docs-api/3.4/rest-api/index.html#_users_resource
:param str username:
:param object credentials: (optional)
:param str first_name: (optional)
:param str last_name: (optional)
:param str email: (optional)
:param boolean enabled: (optional)
"""
payload = OrderedDict(username=username)
for key in USER_KWARGS:
from keycloak.admin.clientroles import to_camel_case
if key in kwargs:
payload[to_camel_case(key)] = kwargs[key]
return self._client.post(
url=self._client.get_full_url(
self.get_path('collection', realm=self._realm_name)
),
data=json.dumps(payload, sort_keys=True)
) | 0.002286 |
def storage_accounts(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`StorageAccountsOperations<azure.mgmt.storage.v2015_06_15.operations.StorageAccountsOperations>`
* 2016-01-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2016_01_01.operations.StorageAccountsOperations>`
* 2016-12-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2016_12_01.operations.StorageAccountsOperations>`
* 2017-06-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2017_06_01.operations.StorageAccountsOperations>`
* 2017-10-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2017_10_01.operations.StorageAccountsOperations>`
* 2018-02-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2018_02_01.operations.StorageAccountsOperations>`
* 2018-03-01-preview: :class:`StorageAccountsOperations<azure.mgmt.storage.v2018_03_01_preview.operations.StorageAccountsOperations>`
* 2018-07-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2018_07_01.operations.StorageAccountsOperations>`
"""
api_version = self._get_api_version('storage_accounts')
if api_version == '2015-06-15':
from .v2015_06_15.operations import StorageAccountsOperations as OperationClass
elif api_version == '2016-01-01':
from .v2016_01_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2016-12-01':
from .v2016_12_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2017-06-01':
from .v2017_06_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2017-10-01':
from .v2017_10_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview.operations import StorageAccountsOperations as OperationClass
elif api_version == '2018-07-01':
from .v2018_07_01.operations import StorageAccountsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | 0.007877 |
def new_config_event(self):
"""Called by the event loop when new config is available.
"""
try:
self.on_set_config()
except Exception as ex:
self.logger.exception(ex)
raise StopIteration() | 0.007813 |
def visit_DictComp(self, node: ast.DictComp) -> Any:
"""Compile the dictionary comprehension as a function and call it."""
result = self._execute_comprehension(node=node)
for generator in node.generators:
self.visit(generator.iter)
self.recomputed_values[node] = result
return result | 0.005935 |
def json_2_company(json_obj):
"""
transform JSON obj coming from Ariane to ariane_clip3 object
:param json_obj: the JSON obj coming from Ariane
:return: ariane_clip3 Company object
"""
LOGGER.debug("Company.json_2_company")
return Company(cmpid=json_obj['companyID'],
name=json_obj['companyName'],
description=json_obj['companyDescription'],
application_ids=json_obj['companyApplicationsID'],
ost_ids=json_obj['companyOSTypesID']) | 0.00346 |
def validate_properties(self):
"""Validates that the required properties for this Resource have been populated, and that all properties have
valid values.
:returns: True if all properties are valid
:rtype: bool
:raises TypeError: if any properties are invalid
"""
for name, property_type in self.property_types.items():
value = getattr(self, name)
# If the property value is an intrinsic function, any remaining validation has to be left to CloudFormation
if property_type.supports_intrinsics and self._is_intrinsic_function(value):
continue
# If the property value has not been set, verify that the property is not required.
if value is None:
if property_type.required:
raise InvalidResourceException(
self.logical_id,
"Missing required property '{property_name}'.".format(property_name=name))
# Otherwise, validate the value of the property.
elif not property_type.validate(value, should_raise=False):
raise InvalidResourceException(
self.logical_id,
"Type of property '{property_name}' is invalid.".format(property_name=name)) | 0.006024 |
def execute(self):
"""
Execute all of the saved commands and return results.
"""
try:
for key, value in self._watched_keys.items():
if self.mock_redis.redis.get(self.mock_redis._encode(key)) != value:
raise WatchError("Watched variable changed.")
return [command() for command in self.commands]
finally:
self._reset() | 0.006993 |
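A hedged sketch of the WATCH/MULTI/EXEC flow that the mock above reproduces, using the redis-py style API (requires a reachable Redis server; the key name is illustrative):

import redis

client = redis.Redis()
pipe = client.pipeline()
pipe.watch('balance')                    # start watching before reading
balance = int(pipe.get('balance') or 0)  # immediate-mode read while watching
pipe.multi()                             # switch to buffered/transactional mode
pipe.set('balance', balance + 10)
try:
    pipe.execute()                       # raises WatchError if 'balance' changed since watch()
except redis.WatchError:
    pass                                 # a real caller would retry the read-modify-write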