text | score
---|---|
def _create_producer(self, settings):
"""Tries to establish a Kafka consumer connection"""
try:
brokers = settings['KAFKA_HOSTS']
self.logger.debug("Creating new kafka producer using brokers: " +
str(brokers))
return KafkaProducer(bootstrap_servers=brokers,
value_serializer=lambda m: json.dumps(m),
retries=3,
linger_ms=settings['KAFKA_PRODUCER_BATCH_LINGER_MS'],
buffer_memory=settings['KAFKA_PRODUCER_BUFFER_BYTES'])
except KeyError as e:
self.logger.error('Missing setting named ' + str(e),
{'ex': traceback.format_exc()})
except:
self.logger.error("Couldn't initialize kafka producer in plugin.",
{'ex': traceback.format_exc()})
raise | 0.008256 |
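A minimal usage sketch of the producer this helper builds, assuming kafka-python is installed and a broker is reachable on localhost:9092; the broker address and topic name are illustrative, not taken from the plugin:

import json
from kafka import KafkaProducer

# Same construction as above, but the serializer encodes to bytes,
# which kafka-python expects from a value_serializer.
producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
                         value_serializer=lambda m: json.dumps(m).encode('utf-8'),
                         retries=3)

# Send a JSON-serializable payload to a hypothetical topic and wait for delivery.
producer.send('demo.crawled_firehose', {'url': 'http://example.com', 'depth': 0})
producer.flush()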
def create(model_config, model, vec_env, storage, takes, parallel_envs, action_noise=None, sample_args=None):
""" Vel factory function """
return EvaluateEnvCommand(
model_config=model_config,
model_factory=model,
env_factory=vec_env,
parallel_envs=parallel_envs,
action_noise=action_noise,
storage=storage,
takes=takes,
sample_args=sample_args
) | 0.004739 |
def last(args, dbtype=None):
"""
%prog database.fasta query.fasta
Run LAST by calling LASTDB and LASTAL. LAST program available:
<http://last.cbrc.jp>
Works with LAST-719.
"""
p = OptionParser(last.__doc__)
p.add_option("--dbtype", default="nucl",
choices=("nucl", "prot"),
help="Molecule type of subject database")
p.add_option("--path", help="Specify LAST path")
p.add_option("--mask", default=False, action="store_true", help="Invoke -c in lastdb")
p.add_option("--format", default="BlastTab",
choices=("TAB", "MAF", "BlastTab", "BlastTab+"),
help="Output format")
p.add_option("--minlen", default=0, type="int",
help="Filter alignments by how many bases match")
p.add_option("--minid", default=0, type="int", help="Minimum sequence identity")
p.set_cpus()
p.set_params()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
subject, query = args
path = opts.path
cpus = opts.cpus
if not dbtype:
dbtype = opts.dbtype
getpath = lambda x: op.join(path, x) if path else x
lastdb_bin = getpath("lastdb")
lastal_bin = getpath("lastal")
subjectdb = subject.rsplit(".", 1)[0]
run_lastdb(infile=subject, outfile=subjectdb + ".prj", mask=opts.mask, \
lastdb_bin=lastdb_bin, dbtype=dbtype)
u = 2 if opts.mask else 0
cmd = "{0} -u {1}".format(lastal_bin, u)
cmd += " -P {0} -i3G".format(cpus)
cmd += " -f {0}".format(opts.format)
cmd += " {0} {1}".format(subjectdb, query)
minlen = opts.minlen
minid = opts.minid
extra = opts.extra
assert minid != 100, "Perfect match not yet supported"
mm = minid / (100 - minid)
if minlen:
extra += " -e{0}".format(minlen)
if minid:
extra += " -r1 -q{0} -a{0} -b{0}".format(mm)
if extra:
cmd += " " + extra.strip()
lastfile = get_outfile(subject, query, suffix="last")
sh(cmd, outfile=lastfile) | 0.002911 |
def _decompress_nist256(pubkey):
"""
Load public key from the serialized blob.
The least-significant bit of the leading byte is used to decide how to recreate
the y-coordinate from the specified x-coordinate. See bitcoin/main.py#L198
(from https://github.com/vbuterin/pybitcointools/) for details.
"""
if pubkey[:1] in {b'\x02', b'\x03'}: # set by ecdsa_get_public_key33()
curve = ecdsa.NIST256p
P = curve.curve.p()
A = curve.curve.a()
B = curve.curve.b()
x = util.bytes2num(pubkey[1:33])
beta = pow(int(x * x * x + A * x + B), int((P + 1) // 4), int(P))
p0 = util.bytes2num(pubkey[:1])
y = (P - beta) if ((beta + p0) % 2) else beta
point = ecdsa.ellipticcurve.Point(curve.curve, x, y)
return ecdsa.VerifyingKey.from_public_point(point, curve=curve,
hashfunc=hashfunc)
else:
return None | 0.001047 |
def get_template_names(self):
"""
Dispatch template according to the kind of request: ajax or normal.
"""
if self.request.is_ajax():
return [self.list_template_name]
else:
return super(Search, self).get_template_names() | 0.007067 |
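For context, a minimal sketch of the kind of Django class-based view this dispatch usually lives in; the view name and template paths are hypothetical:

from django.views.generic import ListView

class SearchView(ListView):
    template_name = 'search/results.html'        # full page (hypothetical path)
    list_template_name = 'search/_results.html'  # AJAX fragment (hypothetical path)

    def get_template_names(self):
        # Serve only the list fragment for AJAX requests, the full page otherwise.
        if self.request.is_ajax():
            return [self.list_template_name]
        return super(SearchView, self).get_template_names()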
def find_files(self):
""" Gets modules routes.py and converts to module imports """
modules = self.evernode_app.get_modules()
root_path = sys.path[0] if self.evernode_app.root_path is None \
else self.evernode_app.root_path
dirs = [dict(
dir=os.path.join(root_path, 'resources', 'lang'), module="root")]
for module_name in modules:
modules_folder = 'modules{}%s'.format(os.sep)
if module_name is not None:
modules_folder = modules_folder % (module_name.strip(os.sep))
else:
continue
path = os.path.join(
root_path, modules_folder, 'resources', 'lang')
if os.path.isdir(path):
dirs.append(dict(dir=path, module=module_name))
for dir in dirs:
module_pack = {
'name': dir['module'],
'languages': [],
'file_packs': []
}
for path, subdirs, files in os.walk(dir['dir']):
for subdir in subdirs:
module_pack['languages'].append(subdir)
for name in files:
module_pack['file_packs'].append(dict(
file=os.path.join(path, name),
name=name.rsplit('.', 1)[0].lower(),
language=path.split("lang%s" % (os.sep), 1)[1].strip()
))
self.module_packs.append(module_pack)
for module_pack in self.module_packs:
module_pack['file_packs'] = \
list({v['file']: v for v in module_pack['file_packs']}
.values())
if self.app.config['DEBUG']:
print('--- Loaded Language Files ---')
print("Loaded Dirs: " + str(dirs))
print("Loaded Language Packs: " + str(self.module_packs)) | 0.00103 |
def transformations(self, relationship="all"):
"""Get all the transformations of this info.
Return a list of transformations involving this info. ``relationship``
can be "parent" (in which case only transformations where the info is
the ``info_in`` are returned), "child" (in which case only
transformations where the info is the ``info_out`` are returned) or
"all" (in which case any transformations where the info is the
``info_out`` or the ``info_in`` are returned). The default is "all".
"""
if relationship not in ["all", "parent", "child"]:
raise ValueError(
"You cannot get transformations of relationship {}".format(relationship)
+ "Relationship can only be parent, child or all."
)
if relationship == "all":
return Transformation.query.filter(
and_(
Transformation.failed == false(),
or_(
Transformation.info_in == self, Transformation.info_out == self
),
)
).all()
if relationship == "parent":
return Transformation.query.filter_by(
info_in_id=self.id, failed=False
).all()
if relationship == "child":
return Transformation.query.filter_by(
info_out_id=self.id, failed=False
).all() | 0.002717 |
def copy_file(
source_path,
target_path,
allow_undo=True,
no_confirm=False,
rename_on_collision=True,
silent=False,
extra_flags=0,
hWnd=None
):
"""Perform a shell-based file copy. Copying in
this way allows the possibility of undo, auto-renaming,
and showing the "flying file" animation during the copy.
The default options allow for undo, don't automatically
clobber on a name clash, automatically rename on collision
and display the animation.
"""
return _file_operation(
shellcon.FO_COPY,
source_path,
target_path,
allow_undo,
no_confirm,
rename_on_collision,
silent,
extra_flags,
hWnd
) | 0.001368 |
def first(self, cascadeFetch=False):
'''
First - Returns the oldest record (lowest primary key) with current filters.
This makes an efficient queue, as it only fetches a single object.
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
@return - Instance of Model object, or None if no items match current filters
'''
obj = None
matchedKeys = self.getPrimaryKeys(sortByAge=True)
if matchedKeys:
# Loop so we don't return None when there are items, if item is deleted between getting key and getting obj
while matchedKeys and obj is None:
obj = self.get(matchedKeys.pop(0), cascadeFetch=cascadeFetch)
return obj | 0.030457 |
def get_data(source, fields='*', env=None, first_row=0, count=-1, schema=None):
""" A utility function to get a subset of data from a Table, Query, Pandas dataframe or List.
Args:
source: the source of the data. Can be a Table, Pandas DataFrame, List of dictionaries or
lists, or a string, in which case it is expected to be the name of a table in BQ.
fields: a list of fields that we want to return as a list of strings, comma-separated string,
or '*' for all.
env: if the data source is a Query module, this is the set of variable overrides for
parameterizing the Query.
first_row: the index of the first row to return; default 0. Only used if count is non-negative.
count: the number of rows to return. If negative (the default), return all rows.
schema: the schema of the data. Optional; if supplied this can be used to help do type-coercion.
Returns:
A tuple consisting of a dictionary and a count; the dictionary has two entries: 'cols'
which is a list of column metadata entries for Google Charts, and 'rows' which is a list of
lists of values. The count is the total number of rows in the source (independent of the
first_row/count parameters).
Raises:
Exception if the request could not be fulfilled.
"""
ipy = IPython.get_ipython()
if env is None:
env = {}
env.update(ipy.user_ns)
if isinstance(source, basestring):
source = google.datalab.utils.get_item(ipy.user_ns, source, source)
if isinstance(source, basestring):
source = google.datalab.bigquery.Table(source)
if isinstance(source, list):
if len(source) == 0:
return _get_data_from_empty_list(source, fields, first_row, count, schema)
elif isinstance(source[0], dict):
return _get_data_from_list_of_dicts(source, fields, first_row, count, schema)
elif isinstance(source[0], list):
return _get_data_from_list_of_lists(source, fields, first_row, count, schema)
else:
raise Exception("To get tabular data from a list it must contain dictionaries or lists.")
elif isinstance(source, pandas.DataFrame):
return _get_data_from_dataframe(source, fields, first_row, count, schema)
elif isinstance(source, google.datalab.bigquery.Query):
return _get_data_from_table(source.execute().result(), fields, first_row, count, schema)
elif isinstance(source, google.datalab.bigquery.Table):
return _get_data_from_table(source, fields, first_row, count, schema)
else:
raise Exception("Cannot chart %s; unsupported object type" % source) | 0.012505 |
def mahalanobis_norm(self, dx):
"""compute the Mahalanobis norm that is induced by the adapted
sample distribution, covariance matrix ``C`` times ``sigma**2``,
including ``sigma_vec``. The expected Mahalanobis distance to
the sample mean is about ``sqrt(dimension)``.
Argument
--------
A *genotype* difference `dx`.
Example
-------
>>> import cma, numpy
>>> es = cma.CMAEvolutionStrategy(numpy.ones(10), 1)
>>> xx = numpy.random.randn(2, 10)
>>> d = es.mahalanobis_norm(es.gp.geno(xx[0]-xx[1]))
`d` is the distance "in" the true sample distribution,
sampled points have a typical distance of ``sqrt(2*es.N)``,
where ``es.N`` is the dimension, and an expected distance of
close to ``sqrt(N)`` to the sample mean. In the example,
`d` is the Euclidean distance, because C = I and sigma = 1.
"""
return sqrt(sum((self.D**-1. * np.dot(self.B.T, dx / self.sigma_vec))**2)) / self.sigma | 0.002876 |
def get_units_property(self, *, unit_ids=None, property_name):
'''Returns a list of values stored under the property name corresponding
to a list of units
Parameters
----------
unit_ids: list
The unit ids for which the property will be returned
Defaults to get_unit_ids()
property_name: str
The name of the property
Returns
-------
values
The list of values
'''
if unit_ids is None:
unit_ids = self.get_unit_ids()
values = [self.get_unit_property(unit_id=unit, property_name=property_name) for unit in unit_ids]
return values | 0.005772 |
def uploadDeviceConfig(self):
"""Upload the device configuration of the fake device
selected in the __init__ method to the Google account."""
upload = googleplay_pb2.UploadDeviceConfigRequest()
upload.deviceConfiguration.CopyFrom(self.deviceBuilder.getDeviceConfig())
headers = self.getHeaders(upload_fields=True)
stringRequest = upload.SerializeToString()
response = requests.post(UPLOAD_URL, data=stringRequest,
headers=headers,
verify=ssl_verify,
timeout=60,
proxies=self.proxies_config)
response = googleplay_pb2.ResponseWrapper.FromString(response.content)
try:
if response.payload.HasField('uploadDeviceConfigResponse'):
self.device_config_token = response.payload.uploadDeviceConfigResponse
self.device_config_token = self.device_config_token.uploadDeviceConfigToken
except ValueError:
pass | 0.004686 |
def lookup_friendships(self, user_ids=None, screen_names=None):
""" Perform bulk look up of friendships from user ID or screenname """
return self._lookup_friendships(list_to_csv(user_ids), list_to_csv(screen_names)) | 0.012931 |
def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
Using this service avoids extra loops over child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
item = super(ReferenceSamplesView, self).folderitem(obj, item, index)
# ensure we have an object and not a brain
obj = api.get_object(obj)
url = api.get_url(obj)
title = api.get_title(obj)
item["Title"] = title
item["replace"]["Title"] = get_link(url, value=title)
item["allow_edit"] = self.get_editable_columns()
# Supported Services
supported_services_choices = self.make_supported_services_choices(obj)
item["choices"]["SupportedServices"] = supported_services_choices
# Position
item["Position"] = "new"
item["choices"]["Position"] = self.make_position_choices()
return item | 0.001813 |
def raise_for_missing_name(self, line: str, position: int, namespace: str, name: str) -> None:
"""Raise an exception if the namespace is not defined or if it does not validate the given name."""
self.raise_for_missing_namespace(line, position, namespace, name)
if self.has_enumerated_namespace(namespace) and not self.has_enumerated_namespace_name(namespace, name):
raise MissingNamespaceNameWarning(self.get_line_number(), line, position, namespace, name)
if self.has_regex_namespace(namespace) and not self.has_regex_namespace_name(namespace, name):
raise MissingNamespaceRegexWarning(self.get_line_number(), line, position, namespace, name) | 0.011412 |
def _updateMinDutyCyclesGlobal(self):
"""
Updates the minimum duty cycles in a global fashion. Sets the minimum duty
cycles for the overlap of all columns to be a percent of the maximum in the
region, specified by minPctOverlapDutyCycle. Its functionality is equivalent
to _updateMinDutyCyclesLocal, but this function exploits the globality of
the computation to perform it in a straightforward and efficient manner.
"""
self._minOverlapDutyCycles.fill(
self._minPctOverlapDutyCycles * self._overlapDutyCycles.max()
) | 0.001786 |
def get_seh_chain(self):
"""
@rtype: list of tuple( int, int )
@return: List of structured exception handlers.
Each SEH is represented as a tuple of two addresses:
- Address of this SEH block
- Address of the SEH callback function
Do not confuse this with the contents of the SEH block itself,
where the first member is a pointer to the B{next} block instead.
@raise NotImplementedError:
This method is only supported in 32 bits versions of Windows.
"""
seh_chain = list()
try:
process = self.get_process()
seh = self.get_seh_chain_pointer()
while seh != 0xFFFFFFFF:
seh_func = process.read_pointer( seh + 4 )
seh_chain.append( (seh, seh_func) )
seh = process.read_pointer( seh )
except WindowsError:
seh_chain.append( (seh, None) )
return seh_chain | 0.01001 |
def remove(name, local):
'''Remove a module named NAME. Will remove the first resolved module named NAME. You can also specify a full path to a module. Use the --local option
to ensure removal of modules local to the currently active environment.'''
click.echo()
if not local: # Use resolver to find module
try:
r = cpenv.resolve(name)
except cpenv.ResolveError as e:
click.echo(e)
return
obj = r.resolved[0]
else: # Try to find module in active environment
env = cpenv.get_active_env()
if not env:
click.echo('You must activate an env to remove local modules')
return
mod = env.get_module(name)
if not mod:
click.echo('Failed to resolve module: ' + name)
return
obj = mod
if isinstance(obj, cpenv.VirtualEnvironment):
click.echo('{} is an environment. Use `cpenv remove` instead.'.format(name))
return
click.echo(format_objects([obj]))
click.echo()
user_confirmed = click.confirm(
red('Are you sure you want to remove this module?')
)
if user_confirmed:
click.echo('Attempting to remove...', nl=False)
try:
obj.remove()
except Exception as e:
click.echo(bold_red('FAILED'))
click.echo(e)
else:
click.echo(bold_green('OK!')) | 0.002827 |
def get_strategy(name_or_cls):
"""Return the strategy identified by its name. If ``name_or_class`` is a class,
it will be simply returned.
"""
if isinstance(name_or_cls, six.string_types):
if name_or_cls not in STRATS:
raise MutationError("strat is not defined")
return STRATS[name_or_cls]()
return name_or_cls() | 0.00554 |
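A self-contained sketch of the same name-or-class dispatch pattern; the registry and the strategy class below are hypothetical stand-ins, not the module's real STRATS:

import six

class NopStrategy(object):
    """Hypothetical strategy that leaves values untouched."""
    def mutate(self, value):
        return value

STRATS = {'nop': NopStrategy}

def resolve_strategy(name_or_cls):
    # Accept either a registered name or a class, and return an instance.
    if isinstance(name_or_cls, six.string_types):
        return STRATS[name_or_cls]()
    return name_or_cls()

assert resolve_strategy('nop').mutate(42) == 42        # looked up by name
assert resolve_strategy(NopStrategy).mutate(42) == 42  # class passed directly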
def reqScannerData(
self, subscription: ScannerSubscription,
scannerSubscriptionOptions: List[TagValue] = None,
scannerSubscriptionFilterOptions:
List[TagValue] = None) -> ScanDataList:
"""
Do a blocking market scan by starting a subscription and canceling it
after the initial list of results are in.
This method is blocking.
https://interactivebrokers.github.io/tws-api/market_scanners.html
Args:
subscription: Basic filters.
scannerSubscriptionOptions: Unknown.
scannerSubscriptionFilterOptions: Advanced generic filters.
"""
return self._run(
self.reqScannerDataAsync(
subscription, scannerSubscriptionOptions,
scannerSubscriptionFilterOptions)) | 0.00237 |
def p_with_statement(self, p):
"""with_statement : WITH LPAREN expr RPAREN statement"""
p[0] = self.asttypes.With(expr=p[3], statement=p[5])
p[0].setpos(p) | 0.011173 |
def _enhance_bass(self):
"""Update best span choices with bass enhancement as requested by user (Eq. 11)."""
if not self._bass_enhancement:
# like in supsmu, skip if alpha=0
return
bass_span = DEFAULT_SPANS[BASS_INDEX]
enhanced_spans = []
for i, best_span_here in enumerate(self._best_span_at_each_point):
best_smooth_index = DEFAULT_SPANS.index(best_span_here)
best_span = DEFAULT_SPANS[best_smooth_index]
best_span_residual = self._residual_smooths[best_smooth_index][i]
bass_span_residual = self._residual_smooths[BASS_INDEX][i]
if 0 < best_span_residual < bass_span_residual:
ri = best_span_residual / bass_span_residual
bass_factor = ri ** (10.0 - self._bass_enhancement)
enhanced_spans.append(best_span + (bass_span - best_span) * bass_factor)
else:
enhanced_spans.append(best_span)
self._best_span_at_each_point = enhanced_spans | 0.003835 |
def _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False):
"""Create a subgraph with a custom gradient.
Args:
fn: function that takes inputs as arguments and produces 1 or more Tensors.
inputs: list<Tensor>, will be passed as fn(*inputs).
grad_fn: function with signature
(inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars),
all of which are lists of Tensors.
use_global_vars: if True, variables will be the global variables created.
If False, will be the trainable variables.
Returns:
fn(*inputs)
"""
vs = tf.get_variable_scope()
get_vars_fn = (
vs.global_variables if use_global_vars else vs.trainable_variables)
len_before_vars = len(get_vars_fn())
inputs = list(inputs)
outputs = fn(*inputs)
train_vars = get_vars_fn()[len_before_vars:]
if grad_fn is None:
return outputs
if not isinstance(outputs, (tuple, list)):
outputs = [outputs]
outputs = list(outputs)
defun_inputs = [inputs, train_vars, outputs]
def custom_grad_fn(op, *dys):
"""Custom grad fn applying grad_fn for identity Defun."""
fn_inputs, fn_vars, fn_outputs = tf.contrib.framework.nest.pack_sequence_as(
defun_inputs, list(op.inputs))
dys = list(dys)
assert len(fn_outputs) == len(outputs)
assert len(fn_outputs) == len(dys)
grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys)
grad_outputs = [None] * len(fn_outputs)
return tuple(grad_inputs + grad_vars + grad_outputs)
# The Defun takes as input the original inputs, the trainable variables
# created in fn, and the outputs. In the forward it passes through the
# outputs. In the backwards, it produces gradients for the original inputs
# and the trainable variables.
in_types = [t.dtype for t in inputs]
out_types = [t.dtype for t in outputs]
var_types = [t.dtype for t in train_vars]
@function.Defun(
*(in_types + var_types + out_types),
func_name="identity_custom_grad%d" % ops.uid(),
python_grad_func=custom_grad_fn,
shape_func=lambda _: [t.get_shape() for t in outputs])
def identity(*args):
_, _, outs = tf.contrib.framework.nest.pack_sequence_as(defun_inputs, args)
return tuple([tf.identity(t) for t in outs])
flat_inputs = tf.contrib.framework.nest.flatten(defun_inputs)
id_out = identity(*flat_inputs)
return id_out | 0.010943 |
def _check_directory_arguments(self):
"""
Validates arguments for loading from directories, including static image and time series directories.
"""
if not os.path.isdir(self.datapath):
raise NotADirectoryError('Directory does not exist: %s' % self.datapath)
if self.time_delay:
if not isinstance(self.time_delay, int):
raise ValueError('Time step argument must be an integer, but gave: %s' % str(self.time_delay))
if self.time_delay < 1:
raise ValueError('Time step argument must be greater than 0, but gave: %i' % self.time_delay) | 0.00936 |
def Y_ampl(self, new_y_scale):
"""Make scaling on Y axis using predefined values"""
self.parent.value('y_scale', new_y_scale)
self.parent.traces.display() | 0.011236 |
def _safe_get(mapping, key, default=None):
"""Helper for accessing style values.
It exists to avoid checking whether `mapping` is indeed a mapping before
trying to get a key. In the context of style dicts, this eliminates "is
this a mapping" checks in two common situations: 1) a style argument is
None, and 2) a style key's value (e.g., width) can be either a mapping or a
plain value.
"""
try:
return mapping.get(key, default)
except AttributeError:
return default | 0.001923 |
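The two situations the docstring names, sketched with an illustrative style dict (assumes _safe_get from above is in scope):

style = {'width': {'max': 80, 'min': 20}}

# 1) The style argument may simply be None.
assert _safe_get(None, 'width') is None

# 2) A key's value can be either a mapping or a plain value.
width = _safe_get(style, 'width')
assert _safe_get(width, 'max') == 80           # nested mapping
assert _safe_get(42, 'max', default=42) == 42  # plain value falls back to the default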
def Nusselt_laminar(Tsat, Tw, rhog, rhol, kl, mul, Hvap, L, angle=90.):
r'''Calculates heat transfer coefficient for laminar film condensation
of a pure chemical on a flat plate, as presented in [1]_ according to an
analysis performed by Nusselt in 1916.
.. math::
h=0.943\left[\frac{g\sin(\theta)\rho_{liq}(\rho_l-\rho_v)k_{l}^3
\Delta H_{vap}}{\mu_l(T_{sat}-T_w)L}\right]^{0.25}
Parameters
----------
Tsat : float
Saturation temperature at operating pressure [K]
Tw : float
Wall temperature, [K]
rhog : float
Density of the gas [kg/m^3]
rhol : float
Density of the liquid [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
L : float
Length of the plate [m]
angle : float, optional
Angle of inclination of the plate [degrees]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Optionally, the plate may be inclined.
The constant 0.943 is actually:
.. math::
2\sqrt{2}/3
Examples
--------
p. 578 in [1]_, matches exactly.
>>> Nusselt_laminar(Tsat=370, Tw=350, rhog=7.0, rhol=585., kl=0.091,
... mul=158.9E-6, Hvap=776900, L=0.1)
1482.206403453679
References
----------
.. [1] Hewitt, G. L. Shires T. Reg Bott G. F., George L. Shires, and
T. R. Bott. Process Heat Transfer. 1E. Boca Raton: CRC Press, 1994.
'''
return 2.*2.**0.5/3.*(kl**3*rhol*(rhol - rhog)*g*sin(angle/180.*pi)
*Hvap/(mul*(Tsat - Tw)*L))**0.25 | 0.001738 |
def import_context(cls, context):
""" Import context to corresponding WContextProto object (:meth:`WContext.export_context` reverse operation)
:param context: context to import
:return: WContext
"""
if context is None or len(context) == 0:
return
result = WContext(context[0][0], context[0][1])
for iter_context in context[1:]:
result = WContext(iter_context[0], context_value=iter_context[1], linked_context=result)
return result | 0.030837 |
def _type_repr(obj):
"""Return the repr() of an object, special-casing types (internal helper).
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
if isinstance(obj, type) and not isinstance(obj, TypingMeta):
if obj.__module__ == 'builtins':
return _qualname(obj)
return '%s.%s' % (obj.__module__, _qualname(obj))
if obj is ...:
return '...'
if isinstance(obj, types.FunctionType):
return obj.__name__
return repr(obj) | 0.001471 |
def _create_app(self, color_depth, term='xterm'):
"""
Create CommandLineInterface for this client.
Called when the client wants to attach the UI to the server.
"""
output = Vt100_Output(_SocketStdout(self._send_packet),
lambda: self.size,
term=term,
write_binary=False)
self.client_state = self.pymux.add_client(
input=self._pipeinput, output=output, connection=self, color_depth=color_depth)
print('Start running app...')
future = self.client_state.app.run_async()
print('Start running app got future...', future)
@future.add_done_callback
def done(_):
print('APP DONE.........')
print(future.result())
self._close_connection() | 0.003501 |
def update_board(self, query_params=None):
'''
Update this board's information. Returns a new board.
'''
board_json = self.fetch_json(
uri_path=self.base_uri,
http_method='PUT',
query_params=query_params or {}
)
return self.create_board(board_json) | 0.006006 |
def ReadSignedBinaryReferences(
self, binary_id,
cursor=None):
"""Reads blob references for the signed binary with the given id."""
cursor.execute(
"""
SELECT blob_references, UNIX_TIMESTAMP(timestamp)
FROM signed_binary_references
WHERE binary_type = %s AND binary_path_hash = %s
""", [
binary_id.binary_type.SerializeToDataStore(),
mysql_utils.Hash(binary_id.path)
])
row = cursor.fetchone()
if not row:
raise db.UnknownSignedBinaryError(binary_id)
raw_references, timestamp = row
# TODO(hanuszczak): pytype does not understand overloads, so we have to cast
# to a non-optional object.
datetime = cast(rdfvalue.RDFDatetime,
mysql_utils.TimestampToRDFDatetime(timestamp))
references = rdf_objects.BlobReferences.FromSerializedString(raw_references)
return references, datetime | 0.007709 |
def MultiNotifyQueue(self, notifications, mutation_pool=None):
"""This is the same as NotifyQueue but for several session_ids at once.
Args:
notifications: A list of notifications.
mutation_pool: A MutationPool object to schedule Notifications on.
Raises:
RuntimeError: An invalid session_id was passed.
"""
extract_queue = lambda notification: notification.session_id.Queue()
for queue, notifications in iteritems(
collection.Group(notifications, extract_queue)):
self._MultiNotifyQueue(queue, notifications, mutation_pool=mutation_pool) | 0.006711 |
def create_directory(self):
"""
Creates a directory under the selected directory (if the selected item
is a file, the parent directory is used).
"""
src = self.get_current_path()
name, status = QtWidgets.QInputDialog.getText(
self.tree_view, _('Create directory'), _('Name:'),
QtWidgets.QLineEdit.Normal, '')
if status:
fatal_names = ['.', '..']
for i in fatal_names:
if i == name:
QtWidgets.QMessageBox.critical(
self.tree_view, _("Error"), _("Wrong directory name"))
return
if os.path.isfile(src):
src = os.path.dirname(src)
dir_name = os.path.join(src, name)
try:
os.makedirs(dir_name, exist_ok=True)
except OSError as e:
QtWidgets.QMessageBox.warning(
self.tree_view, _('Failed to create directory'),
_('Failed to create directory: "%s".\n\n%s') % (dir_name, str(e))) | 0.002747 |
def render_tree(self, data):
"""prepare the flows without saving to file
this method has been decoupled from render_flow to allow better
unit testing
"""
# TODO: find a way to make this localization aware...
# because ATM it formats texts using French style numbers...
# best way would be to let the user inject its own vars...
# but this would not work on fusion servers...
# so we must find a way to localize this a bit... or remove it and
# consider our caller must pre - render its variables to the desired
# locale...?
new_data = dict(
decimal=decimal,
format_float=(
lambda val: (
isinstance(
val, decimal.Decimal
) or isinstance(
val, float
)
) and str(val).replace('.', ',') or val
),
format_percentage=(
lambda val: ("%0.2f %%" % val).replace('.', ',')
)
)
# Soft page breaks are hints for applications for rendering a page
# break. Soft page breaks in for loops may compromise the paragraph
# formatting, especially the margins. Open-/LibreOffice will regenerate
# the page breaks when displaying the document. Therefore it is safe to
# remove them.
self.remove_soft_breaks()
# first we need to transform the py3o template into a valid
# Genshi template.
starting_tags, closing_tags = self.handle_instructions(
self.content_trees,
self.namespaces
)
parents = [tag[0].getparent() for tag in starting_tags]
linknum = len(parents)
parentnum = len(set(parents))
if not linknum == parentnum:
raise TemplateException(
"Every py3o link instruction should be on its own line"
)
for link, py3o_base in starting_tags:
self.handle_link(
link,
py3o_base,
closing_tags[id(link)]
)
self.__prepare_userfield_decl()
self.__prepare_usertexts()
self.__replace_image_links()
self.__add_images_to_manifest()
for fnum, content_tree in enumerate(self.content_trees):
content = lxml.etree.tostring(content_tree.getroot())
if self.ignore_undefined_variables:
template = MarkupTemplate(content, lookup='lenient')
else:
template = MarkupTemplate(content)
# then we need to render the genshi template itself by
# providing the data to genshi
template_dict = {}
template_dict.update(data.items())
template_dict.update(new_data.items())
self.output_streams.append(
(
self.templated_files[fnum],
template.generate(**template_dict)
)
) | 0.000653 |
def preview(self, argv):
"""Retrieve the preview for the specified search jobs."""
opts = cmdline(argv, FLAGS_RESULTS)
self.foreach(opts.args, lambda job:
output(job.preview(**opts.kwargs))) | 0.017621 |
def __getFormat(self, format):
"""
Defaults to JSON [ps: 'RDF' is the native rdflib representation]
"""
if format == "XML":
self.sparql.setReturnFormat(XML)
self.format = "XML"
elif format == "RDF":
self.sparql.setReturnFormat(RDF)
self.format = "RDF"
else:
self.sparql.setReturnFormat(JSON)
self.format = "JSON" | 0.040816 |
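For context, a minimal SPARQLWrapper sketch showing what setReturnFormat controls; the endpoint URL and query are examples only:

from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("https://dbpedia.org/sparql")  # example endpoint
sparql.setQuery("SELECT ?s WHERE { ?s ?p ?o } LIMIT 3")
sparql.setReturnFormat(JSON)  # same call the method above makes for the default case

# convert() parses the response according to the requested return format.
results = sparql.query().convert()
print(results["results"]["bindings"])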
def _build_user_environment(self, envs, inputs, outputs, mounts):
"""Returns a dictionary of for the user container environment."""
envs = {env.name: env.value for env in envs}
envs.update(providers_util.get_file_environment_variables(inputs))
envs.update(providers_util.get_file_environment_variables(outputs))
envs.update(providers_util.get_file_environment_variables(mounts))
return envs | 0.002415 |
def add_string_pairs_from_button_element(xib_file, results, button, special_ui_components_prefix):
""" Adds strings pairs from a button xib element.
Args:
xib_file (str): Path to the xib file.
results (list): The list to add the results to.
button(element): The button element from the xib, to extract the string pairs from.
special_ui_components_prefix(str): A custom prefix for internationalized components to allow (the default allows only JT)
"""
button_entry_comment = extract_element_internationalized_comment(button)
if button_entry_comment is None:
return
for state in button.getElementsByTagName('state'):
state_name = state.attributes['key'].value
state_entry_comment = button_entry_comment + " - " + state_name + " state of button"
if not add_string_pairs_from_attributed_ui_element(results, state, state_entry_comment):
try:
button_entry_key = state.attributes['title'].value
except KeyError:
try:
button_entry_key = state.getElementsByTagName('string')[0].firstChild.nodeValue
except Exception:
continue
results.append((button_entry_key, state_entry_comment))
warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix) | 0.005882 |
def load_model():
"""
Load an n-gram language model for mathematics in ARPA format which gets
shipped with hwrt.
Returns
-------
A NgramLanguageModel object
"""
logging.info("Load language model...")
ngram_arpa_t = pkg_resources.resource_filename('hwrt',
'misc/ngram.arpa.tar.bz2')
with tarfile.open(ngram_arpa_t, 'r:bz2') as tar:
tarfolder = tempfile.mkdtemp()
tar.extractall(path=tarfolder)
ngram_arpa_f = os.path.join(tarfolder, 'ngram.arpa')
with open(ngram_arpa_f) as f:
content = f.read()
ngram_model = NgramLanguageModel()
ngram_model.load_from_arpa_str(content)
return ngram_model | 0.001383 |
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(HostedGraphiteHandler, self).get_default_config()
config.update({
'apikey': '',
'host': 'carbon.hostedgraphite.com',
'port': 2003,
'proto': 'tcp',
'timeout': 15,
'batch': 1,
'max_backlog_multiplier': 5,
'trim_backlog_multiplier': 4,
})
return config | 0.004016 |
def set_wrap_mode(self, mode=None):
"""
Set wrap mode
Valid *mode* values: None, 'word', 'character'
"""
if mode == 'word':
wrap_mode = QTextOption.WrapAtWordBoundaryOrAnywhere
elif mode == 'character':
wrap_mode = QTextOption.WrapAnywhere
else:
wrap_mode = QTextOption.NoWrap
self.setWordWrapMode(wrap_mode) | 0.004773 |
def fundamental_arguments(t):
"""Compute the fundamental arguments (mean elements) of Sun and Moon.
`t` - TDB time in Julian centuries since J2000.0, as float or NumPy array
Outputs fundamental arguments, in radians:
a[0] = l (mean anomaly of the Moon)
a[1] = l' (mean anomaly of the Sun)
a[2] = F (mean argument of the latitude of the Moon)
a[3] = D (mean elongation of the Moon from the Sun)
a[4] = Omega (mean longitude of the Moon's ascending node);
from Simon section 3.4(b.3),
precession = 5028.8200 arcsec/cy)
"""
a = fa4 * t
a += fa3
a *= t
a += fa2
a *= t
a += fa1
a *= t
a += fa0
fmod(a, ASEC360, out=a)
a *= ASEC2RAD
if getattr(t, 'shape', ()):
return a
return a[:,0] | 0.002387 |
def _read_input_urls(cls, session: AppSession, default_scheme='http'):
'''Read the URLs provided by the user.'''
url_string_iter = session.args.urls or ()
# FIXME: url rewriter isn't created yet
url_rewriter = session.factory.get('URLRewriter')
if session.args.input_file:
if session.args.force_html:
lines = cls._input_file_as_html_links(session)
else:
lines = cls._input_file_as_lines(session)
url_string_iter = itertools.chain(url_string_iter, lines)
base_url = session.args.base
for url_string in url_string_iter:
_logger.debug(__('Parsing URL {0}', url_string))
if base_url:
url_string = wpull.url.urljoin(base_url, url_string)
try:
url_info = wpull.url.URLInfo.parse(
url_string, default_scheme=default_scheme)
_logger.debug(__('Parsed URL {0}', url_info))
if url_rewriter:
# TODO: this logic should be a hook
url_info = url_rewriter.rewrite(url_info)
_logger.debug(__('Rewritten URL {0}', url_info))
yield url_info
except ValueError as e:
_logger.info(__('Invalid URL {0}: {1}', url_string, e)) | 0.001471 |
def get_datarect(self):
"""Get the approximate bounding box of the displayed image.
Returns
-------
rect : tuple
Bounding box in data coordinates in the form of
``(x1, y1, x2, y2)``.
"""
x1, y1, x2, y2 = self._org_x1, self._org_y1, self._org_x2, self._org_y2
return (x1, y1, x2, y2) | 0.005495 |
def total(self):
"""Total cost of the order
"""
total = 0
for item in self.items.all():
total += item.total
return total | 0.011628 |
def did_composer_install(dir):
'''
Test to see if the vendor directory exists in this directory
dir
Directory location of the composer.json file
CLI Example:
.. code-block:: bash
salt '*' composer.did_composer_install /var/www/application
'''
lockFile = "{0}/vendor".format(dir)
if os.path.exists(lockFile):
return True
return False | 0.002525 |
def write_to_file(self, path, filename, footer=True):
"""
Class method responsible for generating a file containing the notebook object data.
----------
Parameters
----------
path : str
OpenSignalsTools Root folder path (where the notebook will be stored).
filename : str
Defines the name of the notebook file.
footer : bool
Flag that defines when the footer needs to be included in the Notebook.
"""
# =============================== Storage of Filename ======================================
self.filename = filename
# ======================== Inclusion of Footer in the Notebook =============================
if footer is True:
_generate_footer(self.notebook, self.notebook_type)
# ========== Code segment for application of the OpenSignalsTools CSS style ===========
self.notebook["cells"].append(nb.v4.new_markdown_cell(AUX_CODE_MESSAGE,
**{"metadata":
{"tags": ["hide_mark"]}}))
self.notebook["cells"].append(nb.v4.new_code_cell(CSS_STYLE_CODE,
**{"metadata":
{"tags": ["hide_both"]}}))
self.notebook["cells"].append(nb.v4.new_code_cell(JS_CODE_AUTO_PLAY,
**{"metadata":
{"tags": ["hide_both"]}}))
full_path = path + "\\Categories\\" + self.notebook_type + "\\" + filename + ".ipynb"
nb.write(self.notebook, full_path)
# ========================== Run Notebook Code Instructions ================================
os.system("jupyter nbconvert --execute --inplace --ExecutePreprocessor.timeout=-1 " +
full_path)
os.system("jupyter trust " + full_path) | 0.008169 |
def match_hail_sizes(model_tracks, obs_tracks, track_pairings):
"""
Given forecast and observed track pairings, maximum hail sizes are associated with each paired forecast storm
track timestep. If the duration of the forecast and observed tracks differ, then interpolation is used for the
intermediate timesteps.
Args:
model_tracks: List of model track STObjects
obs_tracks: List of observed STObjects
track_pairings: list of tuples containing the indices of the paired (forecast, observed) tracks
"""
unpaired = list(range(len(model_tracks)))
for p, pair in enumerate(track_pairings):
model_track = model_tracks[pair[0]]
unpaired.remove(pair[0])
obs_track = obs_tracks[pair[1]]
obs_hail_sizes = np.array([step[obs_track.masks[t] == 1].max()
for t, step in enumerate(obs_track.timesteps)])
if obs_track.times.size > 1 and model_track.times.size > 1:
normalized_obs_times = 1.0 / (obs_track.times.max() - obs_track.times.min())\
* (obs_track.times - obs_track.times.min())
normalized_model_times = 1.0 / (model_track.times.max() - model_track.times.min())\
* (model_track.times - model_track.times.min())
hail_interp = interp1d(normalized_obs_times, obs_hail_sizes, kind="nearest",
bounds_error=False, fill_value=0)
model_track.observations = hail_interp(normalized_model_times)
elif obs_track.times.size == 1:
model_track.observations = np.ones(model_track.times.shape) * obs_hail_sizes[0]
elif model_track.times.size == 1:
model_track.observations = np.array([obs_hail_sizes.max()])
print(pair[0], "obs", obs_hail_sizes)
print(pair[0], "model", model_track.observations)
for u in unpaired:
model_tracks[u].observations = np.zeros(model_tracks[u].times.shape) | 0.005231 |
def scan(self) -> ['File']:
""" Scan
Scan the directory for files and folders and update the file dictionary.
@return: List of files
"""
self._files = {}
output = self._listFilesWin() if isWindows else self._listFilesPosix()
output = [line for line in output if "__MACOSX" not in line]
for pathName in output:
if not pathName: # Sometimes we get empty lines
continue
pathName = pathName[len(self._path) + 1:]
file = File(self, pathName=pathName, exists=True)
self._files[file.pathName] = file
return self.files | 0.004601 |
def print_ec2_info(region,
instance_id,
access_key_id,
secret_access_key,
username):
""" outputs information about our EC2 instance """
data = get_ec2_info(instance_id=instance_id,
region=region,
access_key_id=access_key_id,
secret_access_key=secret_access_key,
username=username)
log_green("region: %s" % data['region'])
log_green("Instance_type: %s" % data['instance_type'])
log_green("Instance state: %s" % data['state'])
log_green("Public dns: %s" % data['public_dns_name'])
log_green("Ip address: %s" % data['ip_address'])
log_green("volume: %s" % data['volume'])
log_green("user: %s" % data['username'])
log_green("ssh -i %s %s@%s" % (env.key_filename,
username,
data['ip_address'])) | 0.001029 |
def display(level='DEBUG'):
"""display(level='DEBUG') forwards logs to stdout"""
logger = get_logger()
sh = logging.StreamHandler()
sh.setLevel(getattr(logging, level))
sh.setFormatter(DEFAULT_LOG_FORMAT)
logger.addHandler(sh) | 0.004 |
async def sendmail(
self,
sender: str,
recipients: RecipientsType,
message: Union[str, bytes],
mail_options: Iterable[str] = None,
rcpt_options: Iterable[str] = None,
timeout: DefaultNumType = _default,
) -> SendmailResponseType:
"""
This command performs an entire mail transaction.
The arguments are:
- sender: The address sending this mail.
- recipients: A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
- message: The message string to send.
- mail_options: List of options (such as ESMTP 8bitmime) for the
MAIL command.
- rcpt_options: List of options (such as DSN commands) for all the
RCPT commands.
message must be a string containing characters in the ASCII range.
The string is encoded to bytes using the ascii codec, and lone \\\\r
and \\\\n characters are converted to \\\\r\\\\n characters.
If there has been no previous HELO or EHLO command this session, this
method tries EHLO first.
This method will return normally if the mail is accepted for at least
one recipient. It returns a tuple consisting of:
- an error dictionary, with one entry for each recipient that was
refused. Each entry contains a tuple of the SMTP error code
and the accompanying error message sent by the server.
- the message sent by the server in response to the DATA command
(often containing a message id)
Example:
>>> loop = asyncio.get_event_loop()
>>> smtp = aiosmtplib.SMTP(hostname="127.0.0.1", port=1025)
>>> loop.run_until_complete(smtp.connect())
(220, ...)
>>> recipients = ["[email protected]", "[email protected]", "[email protected]"]
>>> message = "From: [email protected]\\nSubject: testing\\nHello World"
>>> send_coro = smtp.sendmail("[email protected]", recipients, message)
>>> loop.run_until_complete(send_coro)
({}, 'OK')
>>> loop.run_until_complete(smtp.quit())
(221, Bye)
In the above example, the message was accepted for delivery for all
three addresses. If delivery had been only successful to two
of the three addresses, and one was rejected, the response would look
something like::
(
{"[email protected]": (550, "User unknown")},
"Written safely to disk. #902487694.289148.12219.",
)
If delivery is not successful to any addresses,
:exc:`.SMTPRecipientsRefused` is raised.
If :exc:`.SMTPResponseException` is raised by this method, we try to
send an RSET command to reset the server envelope automatically for
the next attempt.
:raises SMTPRecipientsRefused: delivery to all recipients failed
:raises SMTPResponseException: on invalid response
"""
if isinstance(recipients, str):
recipients = [recipients]
else:
recipients = list(recipients)
if mail_options is None:
mail_options = []
else:
mail_options = list(mail_options)
if rcpt_options is None:
rcpt_options = []
else:
rcpt_options = list(rcpt_options)
async with self._sendmail_lock:
if self.supports_extension("size"):
size_option = "size={}".format(len(message))
mail_options.append(size_option)
try:
await self.mail(sender, options=mail_options, timeout=timeout)
recipient_errors = await self._send_recipients(
recipients, options=rcpt_options, timeout=timeout
)
response = await self.data(message, timeout=timeout)
except (SMTPResponseException, SMTPRecipientsRefused) as exc:
# If we got an error, reset the envelope.
try:
await self.rset(timeout=timeout)
except (ConnectionError, SMTPResponseException):
# If we're disconnected on the reset, or we get a bad
# status, don't raise that as it's confusing
pass
raise exc
return recipient_errors, response.message | 0.000662 |
def get_service_ips(
service_name,
task_name=None,
inactive=False,
completed=False
):
""" Get a set of the IPs associated with a service
:param service_name: the service name
:type service_name: str
:param task_name: the task name
:type task_name: str
:param inactive: whether to include inactive services
:type inactive: bool
:param completed: whether to include completed services
:type completed: bool
:return: a list of IP addresses
:rtype: [str]
"""
service_tasks = get_service_tasks(service_name, inactive, completed)
ips = set([])
for task in service_tasks:
if task_name is None or task['name'] == task_name:
for status in task['statuses']:
# Only the TASK_RUNNING status will have correct IP information.
if status["state"] != "TASK_RUNNING":
continue
for ip in status['container_status']['network_infos'][0]['ip_addresses']:
ips.add(ip['ip_address'])
return ips | 0.002679 |
def __parseResponse(self, result):
"""Parses the server response."""
response = []
for data in result['data'] :
result_dict={}
for k,v in data.items() :
column = self.getOutputColumn(k)
if column != None:
type = column.getSqlColumnType()
if type != None and type.startswith('int'):
result_dict.update({
k : int(v)
})
elif type != None and type.startswith('float'):
result_dict.update({
k : float(v)
})
elif type != None and type.startswith('timestamp'):
(dt, mSecs)= v.split(".")
dt = datetime.strptime(dt,"%Y-%m-%dT%H:%M:%S")
mSeconds = timedelta(microseconds = int(mSecs))
result_dict.update({
k : dt+mSeconds
})
else :
result_dict.update({
k : v
})
response.append(result_dict)
return response | 0.016794 |
def fill_document(self):
"""Add a section, a subsection and some text to the document."""
with self.create(Section('A section')):
self.append('Some regular text and some ')
self.append(italic('italic text. '))
with self.create(Subsection('A subsection')):
self.append('Also some crazy characters: $&#{}') | 0.005362 |
def getBumper(self):
'''
Returns last Bumper.
@return last JdeRobotTypes Bumper saved
'''
if self.hasproxy():
self.lock.acquire()
bumper = self.bumper
self.lock.release()
return bumper
return None | 0.010135 |
def filter(table, predicates):
"""
Select rows from table based on boolean expressions
Parameters
----------
predicates : boolean array expressions, or list thereof
Returns
-------
filtered_expr : TableExpr
"""
resolved_predicates = _resolve_predicates(table, predicates)
return _L.apply_filter(table, resolved_predicates) | 0.002717 |
def reset(self):
'''
Resets this agent type to prepare it for a new simulation run. This
includes resetting the random number generator and initializing the style
of each agent of this type.
'''
self.resetRNG()
sNow = np.zeros(self.pop_size)
Shk = self.RNG.rand(self.pop_size)
sNow[Shk < self.p_init] = 1
self.sNow = sNow | 0.009926 |
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file() | 0.003394 |
def op_extract(op_name, data, senders, inputs, outputs, block_id, vtxindex, txid):
"""
Extract an operation from transaction data.
Return the extracted fields as a dict.
"""
global EXTRACT_METHODS
if op_name not in EXTRACT_METHODS.keys():
raise Exception("No such operation '%s'" % op_name)
method = EXTRACT_METHODS[op_name]
op_data = method( data, senders, inputs, outputs, block_id, vtxindex, txid )
return op_data | 0.010823 |
def loadRecords(self, records):
"""
Loads the inputed records as children to this item.
:param records | [<orb.Table>, ..] || {<str> sub: <variant>, .. }
"""
self.setChildIndicatorPolicy(self.DontShowIndicatorWhenChildless)
self._loaded = True
if records is None:
return
# load sub-groups if desired
if self._nextLevels and RecordSet.typecheck(records):
level = self._nextLevels[0]
sublevels = self._nextLevels[1:]
records = records.grouped(level)
elif RecordSet.typecheck(records):
sublevels = None
records = records.all()
else:
sublevels = None
# load a child set of groups
if type(records) == dict:
try:
generator = self.treeWidget().createGroupItem
cls = None
except AttributeError:
generator = None
cls = type(self)
for subgroup, subrecords in records.items():
if generator:
generator(subgroup, subrecords, sublevels, self)
elif cls:
cls(self, subgroup, subrecords, sublevels)
# load records
else:
try:
generator = self.treeWidget().createRecordItem
cls = None
except AttributeError:
generator = None
cls = XOrbRecordItem
for record in records:
if generator:
generator(record, self)
elif cls:
cls(self, record) | 0.006363 |
def analyze(self, scratch, **kwargs):
"""Run and return the results of the VariableInitialization plugin."""
variables = dict((x, self.variable_state(x.scripts, x.variables))
for x in scratch.sprites)
variables['global'] = self.variable_state(self.iter_scripts(scratch),
scratch.stage.variables)
# Output for now
import pprint
pprint.pprint(variables)
return {'variables': variables} | 0.003891 |
def parse_wd_json(self, wd_json):
"""
Parses a WD entity json and generates the datatype objects, sets self.wd_json_representation
:param wd_json: the json of a WD entity
:type wd_json: A Python Json representation of a WD item
:return: returns the json representation containing 'labels', 'descriptions', 'claims', 'aliases', 'sitelinks'.
"""
wd_data = {x: wd_json[x] for x in ('labels', 'descriptions', 'claims', 'aliases') if x in wd_json}
wd_data['sitelinks'] = dict()
self.entity_metadata = {x: wd_json[x] for x in wd_json if x not in
('labels', 'descriptions', 'claims', 'aliases', 'sitelinks')}
self.sitelinks = wd_json.get('sitelinks', dict())
self.statements = []
for prop in wd_data['claims']:
for z in wd_data['claims'][prop]:
data_type = [x for x in WDBaseDataType.__subclasses__() if x.DTYPE == z['mainsnak']['datatype']][0]
statement = data_type.from_json(z)
self.statements.append(statement)
self.wd_json_representation = wd_data
self.original_statements = copy.deepcopy(self.statements)
return wd_data | 0.005682 |
def get_mime(self, path, isdir):
'''Guess the file type based on its file extension'''
if isdir:
file_type = FOLDER
else:
file_type = mimetypes.guess_type(path)[0]
if not file_type:
file_type = UNKNOWN
return file_type | 0.007353 |
def start_worker_thread(
self,
sleep_interval=1.0):
"""start_worker_thread
Start the helper worker thread to publish queued messages
to Splunk
:param sleep_interval: sleep in seconds before reading from
the queue again
"""
# Start a worker thread responsible for sending logs
if self.sleep_interval > 0:
self.debug_log(
'starting worker thread')
self.timer = Timer(
sleep_interval,
self.perform_work)
self.timer.daemon = True # Auto-kill thread if main process exits
self.timer.start() | 0.002878 |
def create_post_execute(task_params, parameter_map):
"""
Builds the code block for the GPTool Execute method after the job is
submitted based on the input task_params.
:param task_params: A list of task parameters from the task info structure.
:return: A string representing the code block to the GPTool Execute method.
"""
gp_params = []
for task_param in task_params:
if task_param['direction'].upper() == 'INPUT':
continue
# Convert DataType
data_type = task_param['type'].upper()
if 'dimensions' in task_param:
data_type += 'ARRAY'
if data_type in parameter_map:
gp_params.append(parameter_map[data_type].post_execute().substitute(task_param))
return ''.join(gp_params) | 0.002532 |
def get_phi_ss(imt, mag, params):
"""
Returns the single station phi (or it's variance) for a given magnitude
and intensity measure type according to equation 5.14 of Al Atik (2015)
"""
C = params[imt]
if mag <= 5.0:
phi = C["a"]
elif mag > 6.5:
phi = C["b"]
else:
phi = C["a"] + (mag - 5.0) * ((C["b"] - C["a"]) / 1.5)
return phi | 0.002564 |
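A worked illustration of the interpolation branch with made-up placeholder coefficients (not the published Al Atik (2015) values); a plain string stands in for the IMT key:

params = {"PGA": {"a": 0.46, "b": 0.41}}  # placeholder coefficients for illustration

# For 5.0 < mag <= 6.5 the value is interpolated linearly between a and b.
mag = 5.75
C = params["PGA"]
phi = C["a"] + (mag - 5.0) * ((C["b"] - C["a"]) / 1.5)
print(phi)  # 0.435 with the placeholders above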
def jaccard_similarity(self,s1,s2):
"""
Calculate the Jaccard index of the inferred associations of two subjects.
|ancs(s1) /\ ancs(s2)|
---
|ancs(s1) \/ ancs(s2)|
"""
a1 = self.inferred_types(s1)
a2 = self.inferred_types(s2)
num_union = len(a1.union(a2))
if num_union == 0:
return 0.0
return len(a1.intersection(a2)) / num_union | 0.014218 |
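The same ratio computed on plain sets, as a quick standalone check of the docstring formula; the ancestor sets are invented for illustration:

ancs_s1 = {"GO:0008150", "GO:0009987", "GO:0065007"}
ancs_s2 = {"GO:0008150", "GO:0009987", "GO:0050789"}

# |intersection| / |union| = 2 / 4 = 0.5
union = ancs_s1 | ancs_s2
score = len(ancs_s1 & ancs_s2) / len(union) if union else 0.0
assert score == 0.5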
def shutdown(self):
"""Shutdown the accept loop and stop running payloads"""
self._must_shutdown = True
self._is_shutdown.wait()
self._meta_runner.stop() | 0.010811 |
def remove(cls, target, exclude=None, ctx=None, select=lambda *p: True):
"""Remove from target annotations which inherit from cls.
:param target: target from where remove annotations which inherits from
cls.
:param tuple/type exclude: annotation types to exclude from selection.
:param ctx: target ctx.
:param select: annotation selection function which takes in parameters
a target, a ctx and an annotation and return True if the annotation
has to be removed.
"""
# initialize exclude
exclude = () if exclude is None else exclude
try:
# get local annotations
local_annotations = get_local_property(
target, Annotation.__ANNOTATIONS_KEY__
)
except TypeError:
raise TypeError('target {0} must be hashable'.format(target))
# if there are local annotations
if local_annotations is not None:
# get annotations to remove which inherits from cls
annotations_to_remove = [
annotation for annotation in local_annotations
if (
isinstance(annotation, cls)
and not isinstance(annotation, exclude)
and select(target, ctx, annotation)
)
]
# and remove annotations from target
for annotation_to_remove in annotations_to_remove:
annotation_to_remove.remove_from(target) | 0.001298 |
def get(self, key, lang=None):
""" Returns triple related to this node. Can filter on lang
:param key: Predicate of the triple
:param lang: Language of the triple if applicable
:rtype: Literal or BNode or URIRef
"""
if lang is not None:
for o in self.graph.objects(self.asNode(), key):
if o.language == lang:
yield o
else:
for o in self.graph.objects(self.asNode(), key):
yield o | 0.003906 |
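A minimal rdflib sketch of the same language filter, independent of the wrapping class; the graph contents are made up:

from rdflib import Graph, Literal, URIRef
from rdflib.namespace import RDFS

g = Graph()
node = URIRef("http://example.org/thing")
g.add((node, RDFS.label, Literal("book", lang="en")))
g.add((node, RDFS.label, Literal("livre", lang="fr")))

# Keep only the labels whose language tag matches, as the method above does.
for o in g.objects(node, RDFS.label):
    if o.language == "fr":
        print(o)  # livre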
def masked_relative_local_attention_1d(q,
k,
v,
block_length=128,
make_image_summary=False,
dropout_rate=0.,
heads_share_relative_embedding=False,
add_relative_to_values=False,
name=None):
"""Masked local 1d attention with relative positions.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
Attention is masked so that a target position cannot see greater source
positions.
Args:
q: a Tensor with shape [batch, heads, length, depth_k]
k: a Tensor with shape [batch, heads, length, depth_k]
v: a Tensor with shape [batch, heads, length, depth_v]
block_length: an integer
make_image_summary: a boolean, whether to make an attention image summary.
dropout_rate: Dropout rate for attention dropout
heads_share_relative_embedding: a boolean for sharing relative embeddings.
add_relative_to_values: a boolean for whether to add relative component to
values.
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth_v]
Raises:
ValueError: when the name for the variable scope is not passed.
"""
if not name:
raise ValueError("Name must be assigned since reuse for variable scope is "
"set to tf.AUTO_REUSE, in order to reuse relative "
"embeddings of keys and values.")
# Reuse flag is set to auto_reuse to reuse relative embeddings of keys and
# values across blocks (first and tail blocks).
with tf.variable_scope(
name, default_name="masked_relative_local_attention_1d",
values=[q, k, v], reuse=tf.AUTO_REUSE):
default_block_length = block_length
batch = common_layers.shape_list(q)[0]
heads = common_layers.shape_list(q)[1]
length = common_layers.shape_list(q)[2]
# If (length < 2 * block_length), then we use only one block.
if isinstance(length, int) and isinstance(block_length, int):
block_length = length if length < block_length * 2 else block_length
else:
block_length = tf.where(
tf.less(length, block_length * 2), length, block_length)
depth_k = common_layers.shape_list(k)[3]
depth_v = common_layers.shape_list(v)[3]
original_length = length
padding_size = tf.mod(-length, block_length)
length += padding_size
padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]]
q = tf.pad(q, padding)
k = tf.pad(k, padding)
v = tf.pad(v, padding)
num_blocks = length // block_length
# compute attention for the first query block.
first_q = tf.slice(q, [0, 0, 0, 0], [-1, -1, block_length, -1])
first_k = tf.slice(k, [0, 0, 0, 0], [-1, -1, block_length, -1])
first_v = tf.slice(v, [0, 0, 0, 0], [-1, -1, block_length, -1])
# Relative embeddings will be used later as well.
# TODO(avaswani,annahuang): check why 2*bl was breaking for music
# Needs to be known at static shape inference time, hence cannot be
# 2 * block_length.
rel_embed_length = 4 * default_block_length
# We only multiply with the needed embeddings as we slice them out.
first_rel_embeddings = get_relative_embeddings_left(
rel_embed_length, block_length, depth_k, heads,
heads_share_relative_embedding, "relative_embeddings")
first_rel_logits = matmul_with_relative_keys(
first_q, first_rel_embeddings, heads_share_relative_embedding)
first_logits = tf.matmul(first_q, first_k, transpose_b=True)
first_logits += (
_relative_position_to_absolute_position_masked(first_rel_logits))
# adding a mask
first_logits += (
common_layers.cast_like(attention_bias_lower_triangle(block_length),
first_logits))
first_att = tf.nn.softmax(first_logits,
name="first_attention_weights")
# dropping out the attention links for each of the heads
first_att = common_layers.dropout_with_broadcast_dims(
first_att, 1.0 - dropout_rate,
broadcast_dims=None)
# only call image summary for the first block
if common_layers.should_generate_summaries() and make_image_summary:
attention_image_summary(first_att, None)
first_output = tf.matmul(first_att, first_v)
# compute attention for all subsequent query blocks.
q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k])
k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k])
v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v])
local_k = _make_local_block(k, depth_k, batch, heads, num_blocks,
block_length)
local_v = _make_local_block(v, depth_v, batch, heads, num_blocks,
block_length)
tail_q = tf.slice(q, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1])
tail_q = tf.reshape(tail_q,
[batch, heads, num_blocks - 1, block_length, depth_k])
local_length = common_layers.shape_list(local_k)[3]
# collapsing num blocks and batch size so that we can reuse
# functions
def _reshape_for_relative(x):
x_shape = common_layers.shape_list(x)
# [batch, num_blocks, heads, length, depth]
x = tf.transpose(x, [0, 2, 1, 3, 4])
x = tf.reshape(x, [batch*x_shape[2], heads, x_shape[3],
x_shape[4]])
return x
rel_tail_q = _reshape_for_relative(tail_q)
rel_k = _reshape_for_relative(local_k)
rel_v = _reshape_for_relative(local_v)
rel_embeddings = get_relative_embeddings_left(
rel_embed_length, 2 * block_length, depth_k, heads,
heads_share_relative_embedding, "relative_embeddings")
rel_logits = matmul_with_relative_keys(
rel_tail_q, rel_embeddings, heads_share_relative_embedding)
# Computing relative logits separately for the masked and unmasked parts
# because the reshaping logic is different for both
masked_rel_logits = tf.slice(rel_logits, [0, 0, 0, block_length],
[-1, -1, -1, -1])
masked_rel_logits = _relative_position_to_absolute_position_masked(
masked_rel_logits)
unmasked_rel_logits = tf.slice(rel_logits, [0, 0, 0, 0],
[-1, -1, -1, 2*block_length-1])
unmasked_rel_logits = _relative_position_to_absolute_position_unmasked(
unmasked_rel_logits)
all_rel_logits = tf.concat([unmasked_rel_logits, masked_rel_logits],
axis=3)
all_logits = (
tf.matmul(rel_tail_q, rel_k, transpose_b=True) + all_rel_logits)
# make sure source_pos <= target_pos
good_part = common_layers.ones_matrix_band_part(block_length,
local_length,
-1, block_length)
mask = (1.0 - good_part) * -1e9
mask = common_layers.cast_like(mask, all_logits)
all_logits += tf.reshape(mask, [1, 1, block_length, local_length])
weights = tf.nn.softmax(all_logits, name="attention_weights")
# [batch (* num_blocks), heads, query_length (=block_length),
# key_length (=2*block_length)]
weights = common_layers.dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate,
broadcast_dims=None)
output = tf.matmul(weights, rel_v)
if add_relative_to_values:
# Adds the contribution of the weighted relative embeddings to the values.
weights_for_unmasked, weights_for_masked = (
tf.split(weights, 2, axis=3))
rel_weights_unmasked = _absolute_position_to_relative_position_unmasked(
weights_for_unmasked)
rel_weights_masked = _absolute_position_to_relative_position_masked(
weights_for_masked)
value_rel_embeddings_unmasked = get_relative_embeddings_left(
rel_embed_length, 2 * block_length, depth_v,
heads, heads_share_relative_embedding,
"value_relative_embeddings")
      # The unmasked part ends at relative position -1 rather than 0, so drop the last embedding.
if heads_share_relative_embedding:
value_rel_embeddings_unmasked = value_rel_embeddings_unmasked[:-1, :]
else:
value_rel_embeddings_unmasked = value_rel_embeddings_unmasked[:, :-1, :]
value_rel_embeddings_masked = get_relative_embeddings_left(
rel_embed_length, block_length, depth_v,
heads, heads_share_relative_embedding,
"value_relative_embeddings")
# [batch (*num_blocks), heads, query length, key length]
rel_weights = tf.concat(
[rel_weights_unmasked, rel_weights_masked], axis=3)
if heads_share_relative_embedding:
value_rel_embeddings_concat_axis = 0
else:
value_rel_embeddings_concat_axis = 1
value_rel_embeddings = tf.concat(
[value_rel_embeddings_unmasked, value_rel_embeddings_masked],
axis=value_rel_embeddings_concat_axis)
output_rel = matmul_with_relative_values(
rel_weights, value_rel_embeddings, heads_share_relative_embedding)
output += output_rel
# bring to [batch, heads, num_blocks-1, block_length, depth]
output = tf.reshape(output,
[batch, num_blocks-1, heads, block_length, depth_v])
output = tf.transpose(output, [0, 2, 1, 3, 4])
output = tf.reshape(
output, [batch, heads, (num_blocks - 1) * block_length, depth_v])
output = tf.concat([first_output, output], axis=2)
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output = tf.reshape(output, [batch, heads, original_length, depth_v])
return output | 0.003403 |
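A minimal usage sketch for the attention function above; the TF1 graph-mode setup, toy shapes, and availability of the tensor2tensor helpers it calls are assumptions, not from the source.

# Hedged usage sketch: TF1 graph mode, toy shapes; assumes the function and its
# helpers (common_layers, get_relative_embeddings_left, ...) are importable.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

batch, heads, length, depth = 2, 4, 256, 32
q = tf.random_normal([batch, heads, length, depth])
k = tf.random_normal([batch, heads, length, depth])
v = tf.random_normal([batch, heads, length, depth])

out = masked_relative_local_attention_1d(
    q, k, v,
    block_length=128,
    heads_share_relative_embedding=True,
    name="rel_local_attn")  # name is mandatory because of the AUTO_REUSE scope

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(out).shape)  # (2, 4, 256, 32)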
def finite_pixels(self):
""" Return an array of the finite pixels.
Returns
-------
:obj:`numpy.ndarray`
Nx2 array of the finite pixels
"""
finite_px = np.where(np.isfinite(self.data))
finite_px = np.c_[finite_px[0], finite_px[1]]
return finite_px | 0.006173 |
def _iterate_managers(connection, skip):
"""Iterate over instantiated managers."""
for idx, name, manager_cls in _iterate_manage_classes(skip):
if name in skip:
continue
try:
manager = manager_cls(connection=connection)
except TypeError as e:
click.secho(f'Could not instantiate {name}: {e}', fg='red')
else:
yield idx, name, manager | 0.00237 |
def iter_chunks(self, start_count=0):
"""
Iterate over the chunks of the file according to their length prefixes.
        yields: index <int>, encrypted chunk without its length prefix <bytes>, lastchunk <bool>
"""
ciphertext = self.chunks_block
chunknum = start_count
idx = 0
lastchunk = False
while idx < len(ciphertext):
plainlen = int.from_bytes(ciphertext[idx: idx+4], 'little')
chunklen = plainlen + 16
if idx + 4 + chunklen == len(ciphertext):
lastchunk = True
elif idx + chunklen > len(ciphertext):
raise ValueError("Bad ciphertext; when reading chunks, hit EOF early")
yield chunknum, ciphertext[idx + 4 : idx + 4 + chunklen], lastchunk
idx += chunklen + 4
chunknum += 1 | 0.005828 |
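The parser above expects each chunk to be stored as a 4-byte little-endian plaintext length followed by plaintext_len + 16 bytes of ciphertext. A hedged sketch of that layout; the builder and stand-in container below are illustrative, not from the source.

# Hedged sketch of the chunk layout iter_chunks expects; names are illustrative.
import os

def build_chunks_block(plaintext_lengths):
    block = b''
    for plen in plaintext_lengths:
        # 4-byte little-endian length prefix, then plen + 16 bytes of (fake) ciphertext
        block += plen.to_bytes(4, 'little') + os.urandom(plen + 16)
    return block

class _FakeContainer:
    chunks_block = build_chunks_block([100, 50, 7])
    iter_chunks = iter_chunks  # reuse the generator above, purely for illustration

for num, chunk, last in _FakeContainer().iter_chunks():
    print(num, len(chunk), last)  # 0 116 False / 1 66 False / 2 23 True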
def ls(self, glob_str):
"""
Return just the filenames that match `glob_str` inside the store directory.
:param str glob_str: A glob string, i.e. 'state_*'
:return: list of matched keys
"""
path = os.path.join(self.uri, glob_str)
return [os.path.split(s)[1] for s in glob.glob(path)] | 0.00885 |
def update_app_icon(self):
"""
Update the app icon if the user is not trying to resize the window.
"""
if os.name == 'nt' or not hasattr(self, '_last_window_size'): # pragma: no cover
# DO NOT EVEN ATTEMPT TO UPDATE ICON ON WINDOWS
return
cur_time = time.time()
if self._last_window_size != self.window_size: # pragma: no cover
# Window size hasn't remained constant since last render.
# This means the user is resizing it so ignore update.
pass
elif ((cur_time - self._last_update_time > BackgroundPlotter.ICON_TIME_STEP)
and self._last_camera_pos != self.camera_position):
            # it's been a while since the last update OR
            # the camera position has changed and it's been at least one second
# Update app icon as preview of the window
img = pad_image(self.image)
qimage = QtGui.QImage(img.copy(), img.shape[1],
img.shape[0], QtGui.QImage.Format_RGB888)
icon = QtGui.QIcon(QtGui.QPixmap.fromImage(qimage))
self.app.setWindowIcon(icon)
# Update trackers
self._last_update_time = cur_time
self._last_camera_pos = self.camera_position
# Update trackers
self._last_window_size = self.window_size | 0.002882 |
def get_all_reserved_instances_offerings(self, reserved_instances_id=None,
instance_type=None,
availability_zone=None,
product_description=None,
filters=None):
"""
Describes Reserved Instance offerings that are available for purchase.
:type reserved_instances_id: str
:param reserved_instances_id: Displays Reserved Instances with the
specified offering IDs.
:type instance_type: str
:param instance_type: Displays Reserved Instances of the specified
instance type.
:type availability_zone: str
:param availability_zone: Displays Reserved Instances within the
specified Availability Zone.
:type product_description: str
:param product_description: Displays Reserved Instances with the
specified product description.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.reservedinstance.ReservedInstancesOffering`
"""
params = {}
if reserved_instances_id:
params['ReservedInstancesId'] = reserved_instances_id
if instance_type:
params['InstanceType'] = instance_type
if availability_zone:
params['AvailabilityZone'] = availability_zone
if product_description:
params['ProductDescription'] = product_description
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeReservedInstancesOfferings',
params, [('item', ReservedInstancesOffering)],
verb='POST') | 0.002903 |
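A hedged usage sketch against the boto 2.x EC2 API; the region, filter name, and printed attributes are assumptions.

# Hedged usage sketch (boto 2.x); region and filter values are illustrative.
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
offerings = conn.get_all_reserved_instances_offerings(
    instance_type='m1.small',
    availability_zone='us-east-1a',
    filters={'duration': '31536000'})  # one-year terms; filter name assumed
for offering in offerings:
    print(offering.id, offering.instance_type, offering.fixed_price)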
def process_args():
"""
Parse command-line arguments.
"""
parser = argparse.ArgumentParser(description="A script for plotting files containing spike time data")
parser.add_argument('spiketimeFiles',
type=str,
metavar='<spiketime file>',
help='List of text file containing spike times',
nargs='+')
parser.add_argument('-format',
type=str,
metavar='<format>',
default=DEFAULTS['format'],
help='How the spiketimes are represented on each line of file:\n'+\
'id_t: id of cell, space(s)/tab(s), time of spike (default);\n'+\
't_id: time of spike, space(s)/tab(s), id of cell;\n'+\
'sonata: SONATA format HDF5 file containing spike times')
parser.add_argument('-rates',
action='store_true',
default=DEFAULTS['rates'],
help='Show a plot of rates')
parser.add_argument('-showPlotsAlready',
action='store_true',
default=DEFAULTS['show_plots_already'],
help='Show plots once generated')
parser.add_argument('-saveSpikePlotTo',
type=str,
metavar='<spiketime plot filename>',
default=DEFAULTS['save_spike_plot_to'],
help='Name of file in which to save spiketime plot')
parser.add_argument('-rateWindow',
type=int,
metavar='<rate window>',
default=DEFAULTS['rate_window'],
help='Window for rate calculation in ms')
parser.add_argument('-rateBins',
type=int,
metavar='<rate bins>',
default=DEFAULTS['rate_bins'],
help='Number of bins for rate histogram')
return parser.parse_args() | 0.013351 |
def update_state(world):
"""
Increment the world state, determining which cells live, die, or appear.
Args:
world (list[list]): A square matrix of cells
Returns: None
"""
world_size = len(world)
def wrap(index):
"""Wrap an index around the other end of the array"""
return index % world_size
for x in range(world_size):
for y in range(world_size):
# Decide if this node cares about the rules right now
if not world[x][y].allow_change.get():
continue
live_neighbor_count = sum([
world[wrap(x)][wrap(y + 1)].value,
world[wrap(x + 1)][wrap(y + 1)].value,
world[wrap(x + 1)][wrap(y)].value,
world[wrap(x + 1)][wrap(y - 1)].value,
                world[wrap(x)][wrap(y - 1)].value,
world[wrap(x - 1)][wrap(y - 1)].value,
world[wrap(x - 1)][wrap(y)].value,
world[wrap(x - 1)][wrap(y + 1)].value
])
if world[x][y].value:
# Any live cell with fewer than two live neighbours dies
# Any live cell with more than three live neighbours dies
# Any live cell with two or three live neighbours lives
if not (live_neighbor_count == 2 or live_neighbor_count == 3):
world[x][y].value = False
else:
# Any dead cell with exactly three live neighbours comes alive
if live_neighbor_count == 3:
world[x][y].value = True | 0.000625 |
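A minimal sketch of a world that satisfies the cell interface assumed above (a boolean .value plus .allow_change.get()); the Cell and _AlwaysAllow classes are hypothetical.

# Hedged sketch of a compatible world; Cell and _AlwaysAllow are hypothetical helpers.
import random

class _AlwaysAllow:
    def get(self):
        return True  # every cell obeys the rules on every tick

class Cell:
    def __init__(self, alive):
        self.value = alive
        self.allow_change = _AlwaysAllow()

size = 10
world = [[Cell(random.random() < 0.3) for _ in range(size)] for _ in range(size)]
update_state(world)  # advances the grid one generation, in place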
def login_with(cls, platform, third_party_auth_data):
        '''
        Bind a third-party platform account to the User.
        :param platform: name of the third-party platform (base string)
        '''
user = User()
return user.link_with(platform, third_party_auth_data) | 0.008547 |
def is_in_range(self, values, unit=None, raise_exception=True):
"""Check if a list of values is within physically/mathematically possible range.
Args:
values: A list of values.
unit: The unit of the values. If not specified, the default metric
unit will be assumed.
raise_exception: Set to True to raise an exception if not in range.
"""
self._is_numeric(values)
if unit is None or unit == self.units[0]:
minimum = self.min
maximum = self.max
else:
namespace = {'self': self}
self.is_unit_acceptable(unit, True)
min_statement = "self._{}_to_{}(self.min)".format(
self._clean(self.units[0]), self._clean(unit))
max_statement = "self._{}_to_{}(self.max)".format(
self._clean(self.units[0]), self._clean(unit))
minimum = eval(min_statement, namespace)
maximum = eval(max_statement, namespace)
for value in values:
if value < minimum or value > maximum:
if not raise_exception:
return False
else:
raise ValueError(
'{0} should be between {1} and {2}. Got {3}'.format(
self.__class__.__name__, self.min, self.max, value
)
)
return True | 0.002059 |
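The eval() calls above rely on per-unit conversion helpers whose names follow the pattern _<from-unit>_to_<to-unit> after _clean() normalization; a minimal illustration of that convention, with hypothetical names and bounds.

# Hedged illustration of the "_<from-unit>_to_<to-unit>" convention the eval() expects.
class _TemperatureLike:
    units = ['C', 'F']
    min, max = -273.15, 10000.0  # bounds in the default unit ('C'); values assumed

    def _C_to_F(self, value):
        return value * 9.0 / 5.0 + 32.0

print(_TemperatureLike()._C_to_F(100.0))  # 212.0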
def attach_intf_router(self, tenant_id, tenant_name, router_id):
"""Routine to attach the interface to the router. """
in_sub = self.get_in_subnet_id(tenant_id)
out_sub = self.get_out_subnet_id(tenant_id)
        # FIXME: modify the hard-coded subnet names
subnet_lst = set()
subnet_lst.add(in_sub)
subnet_lst.add(out_sub)
ret = self.os_helper.add_intf_router(router_id, tenant_id, subnet_lst)
return ret, in_sub, out_sub | 0.004237 |
def delete_directory(self, dirname):
"""Delete a directory (and contents) from the bucket.
Parameters
----------
dirname : `str`
Name of the directory, relative to ``bucket_root/``.
Raises
------
RuntimeError
Raised when there are no objects to delete (directory
does not exist).
"""
key = os.path.join(self._bucket_root, dirname)
if not key.endswith('/'):
key += '/'
key_objects = [{'Key': obj.key}
for obj in self._bucket.objects.filter(Prefix=key)]
if len(key_objects) == 0:
msg = 'No objects in bucket directory {}'.format(dirname)
raise RuntimeError(msg)
delete_keys = {'Objects': key_objects}
# based on http://stackoverflow.com/a/34888103
s3 = self._session.resource('s3')
r = s3.meta.client.delete_objects(Bucket=self._bucket.name,
Delete=delete_keys)
self._logger.debug(r)
if 'Errors' in r:
raise S3Error('S3 could not delete {0}'.format(key)) | 0.001742 |
def fetch(force=False):
"""Fetch and extract latest Life-Line version of Fiji is just ImageJ
to *~/.bin*.
Parameters
----------
force : bool
Force overwrite of existing Fiji in *~/.bin*.
"""
try:
# python 2
from urllib2 import urlopen, HTTPError, URLError
except ImportError:
# python 3
from urllib.request import urlopen, HTTPError, URLError
if os.path.isdir(FIJI_ROOT) and not force:
return
elif not os.path.isdir(FIJI_ROOT):
print('Fiji missing in %s' % FIJI_ROOT)
if force:
print('Deleting %s' % FIJI_ROOT)
shutil.rmtree(FIJI_ROOT, ignore_errors=True)
print('Downloading fiji from %s' % URL)
try:
req = urlopen(URL)
try:
size = int(req.info()['content-length'])
except AttributeError:
size = -1
chunk = 512*1024
fp = BytesIO()
i = 0
while 1:
data = req.read(chunk)
if not data:
break
fp.write(data)
if size > 0:
percent = fp.tell() // (size/100)
msg = 'Downloaded %d percent \r' % percent
else:
msg = 'Downloaded %d bytes\r' % fp.tell()
sys.stdout.write(msg)
except (HTTPError, URLError) as e:
print('Error getting fiji: {}'.format(e))
sys.exit(1)
try:
print('\nExtracting zip')
z = ZipFile(fp)
z.extractall(BIN_FOLDER)
# move to Fiji-VERSION.app to easily check if it exists (upon fijibin upgrade)
os.rename(EXTRACT_FOLDER, FIJI_ROOT)
except (BadZipFile, IOError) as e:
print('Error extracting zip: {}'.format(e))
sys.exit(1)
for path in BIN_NAMES.values():
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC) | 0.001064 |
def make_generic_validator(validator: AnyCallable) -> 'ValidatorCallable':
"""
Make a generic function which calls a validator with the right arguments.
    Unfortunately other approaches (e.g. returning a partial of a function that builds the arguments) are slow,
hence this laborious way of doing things.
It's done like this so validators don't all need **kwargs in their signature, eg. any combination of
the arguments "values", "fields" and/or "config" are permitted.
"""
sig = signature(validator)
args = list(sig.parameters.keys())
first_arg = args.pop(0)
if first_arg == 'self':
raise ConfigError(
f'Invalid signature for validator {validator}: {sig}, "self" not permitted as first argument, '
f'should be: (cls, value, values, config, field), "values", "config" and "field" are all optional.'
)
elif first_arg == 'cls':
# assume the second argument is value
return wraps(validator)(_generic_validator_cls(validator, sig, set(args[1:])))
else:
# assume the first argument was value which has already been removed
return wraps(validator)(_generic_validator_basic(validator, sig, set(args))) | 0.005771 |
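A hedged illustration of the validator signatures this wrapper is meant to accept; the example functions are not from the source, and the exact call order of the returned callable is only sketched in the comment.

# Hedged illustration: any combination of "values", "config" and "field" after
# (cls, value) should be wrappable. The validators below are examples.
def must_be_positive(cls, value):
    assert value > 0
    return value

def must_differ_from_other(cls, value, values):
    assert value != values.get('other')
    return value

wrapped = make_generic_validator(must_be_positive)
# The returned callable is then always invoked with the full, fixed argument set
# (roughly cls, value, values, field, config -- exact order per pydantic internals).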
def start_recording(self, file='mingus_dump.wav'):
"""Initialize a new wave file for recording."""
w = wave.open(file, 'wb')
w.setnchannels(2)
w.setsampwidth(2)
w.setframerate(44100)
self.wav = w | 0.00823 |
def validate(self):
"""Validate that the BinaryComposition is correctly representable."""
_validate_operator_name(self.operator, BinaryComposition.SUPPORTED_OPERATORS)
if not isinstance(self.left, Expression):
raise TypeError(u'Expected Expression left, got: {} {} {}'.format(
type(self.left).__name__, self.left, self))
if not isinstance(self.right, Expression):
raise TypeError(u'Expected Expression right, got: {} {}'.format(
type(self.right).__name__, self.right)) | 0.005376 |
def open_dataset(self, service):
"""Opens and returns the NetCDF dataset associated with a service, or returns a previously-opened dataset"""
if not self.dataset:
path = os.path.join(SERVICE_DATA_ROOT, service.data_path)
self.dataset = netCDF4.Dataset(path, 'r')
return self.dataset | 0.009063 |
def is_correct(self):
"""Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
# Internal checks before executing inherited function...
# If we got the _time parameters, we are time based. Unless, we are not :)
if hasattr(self, 'first_notification_time') or hasattr(self, 'last_notification_time'):
self.time_based = True
# Ok now we manage special cases...
if not hasattr(self, 'contacts') and not hasattr(self, 'contact_groups'):
self.add_error('%s: I do not have contacts nor contact_groups' % (self.get_name()))
state = False
# If time_based or not, we do not check all properties
if self.time_based:
if not hasattr(self, 'first_notification_time'):
self.add_error('%s: I do not have first_notification_time' % (self.get_name()))
state = False
if not hasattr(self, 'last_notification_time'):
self.add_error('%s: I do not have last_notification_time' % (self.get_name()))
state = False
else: # we check classical properties
if not hasattr(self, 'first_notification'):
self.add_error('%s: I do not have first_notification' % (self.get_name()))
state = False
if not hasattr(self, 'last_notification'):
self.add_error('%s: I do not have last_notification' % (self.get_name()))
state = False
# Change the special_properties definition according to time_based ...
save_special_properties = self.special_properties
if self.time_based:
self.special_properties = self.special_properties_time_based
state_parent = super(Escalation, self).is_correct()
if self.time_based:
self.special_properties = save_special_properties
return state_parent and state | 0.004704 |
def pos(self, element = None):
        ''' Tries to determine the part of speech of the element. '''
tags = []
if element:
if element.startswith(('de ', 'het ', 'het/de', 'de/het')) and not re.search('\[[\w|\s][\w|\s]+\]', element.split('\r\n')[0], re.U):
tags.append('NN')
if re.search('[\w|\s|/]+ \| [\w|\s|/]+ - [\w|\s|/]+', element, re.U):
tags.append('VB')
if re.search('[\w|\s]+ \| [\w|\s]+', element, re.U):
tags.append('JJ')
return tags
else:
for element in self.elements:
if self.word in unicode(element):
tag = self.pos(element)
if tag:
return tag | 0.065767 |
def get_relationships_for_destination(self, destination_id=None):
"""Gets a ``RelationshipList`` corresponding to the given peer ``Id``.
arg: destination_id (osid.id.Id): a peer ``Id``
return: (osid.relationship.RelationshipList) - the relationships
raise: NullArgument - ``destination_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
if destination_id is None:
raise NullArgument()
url_path = ('/handcar/services/relationship/families/' +
self._catalog_idstr + '/relationships?sourceid=' +
str(destination_id))
return objects.RelationshipList(self._get_request(url_path)) | 0.002347 |
def remove_namespace(doc, namespace):
'''Remove namespace in the passed document in place.'''
ns = u'{%s}' % namespace
nsl = len(ns)
for elem in doc.getiterator():
if elem.tag.startswith(ns):
elem.tag = elem.tag[nsl:]
elem.attrib['oxmlns'] = namespace | 0.025455 |
def main():
'''
This is called when we're executed from the commandline.
The current usage from the command-line is described below::
usage: hatlc [-h] [--describe] hatlcfile
read a HAT LC of any format and output to stdout
positional arguments:
hatlcfile path to the light curve you want to read and pipe to stdout
optional arguments:
-h, --help show this help message and exit
--describe don't dump the columns, show only object info and LC metadata
'''
# handle SIGPIPE sent by less, head, et al.
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
import argparse
aparser = argparse.ArgumentParser(
description='read a HAT LC of any format and output to stdout'
)
aparser.add_argument(
'hatlcfile',
action='store',
type=str,
help=("path to the light curve you want to read and pipe to stdout")
)
aparser.add_argument(
'--describe',
action='store_true',
default=False,
help=("don't dump the columns, show only object info and LC metadata")
)
args = aparser.parse_args()
filetoread = args.hatlcfile
if not os.path.exists(filetoread):
LOGERROR("file provided: %s doesn't seem to exist" % filetoread)
sys.exit(1)
# figure out the type of LC this is
filename = os.path.basename(filetoread)
# switch based on filetype
if filename.endswith('-hatlc.csv.gz') or filename.endswith('-csvlc.gz'):
if args.describe:
            describe(read_csvlc(filetoread))
sys.exit(0)
else:
            with gzip.open(filetoread, 'rb') as infd:
for line in infd:
print(line.decode(),end='')
elif filename.endswith('-hatlc.sqlite.gz'):
lcdict, msg = read_and_filter_sqlitecurve(filetoread)
# dump the description
describe(lcdict, offsetwith='#')
# stop here if describe is True
if args.describe:
sys.exit(0)
# otherwise, continue to parse the cols, etc.
# get the aperture names
apertures = sorted(lcdict['lcapertures'].keys())
# update column defs per aperture
for aper in apertures:
COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in
LC_MAG_COLUMNS})
COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in
LC_ERR_COLUMNS})
COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in
LC_FLAG_COLUMNS})
formstr = ','.join([COLUMNDEFS[x][1] for x in lcdict['columns']])
ndet = lcdict['objectinfo']['ndet']
for ind in range(ndet):
line = [lcdict[x][ind] for x in lcdict['columns']]
formline = formstr % tuple(line)
print(formline)
else:
LOGERROR('unrecognized HATLC file: %s' % filetoread)
sys.exit(1) | 0.001645 |
def login(method):
"""Require user to login."""
def wrapper(*args, **kwargs):
crawler = args[0].crawler # args[0] is a NetEase object
try:
if os.path.isfile(cookie_path):
with open(cookie_path, 'r') as cookie_file:
cookie = cookie_file.read()
expire_time = re.compile(r'\d{4}-\d{2}-\d{2}').findall(cookie)
now = time.strftime('%Y-%m-%d', time.localtime(time.time()))
if expire_time[0] > now:
crawler.session.cookies.load()
else:
crawler.login()
else:
crawler.login()
except RequestException:
click.echo('Maybe password error, please try again.')
sys.exit(1)
result = method(*args, **kwargs)
return result
return wrapper | 0.001138 |
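A hedged usage sketch of the decorator; the method and the crawler call it forwards to are hypothetical.

# Hedged usage sketch: decorate NetEase methods that require authentication.
# fetch_user_playlists is a hypothetical crawler call, not from the source.
class NetEase:
    def __init__(self, crawler):
        self.crawler = crawler

    @login
    def user_playlists(self, uid):
        return self.crawler.fetch_user_playlists(uid)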
def dump_nodes(self):
"""Dump current screen UI to list
Returns:
List of UINode object, For
example:
[UINode(
bounds=Bounds(left=0, top=0, right=480, bottom=168),
checkable=False,
class_name='android.view.View',
text='',
resource_id='',
package='com.sonyericsson.advancedwidget.clock')]
"""
xmldata = self._uiauto.dump()
dom = xml.dom.minidom.parseString(xmldata.encode('utf-8'))
root = dom.documentElement
nodes = root.getElementsByTagName('node')
ui_nodes = []
for node in nodes:
ui_nodes.append(self._parse_xml_node(node))
return ui_nodes | 0.002611 |
def _solve_msm_eigensystem(transmat, k):
"""Find the dominant eigenpairs of an MSM transition matrix
Parameters
----------
transmat : np.ndarray, shape=(n_states, n_states)
The transition matrix
k : int
The number of eigenpairs to find.
Notes
-----
    Normalize the left (:math:`\phi`) and right (:math:`\psi`) eigenfunctions
according to the following criteria.
* The first left eigenvector, \phi_1, _is_ the stationary
distribution, and thus should be normalized to sum to 1.
* The left-right eigenpairs should be biorthonormal:
<\phi_i, \psi_j> = \delta_{ij}
* The left eigenvectors should satisfy
<\phi_i, \phi_i>_{\mu^{-1}} = 1
* The right eigenvectors should satisfy <\psi_i, \psi_i>_{\mu} = 1
Returns
-------
eigvals : np.ndarray, shape=(k,)
The largest `k` eigenvalues
lv : np.ndarray, shape=(n_states, k)
The normalized left eigenvectors (:math:`\phi`) of ``transmat``
rv : np.ndarray, shape=(n_states, k)
The normalized right eigenvectors (:math:`\psi`) of ``transmat``
"""
u, lv, rv = scipy.linalg.eig(transmat, left=True, right=True)
order = np.argsort(-np.real(u))
u = np.real_if_close(u[order[:k]])
lv = np.real_if_close(lv[:, order[:k]])
rv = np.real_if_close(rv[:, order[:k]])
return _normalize_eigensystem(u, lv, rv) | 0.010653 |
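A worked sketch on a two-state chain whose answers are known in closed form; it assumes _normalize_eigensystem is available alongside the function, as in the original module.

# Hedged worked example: eigenvalues 1.0 and 0.7, stationary distribution [2/3, 1/3].
import numpy as np

transmat = np.array([[0.9, 0.1],
                     [0.2, 0.8]])
eigvals, lv, rv = _solve_msm_eigensystem(transmat, k=2)
print(eigvals)        # approximately [1.0, 0.7]
print(lv[:, 0])       # stationary distribution, ~[0.667, 0.333], sums to 1
print(lv[:, 0] @ rv)  # biorthonormality with phi_1: approximately [1, 0]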
def random_lattice_box(mol_list, mol_number, size,
spacing=np.array([0.3, 0.3, 0.3])):
'''Make a box by placing the molecules specified in *mol_list* on
random points of an evenly spaced lattice.
Using a lattice automatically ensures that no two molecules are
overlapping.
**Parameters**
mol_list: list of Molecule instances
A list of each kind of molecules to add to the system.
mol_number: list of int
The number of molecules to place for each kind.
size: np.ndarray((3,), float)
The box size in nm
spacing: np.ndarray((3,), float), [0.3 0.3 0.3]
The lattice spacing in nm.
**Returns**
A System instance.
**Example**
Typical box with 1000 water molecules randomly placed in a box of size
``[2.0 2.0 2.0]``::
from chemlab.db import ChemlabDB
# Example water molecule
water = ChemlabDB().get('molecule', 'example.water')
        s = random_lattice_box([water], [1000], [2.0, 2.0, 2.0])
'''
# Generate the coordinates
positions = spaced_lattice(size, spacing)
# Randomize them
np.random.shuffle(positions)
n_mol = sum(mol_number)
n_atoms = sum(nmol*mol.n_atoms for mol, nmol in zip(mol_list, mol_number))
# Assert that we have enough space
assert len(positions) >= n_mol, "Can't fit {} molecules in {} spaces".format(n_mol,
len(positions))
box_vectors = np.zeros((3, 3))
box_vectors[0,0] = size[0]
box_vectors[1,1] = size[1]
box_vectors[2,2] = size[2]
# Initialize a system
s = System.empty()
with s.batch() as b:
mol_list = [m.copy() for m in mol_list]
# Add the molecules
pi = 0
for i, mol in enumerate(mol_list):
for j in range(mol_number[i]):
mol.move_to(positions[pi])
b.append(mol.copy())
pi += 1
return s | 0.007855 |