text | score
---|---|
def _calc_recip(self):
"""
Perform the reciprocal space summation. Calculates the quantity
E_recip = 1/(2PiV) sum_{G < Gmax} exp(-(G.G/4/eta))/(G.G) S(G)S(-G)
where
S(G) = sum_{k=1,N} q_k exp(-i G.r_k)
S(G)S(-G) = |S(G)|**2
This method is heavily vectorized to utilize numpy's C backend for
speed.
"""
numsites = self._s.num_sites
prefactor = 2 * pi / self._vol
        erecip = np.zeros((numsites, numsites), dtype=float)
        forces = np.zeros((numsites, 3), dtype=float)
coords = self._coords
rcp_latt = self._s.lattice.reciprocal_lattice
recip_nn = rcp_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0],
self._gmax)
frac_coords = [fcoords for (fcoords, dist, i, img) in recip_nn if dist != 0]
gs = rcp_latt.get_cartesian_coords(frac_coords)
g2s = np.sum(gs ** 2, 1)
expvals = np.exp(-g2s / (4 * self._eta))
grs = np.sum(gs[:, None] * coords[None, :], 2)
oxistates = np.array(self._oxi_states)
# create array where q_2[i,j] is qi * qj
qiqj = oxistates[None, :] * oxistates[:, None]
# calculate the structure factor
sreals = np.sum(oxistates[None, :] * np.cos(grs), 1)
simags = np.sum(oxistates[None, :] * np.sin(grs), 1)
for g, g2, gr, expval, sreal, simag in zip(gs, g2s, grs, expvals,
sreals, simags):
# Uses the identity sin(x)+cos(x) = 2**0.5 sin(x + pi/4)
m = (gr[None, :] + pi / 4) - gr[:, None]
np.sin(m, m)
m *= expval / g2
erecip += m
if self._compute_forces:
pref = 2 * expval / g2 * oxistates
factor = prefactor * pref * (
sreal * np.sin(gr) - simag * np.cos(gr))
forces += factor[:, None] * g[None, :]
forces *= EwaldSummation.CONV_FACT
erecip *= prefactor * EwaldSummation.CONV_FACT * qiqj * 2 ** 0.5
return erecip, forces | 0.001408 |
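For readability, the reciprocal-space term from the docstring above can be written out in LaTeX. Note that the code's `prefactor` is 2π/V while the docstring writes 1/(2πV); the rendering below follows the code and is only a reading of this snippet, not an authoritative statement of the Ewald method.

```latex
E_{\mathrm{recip}}
  = \frac{2\pi}{V} \sum_{0 < |\mathbf{G}| < G_{\max}}
    \frac{e^{-\mathbf{G}\cdot\mathbf{G}/(4\eta)}}{\mathbf{G}\cdot\mathbf{G}}\,
    S(\mathbf{G})\,S(-\mathbf{G}),
\qquad
S(\mathbf{G}) = \sum_{k=1}^{N} q_k\, e^{-i\,\mathbf{G}\cdot\mathbf{r}_k},
\qquad
S(\mathbf{G})\,S(-\mathbf{G}) = \lvert S(\mathbf{G})\rvert^{2}
```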
def unpack_rpc_response(status, response=None, rpc_id=0, address=0):
"""Unpack an RPC status back in to payload or exception."""
status_code = status & ((1 << 6) - 1)
if address == 8:
status_code &= ~(1 << 7)
if status == 0:
raise BusyRPCResponse()
elif status == 2:
raise RPCNotFoundError("rpc %d:%04X not found" % (address, rpc_id))
elif status == 3:
raise RPCErrorCode(status_code)
elif status == 0xFF:
raise TileNotFoundError("tile %d not found" % address)
elif status_code != 0:
raise RPCErrorCode(status_code)
if response is None:
response = b''
return response | 0.001497 |
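A hypothetical call sketch for `unpack_rpc_response`; the status bytes, `rpc_id` and `address` values below are invented for illustration and only assume the convention visible in the function itself (the low 6 bits carry the application error code).

```python
# Success path: low 6 bits of the status are zero, so the payload is returned.
payload = unpack_rpc_response(status=0x40, response=b"\x01\x02", rpc_id=0x8000, address=10)
assert payload == b"\x01\x02"

# Error path: a status of 3 raises RPCErrorCode carrying the embedded error code.
try:
    unpack_rpc_response(status=0x03, rpc_id=0x8000, address=10)
except RPCErrorCode as err:
    print("RPC reported an application error:", err)
```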
def overview(index, start, end):
"""Compute metrics in the overview section for enriched git indexes.
Returns a dictionary. Each key in the dictionary is the name of
a metric, the value is the value of that metric. Value can be
a complex object (eg, a time series).
:param index: index object
:param start: start date to get the data from
    :param end: end date to get the data up to
:return: dictionary with the value of the metrics
"""
results = {
"activity_metrics": [Commits(index, start, end)],
"author_metrics": [Authors(index, start, end)],
"bmi_metrics": [],
"time_to_close_metrics": [],
"projects_metrics": []
}
return results | 0.001381 |
def set(self, prefix, url, obj):
""" Add an object into the cache """
if not self.cache_dir:
return
filename = self._get_cache_file(prefix, url)
try:
os.makedirs(os.path.join(self.cache_dir, prefix))
except OSError:
pass
with open(filename, 'wb') as file:
pickle.dump(obj, file) | 0.005305 |
def get_storage_controller_hotplug_capable(self, controller_type):
"""Returns whether the given storage controller supports
hot-plugging devices.
in controller_type of type :class:`StorageControllerType`
The storage controller to check the setting for.
return hotplug_capable of type bool
Returned flag indicating whether the controller is hotplug capable
"""
if not isinstance(controller_type, StorageControllerType):
raise TypeError("controller_type can only be an instance of type StorageControllerType")
hotplug_capable = self._call("getStorageControllerHotplugCapable",
in_p=[controller_type])
return hotplug_capable | 0.005369 |
def _random_key(self):
""" Return random session key """
hashstr = '%s%s' % (random.random(), self.time_module.time())
        return hashlib.md5(hashstr.encode('utf-8')).hexdigest() | 0.010989 |
def check_privatenet(self):
"""
Check if privatenet is running, and if container is same as the current Chains/privnet database.
Raises:
PrivnetConnectionError: if the private net couldn't be reached or the nonce does not match
"""
rpc_settings.setup(self.RPC_LIST)
client = RPCClient()
try:
version = client.get_version()
except NEORPCException:
raise PrivnetConnectionError("Error: private network container doesn't seem to be running, or RPC is not enabled.")
print("Privatenet useragent '%s', nonce: %s" % (version["useragent"], version["nonce"]))
# Now check if nonce is the same as in the chain path
nonce_container = str(version["nonce"])
neopy_chain_meta_filename = os.path.join(self.chain_leveldb_path, ".privnet-nonce")
if os.path.isfile(neopy_chain_meta_filename):
nonce_chain = open(neopy_chain_meta_filename, "r").read()
if nonce_chain != nonce_container:
raise PrivnetConnectionError(
"Chain database in Chains/privnet is for a different private network than the current container. "
"Consider deleting the Chain directory with 'rm -rf %s*'." % self.chain_leveldb_path
)
else:
# When the Chains/privnet folder is removed, we need to create the directory
if not os.path.isdir(self.chain_leveldb_path):
os.mkdir(self.chain_leveldb_path)
# Write the nonce to the meta file
with open(neopy_chain_meta_filename, "w") as f:
f.write(nonce_container) | 0.005935 |
def write(self, destination, source_model, name=None):
"""
Exports to NRML
"""
if os.path.exists(destination):
os.remove(destination)
self.destination = destination
if name:
source_model.name = name
output_source_model = Node("sourceModel", {"name": name})
dic = groupby(source_model.sources,
operator.itemgetter('tectonicRegion'))
for i, (trt, srcs) in enumerate(dic.items(), 1):
output_source_model.append(
Node('sourceGroup',
{'tectonicRegion': trt, 'name': 'group %d' % i},
nodes=srcs))
print("Exporting Source Model to %s" % self.destination)
with open(self.destination, "wb") as f:
nrml.write([output_source_model], f, "%s") | 0.002364 |
def generateVariantAnnotation(self, variant):
"""
Generate a random variant annotation based on a given variant.
This generator should be seeded with a value that is unique to the
variant so that the same annotation will always be produced regardless
of the order it is generated in.
"""
# To make this reproducible, make a seed based on this
# specific variant.
seed = self._randomSeed + variant.start + variant.end
randomNumberGenerator = random.Random()
randomNumberGenerator.seed(seed)
ann = protocol.VariantAnnotation()
ann.variant_annotation_set_id = str(self.getCompoundId())
ann.variant_id = variant.id
ann.created = datetime.datetime.now().isoformat() + "Z"
# make a transcript effect for each alternate base element
# multiplied by a random integer (1,5)
for base in variant.alternate_bases:
ann.transcript_effects.add().CopyFrom(
self.generateTranscriptEffect(
variant, ann, base, randomNumberGenerator))
ann.id = self.getVariantAnnotationId(variant, ann)
return ann | 0.001688 |
def Log(self, format_str, *args):
"""Logs the message using the flow's standard logging.
Args:
format_str: Format string
*args: arguments to the format string
"""
log_entry = rdf_flow_objects.FlowLogEntry(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
hunt_id=self.rdf_flow.parent_hunt_id,
message=format_str % args)
data_store.REL_DB.WriteFlowLogEntries([log_entry])
if self.rdf_flow.parent_hunt_id:
db_compat.ProcessHuntFlowLog(self.rdf_flow, format_str % args) | 0.003584 |
def get_prompt_tokens(_):
"""Return a list of tokens for the prompt"""
namespace = q(r'\d')
if namespace == '.':
namespace = ''
return [(Token.Generic.Prompt, 'q%s)' % namespace)] | 0.004926 |
def add_group(group_name, system_group=False, gid=None):
"""Add a group to the system
Will log but otherwise succeed if the group already exists.
:param str group_name: group to create
:param bool system_group: Create system group
    :param int gid: GID for the group being created
:returns: The password database entry struct, as returned by `grp.getgrnam`
"""
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
if gid:
group_info = grp.getgrgid(gid)
log('group with gid {0} already exists!'.format(gid))
except KeyError:
log('creating group {0}'.format(group_name))
add_new_group(group_name, system_group, gid)
group_info = grp.getgrnam(group_name)
return group_info | 0.001221 |
def root_and_path(self):
""":returns: a tuple (parent, [members,... ]"""
rt = []
curAttr = self
while isinstance(curAttr.parent, AdHocTree):
rt.append(curAttr.name)
curAttr = curAttr.parent
rt.reverse()
return (curAttr.parent, rt) | 0.006623 |
def autoconf(self):
"""Implements Munin Plugin Auto-Configuration Option.
@return: True if plugin can be auto-configured, False otherwise.
"""
apacheInfo = ApacheInfo(self._host, self._port,
self._user, self._password,
self._statuspath, self._ssl)
return apacheInfo is not None | 0.012195 |
def log_backend_action(action=None):
""" Logging for backend method.
Expects django model instance as first argument.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(self, instance, *args, **kwargs):
            action_name = func.__name__.replace('_', ' ') if action is None else action
logger.debug('About to %s `%s` (PK: %s).', action_name, instance, instance.pk)
result = func(self, instance, *args, **kwargs)
logger.debug('Action `%s` was executed successfully for `%s` (PK: %s).',
action_name, instance, instance.pk)
return result
return wrapped
return decorator | 0.005714 |
def _make_graphite_api_points_list(influxdb_data):
"""Make graphite-api data points dictionary from Influxdb ResultSet data"""
_data = {}
for key in influxdb_data.keys():
_data[key[0]] = [(datetime.datetime.fromtimestamp(float(d['time'])),
d['value']) for d in influxdb_data.get_points(key[0])]
return _data | 0.005602 |
def nameservers(self):
"""
:rtype: list
:returns: A list of nameserver strings for this hosted zone.
"""
# If this HostedZone was instantiated by ListHostedZones, the nameservers
# attribute didn't get populated. If the user requests it, we'll
# lazy load by querying it in after the fact. It's safe to cache like
# this since these nameserver values won't change.
if not self._nameservers:
# We'll just snatch the nameserver values from a fresh copy
# via GetHostedZone.
hosted_zone = self.connection.get_hosted_zone_by_id(self.id)
self._nameservers = hosted_zone._nameservers
return self._nameservers | 0.004093 |
def _read_section(self, f, integer=True):
"""
Reads one section from the mesh3d file.
integer ... if True, all numbers are passed to int(), otherwise to
float(), before returning
Some examples how a section can look like:
2
1 2 5 4 7 8 11 10
2 3 6 5 8 9 12 11
or
5
1 2 3 4 1
1 2 6 5 1
2 3 7 6 1
3 4 8 7 1
4 1 5 8 1
or
0
"""
        if integer:
            dtype = int
        else:
            dtype = float
l = self._read_line(f)
N = int(l)
rows = []
for i in range(N):
l = self._read_line(f)
row = nm.fromstring(l, sep=" ", dtype=dtype)
rows.append(row)
return nm.array(rows) | 0.007273 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'size') and self.size is not None:
_dict['size'] = self.size
if hasattr(self, 'hits') and self.hits is not None:
_dict['hits'] = self.hits._to_dict()
return _dict | 0.006061 |
def fd_weights_all(x, x0=0, n=1):
"""
Return finite difference weights for derivatives of all orders up to n.
Parameters
----------
x : vector, length m
x-coordinates for grid points
x0 : scalar
location where approximations are to be accurate
n : scalar integer
highest derivative that we want to find weights for
Returns
-------
weights : array, shape n+1 x m
contains coefficients for the j'th derivative in row j (0 <= j <= n)
Notes
-----
The x values can be arbitrarily spaced but must be distinct and len(x) > n.
The Fornberg algorithm is much more stable numerically than regular
vandermonde systems for large values of n.
See also
--------
fd_weights
References
----------
B. Fornberg (1998)
"Calculation of weights_and_points in finite difference formulas",
SIAM Review 40, pp. 685-691.
http://www.scholarpedia.org/article/Finite_difference_method
"""
m = len(x)
_assert(n < m, 'len(x) must be larger than n')
weights = np.zeros((m, n + 1))
_fd_weights_all(weights, x, x0, n)
return weights.T | 0.000861 |
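A short usage sketch for `fd_weights_all` (assuming `np` is NumPy and the private helper `_fd_weights_all` referenced above is available): approximating the first derivative of exp at 0 on a three-point grid.

```python
import numpy as np

x = np.array([-0.1, 0.0, 0.1])
w = fd_weights_all(x, x0=0, n=1)   # shape (2, 3): row j holds the weights for the j'th derivative
d1 = w[1] @ np.exp(x)              # central-difference estimate of exp'(0)
print(d1)                          # ~1.0017, close to the exact value 1
```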
def start(controller_class):
"""Start the Helper controller either in the foreground or as a daemon
process.
:param controller_class: The controller class handle to create and run
:type controller_class: callable
"""
args = parser.parse()
obj = controller_class(args, platform.operating_system())
if args.foreground:
try:
obj.start()
except KeyboardInterrupt:
obj.stop()
else:
try:
with platform.Daemon(obj) as daemon:
daemon.start()
except (OSError, ValueError) as error:
sys.stderr.write('\nError starting %s: %s\n\n' %
(sys.argv[0], error))
sys.exit(1) | 0.00137 |
def user_save_event(user):
""" Handle persist event for user entities """
msg = 'User ({}){} updated/saved'.format(user.id, user.email)
current_app.logger.info(msg) | 0.005682 |
def files(self):
""" Return the names of files to be created. """
files_description = [
[ self.project_name,
'bootstrap',
'BootstrapScriptFileTemplate' ],
[ self.project_name,
'CHANGES.txt',
'PythonPackageCHANGESFileTemplate' ],
[ self.project_name,
'LICENSE.txt',
'GPL3FileTemplate' ],
[ self.project_name,
'MANIFEST.in',
'PythonPackageMANIFESTFileTemplate' ],
[ self.project_name,
'README.txt',
'READMEReSTFileTemplate' ],
[ self.project_name,
'setup.py',
'PythonPackageSetupFileTemplate' ],
[ self.project_name + '/' + self.project_name.lower(),
'__init__.py',
None ],
[ self.project_name + '/docs',
'index.rst',
None ],
]
return files_description | 0.017964 |
def lag_calc(detections, detect_data, template_names, templates,
shift_len=0.2, min_cc=0.4, horizontal_chans=['E', 'N', '1', '2'],
vertical_chans=['Z'], cores=1, interpolate=False,
plot=False, parallel=True, debug=0):
"""
Main lag-calculation function for detections of specific events.
Overseer function to take a list of detection objects, cut the data for
them to lengths of the same length of the template + shift_len on
either side. This will output a :class:`obspy.core.event.Catalog` of
picked events. Pick times are based on the lag-times found at the maximum
correlation, providing that correlation is above the min_cc.
:type detections: list
:param detections:
List of :class:`eqcorrscan.core.match_filter.Detection` objects.
:type detect_data: obspy.core.stream.Stream
:param detect_data:
All the data needed to cut from - can be a gappy Stream.
:type template_names: list
:param template_names:
List of the template names, used to help identify families of events.
Must be in the same order as templates.
:type templates: list
:param templates:
List of the templates, templates must be a list of
:class:`obspy.core.stream.Stream` objects.
:type shift_len: float
:param shift_len:
Shift length allowed for the pick in seconds, will be plus/minus this
amount - default=0.2
:type min_cc: float
:param min_cc:
Minimum cross-correlation value to be considered a pick, default=0.4.
:type horizontal_chans: list
:param horizontal_chans:
List of channel endings for horizontal-channels, on which S-picks will
be made.
:type vertical_chans: list
:param vertical_chans:
List of channel endings for vertical-channels, on which P-picks will
be made.
:type cores: int
:param cores:
Number of cores to use in parallel processing, defaults to one.
:type interpolate: bool
:param interpolate:
Interpolate the correlation function to achieve sub-sample precision.
:type plot: bool
:param plot:
To generate a plot for every detection or not, defaults to False
:type parallel: bool
:param parallel: Turn parallel processing on or off.
:type debug: int
:param debug: Debug output level, 0-5 with 5 being the most output.
:returns:
Catalog of events with picks. No origin information is included.
These events can then be written out via
:func:`obspy.core.event.Catalog.write`, or to Nordic Sfiles using
:func:`eqcorrscan.utils.sfile_util.eventtosfile` and located
externally.
:rtype: obspy.core.event.Catalog
.. note::
Picks output in catalog are generated relative to the template
start-time. For example, if you generated your template with a
pre_pick time of 0.2 seconds, you should expect picks generated by
lag_calc to occur 0.2 seconds before the true phase-pick. This
is because we do not currently store template meta-data alongside the
templates.
.. warning::
Because of the above note, origin times will be consistently
shifted by the static pre_pick applied to the templates.
.. warning::
This routine requires only one template per channel (e.g. you should
not use templates with a P and S template on a single channel). If
this does occur an error will be raised.
.. note::
S-picks will be made on horizontal channels, and P picks made on
vertical channels - the default is that horizontal channels end in
one of: 'E', 'N', '1' or '2', and that vertical channels end in 'Z'.
The options vertical_chans and horizontal_chans can be changed to suit
your dataset.
.. note::
Individual channel cross-correlations are stored as a
:class:`obspy.core.event.Comment` for each pick, and the summed
cross-correlation value resulting from these is stored as a
:class:`obspy.core.event.Comment` in the main
:class:`obspy.core.event.Event` object.
.. note::
The order of events is preserved (e.g. detections[n] == output[n]),
providing picks have been made for that event. If no picks have
been made for an event, it will not be included in the output.
However, as each detection has an ID associated with it, these can
be mapped to the output resource_id for each Event in the output
Catalog. e.g.
detections[n].id == output[m].resource_id
if the output[m] is for the same event as detections[n].
"""
if debug > 2 and plot:
prep_plot = True
else:
prep_plot = False
# First check that sample rates are equal for everything
for tr in detect_data:
if tr.stats.sampling_rate != detect_data[0].stats.sampling_rate:
raise LagCalcError('Sampling rates are not equal')
for template in templates:
for tr in template:
if tr.stats.sampling_rate != detect_data[0].stats.sampling_rate:
raise LagCalcError('Sampling rates are not equal')
# Work out the delays for each template
delays = [] # List of tuples of (tempname, (sta, chan, delay))
zipped_templates = list(zip(template_names, templates))
detect_stachans = [(tr.stats.station, tr.stats.channel)
for tr in detect_data]
for template in zipped_templates:
temp_delays = {}
# Remove channels not present in continuous data
_template = template[1].copy()
for tr in _template:
if (tr.stats.station, tr.stats.channel) not in detect_stachans:
_template.remove(tr)
for tr in _template:
temp_delays.update(
{tr.stats.station + '.' + tr.stats.channel:
tr.stats.starttime -
_template.sort(['starttime'])[0].stats.starttime})
delays.append((template[0], temp_delays))
del _template
# Segregate detections by template, then feed to day_loop
initial_cat = Catalog()
for template in zipped_templates:
print('Running lag-calc for template %s' % template[0])
template_detections = [detection for detection in detections
if detection.template_name == template[0]]
t_delays = [d for d in delays if d[0] == template[0]][0][1]
# Check template-channels against triggered detection-channels. If the
# detection was made without template-channels that would have trig-
# gered earlier, then adjust the detection by that delay/earliness.
delaylist = list(t_delays.items())
delaylist.sort(key=lambda tup: tup[1])
for detection in template_detections:
# Find the channel with smallest delay on which the detection
# triggered. Use that delay to reduce the detection-time.
detection_stachans = list()
for stachan in detection.chans:
detection_stachans.append(stachan[0] + '.' + stachan[1])
# Find the earliest template-channel that triggered at detection
earlier = 0
for delay in delaylist:
delay_stachan = delay[0]
if delay_stachan in detection_stachans:
earlier = delay[1]
break
detection.detect_time = detection.detect_time - earlier
if earlier > 0:
print('Adjusting ' + detection.id + ' by ' + str(earlier))
debug_print(
'There are %i detections' % len(template_detections), 2, debug)
detect_streams = _prepare_data(
detect_data=detect_data, detections=template_detections,
template=template, delays=t_delays, shift_len=shift_len,
plot=prep_plot)
detect_streams = [detect_stream[1] for detect_stream in detect_streams]
if len(template_detections) > 0:
template_cat = _day_loop(
detection_streams=detect_streams, template=template[1],
min_cc=min_cc, detections=template_detections,
horizontal_chans=horizontal_chans,
vertical_chans=vertical_chans, interpolate=interpolate,
cores=cores, parallel=parallel, debug=debug)
initial_cat += template_cat
if plot:
for i, event in enumerate(template_cat):
if len(event.picks) == 0:
continue
plot_stream = detect_streams[i].copy()
template_plot = template[1].copy()
pick_stachans = [(pick.waveform_id.station_code,
pick.waveform_id.channel_code)
for pick in event.picks]
for tr in plot_stream:
if (tr.stats.station, tr.stats.channel) \
not in pick_stachans:
plot_stream.remove(tr)
for tr in template_plot:
if (tr.stats.station, tr.stats.channel) \
not in pick_stachans:
template_plot.remove(tr)
plot_repicked(template=template_plot, picks=event.picks,
det_stream=plot_stream)
# Order the catalogue to match the input
output_cat = Catalog()
for det in detections:
event = [e for e in initial_cat if str(e.resource_id) == str(det.id)]
if len(event) == 1:
output_cat.append(event[0])
elif len(event) == 0:
print('No picks made for detection: \n%s' % det.__str__())
else:
raise NotImplementedError('Multiple events with same id,'
' should not happen')
return output_cat | 0.0001 |
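A hedged sketch of how `lag_calc` might be driven; `detections`, the continuous `Stream` `st`, and the matching `template_names`/`templates` lists are assumed to exist already and are not defined in this snippet.

```python
repicked = lag_calc(
    detections=detections,
    detect_data=st,
    template_names=template_names,
    templates=templates,
    shift_len=0.2,
    min_cc=0.4,
    interpolate=True,
    cores=4,
    parallel=True,
)
# `repicked` is an obspy Catalog, which can be written out as the docstring suggests.
repicked.write("repicked_catalog.xml", format="QUAKEML")
```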
def my_func(version): # noqa: D202
"""Enclosing function."""
class MyClass(object):
"""Enclosed class."""
if version == 2:
import docs.support.python2_module as pm
else:
import docs.support.python3_module as pm
def __init__(self, value):
self._value = value
def _get_value(self):
return self._value
value = property(_get_value, pm._set_value, None, "Value property") | 0.002101 |
def _make(c):
"""
create html from template, adding figure,
annotation and sequences counts
"""
ann = defaultdict(list)
for pos in c['ann']:
for db in pos:
ann[db] += list(pos[db])
logger.debug(ann)
valid = [l for l in c['valid']]
ann_list = [", ".join(list(set(ann[feature]))) for feature in ann if feature in valid]
return valid, ann_list | 0.007444 |
def update(ctx, name, description, tags):
"""Update job.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon job -j 2 update --description="new description for my job"
```
"""
user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job'))
update_dict = {}
if name:
update_dict['name'] = name
if description:
update_dict['description'] = description
tags = validate_tags(tags)
if tags:
update_dict['tags'] = tags
if not update_dict:
Printer.print_warning('No argument was provided to update the job.')
sys.exit(0)
try:
response = PolyaxonClient().job.update_job(
user, project_name, _job, update_dict)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not update job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Job updated.")
get_job_details(response) | 0.00273 |
def python_to_jupyter_cli(args=None, namespace=None):
"""Exposes the jupyter notebook renderer to the command line
Takes the same arguments as ArgumentParser.parse_args
"""
from . import gen_gallery # To avoid circular import
parser = argparse.ArgumentParser(
description='Sphinx-Gallery Notebook converter')
parser.add_argument('python_src_file', nargs='+',
help='Input Python file script to convert. '
'Supports multiple files and shell wildcards'
' (e.g. *.py)')
args = parser.parse_args(args, namespace)
for src_file in args.python_src_file:
file_conf, blocks = split_code_and_text_blocks(src_file)
print('Converting {0}'.format(src_file))
gallery_conf = copy.deepcopy(gen_gallery.DEFAULT_GALLERY_CONF)
example_nb = jupyter_notebook(blocks, gallery_conf)
save_notebook(example_nb, replace_py_ipynb(src_file)) | 0.001034 |
def help_text(self, name, text, text_kind='plain', trim_pfx=0):
"""
Provide help text for the user.
This method will convert the text as necessary with docutils and
display it in the WBrowser plugin, if available. If the plugin is
not available and the text is type 'rst' then the text will be
displayed in a plain text widget.
Parameters
----------
name : str
Category of help to show.
text : str
The text to show. Should be plain, HTML or RST text
text_kind : str (optional)
One of 'plain', 'html', 'rst'. Default is 'plain'.
trim_pfx : int (optional)
Number of spaces to trim off the beginning of each line of text.
"""
if trim_pfx > 0:
# caller wants to trim some space off the front
# of each line
text = toolbox.trim_prefix(text, trim_pfx)
if text_kind == 'rst':
# try to convert RST to HTML using docutils
try:
overrides = {'input_encoding': 'ascii',
'output_encoding': 'utf-8'}
text_html = publish_string(text, writer_name='html',
settings_overrides=overrides)
# docutils produces 'bytes' output, but webkit needs
# a utf-8 string
text = text_html.decode('utf-8')
text_kind = 'html'
except Exception as e:
self.logger.error("Error converting help text to HTML: %s" % (
str(e)))
# revert to showing RST as plain text
else:
raise ValueError(
"I don't know how to display text of kind '%s'" % (text_kind))
if text_kind == 'html':
self.help(text=text, text_kind='html')
else:
self.show_help_text(name, text) | 0.001019 |
def watch(self, filepath, func=None, delay=None, ignore=None):
"""Add the given filepath for watcher list.
        Once you have initialized a server, watch file changes before
        serving the server::
server.watch('static/*.stylus', 'make static')
def alert():
print('foo')
server.watch('foo.txt', alert)
server.serve()
:param filepath: files to be watched, it can be a filepath,
a directory, or a glob pattern
:param func: the function to be called, it can be a string of
shell command, or any callable object without
parameters
:param delay: Delay sending the reload message. Use 'forever' to
not send it. This is useful to compile sass files to
css, but reload on changed css files then only.
:param ignore: A function return True to ignore a certain pattern of
filepath.
"""
if isinstance(func, string_types):
cmd = func
func = shell(func)
func.name = "shell: {}".format(cmd)
self.watcher.watch(filepath, func, delay, ignore=ignore) | 0.001613 |
def rmdir(self, paths):
''' Delete a directory
:param paths: Paths to delete
:type paths: list
:returns: a generator that yields dictionaries
        .. note:: directories have to be empty.
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("rmdir: no path given")
processor = lambda path, node: self._handle_rmdir(path, node)
for item in self._find_items(paths, processor, include_toplevel=True):
if item:
yield item | 0.004808 |
def _manifest(self):
"""Return manifest content."""
if self._manifest_cache is None:
self._manifest_cache = self._storage_broker.get_manifest()
return self._manifest_cache | 0.009615 |
def fmt_duration(duration):
""" Format a duration value in seconds to a readable form.
"""
try:
return fmt.human_duration(float(duration), 0, 2, True)
except (ValueError, TypeError):
return "N/A".rjust(len(fmt.human_duration(0, 0, 2, True))) | 0.003663 |
async def observations(self):
"""Retrieve current weather observation."""
observations = []
raw_stations = await self.retrieve(url=API_OBSERVATION_STATIONS,
headers={'Referer': 'http://www.ipma.pt'})
if not raw_stations:
return observations
raw_observations = await self.retrieve(url=API_OBSERVATION_OBSERVATIONS,
headers={'Referer': 'http://www.ipma.pt'})
if not raw_observations:
return observations
Station = namedtuple('ObservationStation', ['latitude', 'longitude', 'stationID',
'stationName', 'currentObs'])
Observation = namedtuple('Observation', ['temperature', 'humidity',
'windspeed', 'winddirection',
'precipitation', 'pressure',
'description'])
last_observation = sorted(raw_observations.keys())[-1]
for station in raw_stations:
_station = raw_observations[last_observation][str(station.get('properties').get('idEstacao'))]
if _station is None:
continue
_observation = Observation(
_station['temperatura'],
_station['humidade'],
_station['intensidadeVentoKM'] if _station['intensidadeVentoKM'] != -99.0 else None,
WIND_DIRECTION[WIND_DIRECTION_ID[_station['idDireccVento']]],
_station['precAcumulada'] if _station['precAcumulada'] != -99.0 else None,
_station['pressao'] if _station['pressao'] != -99.0 else None,
"{} @ {}".format(station.get('properties').get('localEstacao'), last_observation),
)
_station = Station(
station.get('geometry').get('coordinates')[1],
station.get('geometry').get('coordinates')[0],
station.get('properties').get('idEstacao'),
station.get('properties').get('localEstacao'),
_observation)
observations.append(_station)
return observations | 0.005712 |
def flownet2_fusion(self, x):
"""
Architecture in Table 4 of FlowNet 2.0.
Args:
x: NCHW tensor, where C=11 is the concatenation of 7 items of [3, 2, 2, 1, 1, 1, 1] channels.
"""
with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1),
padding='valid', strides=2, kernel_size=3,
data_format='channels_first'), \
argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity,
data_format='channels_first', strides=2, kernel_size=4):
conv0 = tf.layers.conv2d(pad(x, 1), 64, name='conv0', strides=1)
x = tf.layers.conv2d(pad(conv0, 1), 64, name='conv1')
conv1 = tf.layers.conv2d(pad(x, 1), 128, name='conv1_1', strides=1)
x = tf.layers.conv2d(pad(conv1, 1), 128, name='conv2')
conv2 = tf.layers.conv2d(pad(x, 1), 128, name='conv2_1', strides=1)
flow2 = tf.layers.conv2d(pad(conv2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)
flow2_up = tf.layers.conv2d_transpose(flow2, 2, name='upsampled_flow2_to_1')
x = tf.layers.conv2d_transpose(conv2, 32, name='deconv1', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat1 = tf.concat([conv1, x, flow2_up], axis=1, name='concat1')
interconv1 = tf.layers.conv2d(pad(concat1, 1), 32, strides=1, name='inter_conv1', activation=tf.identity)
flow1 = tf.layers.conv2d(pad(interconv1, 1), 2, name='predict_flow1', strides=1, activation=tf.identity)
flow1_up = tf.layers.conv2d_transpose(flow1, 2, name='upsampled_flow1_to_0')
x = tf.layers.conv2d_transpose(concat1, 16, name='deconv0', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat0 = tf.concat([conv0, x, flow1_up], axis=1, name='concat0')
interconv0 = tf.layers.conv2d(pad(concat0, 1), 16, strides=1, name='inter_conv0', activation=tf.identity)
flow0 = tf.layers.conv2d(pad(interconv0, 1), 2, name='predict_flow0', strides=1, activation=tf.identity)
return tf.identity(flow0, name='flow2') | 0.006422 |
def complement_alleles(self):
"""Complement the alleles of this variant.
This will call this module's `complement_alleles` function.
Note that this will not create a new object, but modify the state of
the current instance.
"""
self.alleles = self._encode_alleles(
[complement_alleles(i) for i in self.alleles]
) | 0.005222 |
def run_pandoc(text='', args=None):
"""
Low level function that calls Pandoc with (optionally)
some input text and/or arguments
"""
if args is None:
args = []
pandoc_path = which('pandoc')
if pandoc_path is None or not os.path.exists(pandoc_path):
raise OSError("Path to pandoc executable does not exists")
proc = Popen([pandoc_path] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate(input=text.encode('utf-8'))
exitcode = proc.returncode
if exitcode != 0:
raise IOError(err)
return out.decode('utf-8') | 0.001664 |
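A minimal usage sketch for `run_pandoc`, assuming pandoc is installed and on the PATH; the `-f`/`-t` flags are standard pandoc options rather than anything defined by this snippet.

```python
html = run_pandoc(
    text="# Hello\n\nSome *emphasised* text.",
    args=["-f", "markdown", "-t", "html"],
)
print(html)
```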
def verify_ed25519_signature(public_key, contents, signature, message):
"""Verify that ``signature`` comes from ``public_key`` and ``contents``.
Args:
public_key (Ed25519PublicKey): the key to verify the signature
contents (bytes): the contents that was signed
signature (bytes): the signature to verify
message (str): the error message to raise.
Raises:
ScriptWorkerEd25519Error: on failure
"""
try:
public_key.verify(signature, contents)
except InvalidSignature as exc:
raise ScriptWorkerEd25519Error(message % {'exc': str(exc)}) | 0.001629 |
def _get_updated_environment(self, env_dict=None):
"""Returns globals environment with 'magic' variable
Parameters
----------
env_dict: Dict, defaults to {'S': self}
\tDict that maps global variable name to value
"""
if env_dict is None:
env_dict = {'S': self}
env = globals().copy()
env.update(env_dict)
return env | 0.004854 |
def parse_branchinglevel(self, branchinglevel_node, depth, validate):
"""
Parse one branching level.
:param branchinglevel_node:
``etree.Element`` object with tag "logicTreeBranchingLevel".
:param depth:
The sequential number of this branching level, based on 0.
:param validate:
Whether or not the branching level, its branchsets and their
branches should be validated.
Enumerates children branchsets and call :meth:`parse_branchset`,
:meth:`validate_branchset`, :meth:`parse_branches` and finally
:meth:`apply_branchset` for each.
Keeps track of "open ends" -- the set of branches that don't have
any child branchset on this step of execution. After processing
of every branching level only those branches that are listed in it
can have child branchsets (if there is one on the next level).
"""
new_open_ends = set()
branchsets = branchinglevel_node.nodes
for number, branchset_node in enumerate(branchsets):
branchset = self.parse_branchset(branchset_node, depth, number,
validate)
self.parse_branches(branchset_node, branchset, validate)
if self.root_branchset is None: # not set yet
self.num_paths = 1
self.root_branchset = branchset
else:
self.apply_branchset(branchset_node, branchset)
for branch in branchset.branches:
new_open_ends.add(branch)
self.num_paths *= len(branchset.branches)
if number > 0:
logging.warning('There is a branching level with multiple '
'branchsets in %s', self.filename)
self.open_ends.clear()
self.open_ends.update(new_open_ends) | 0.001059 |
def is_admin(self, send, nick, required_role='admin'):
"""Checks if a nick is a admin.
If NickServ hasn't responded yet, then the admin is unverified,
so assume they aren't a admin.
"""
# If the required role is None, bypass checks.
if not required_role:
return True
# Current roles are admin and owner, which is a superset of admin.
with self.db.session_scope() as session:
admin = session.query(orm.Permissions).filter(orm.Permissions.nick == nick).first()
if admin is None:
return False
# owner implies admin, but not the other way around.
if required_role == "owner" and admin.role != "owner":
return False
# no nickserv support, assume people are who they say they are.
if not self.config['feature'].getboolean('nickserv'):
return True
if not admin.registered:
self.update_authstatus(nick)
# We don't necessarily want to complain in all cases.
if send is not None:
send("Unverified admin: %s" % nick, target=self.config['core']['channel'])
return False
else:
if not self.features['account-notify']:
# reverify every 5min if we don't have the notification feature.
if datetime.now() - admin.time > timedelta(minutes=5):
self.update_authstatus(nick)
return True | 0.003193 |
def peek(self, default=None):
        '''Returns `default` if there is no subsequent item'''
try:
            result = next(self.pointer)
# immediately push it back onto the front of the iterable
self.pointer = itertools.chain([result], self.pointer)
return result
except StopIteration:
# nothing to put back; iterating doesn't change anything past the end
return default | 0.006696 |
def highlight_code(self, ontospy_entity):
"""
produce an html version of Turtle code with syntax highlighted
using Pygments CSS
"""
try:
pygments_code = highlight(ontospy_entity.rdf_source(),
TurtleLexer(), HtmlFormatter())
pygments_code_css = HtmlFormatter().get_style_defs('.highlight')
return {
"pygments_code": pygments_code,
"pygments_code_css": pygments_code_css
}
except Exception as e:
printDebug("Error: Pygmentize Failed", "red")
return {} | 0.00313 |
def _get_inference_input(self,
trans_inputs: List[TranslatorInput]) -> Tuple[mx.nd.NDArray,
int,
Optional[lexicon.TopKLexicon],
List[Optional[constrained.RawConstraintList]],
List[Optional[constrained.RawConstraintList]],
mx.nd.NDArray]:
"""
Assembles the numerical data for the batch. This comprises an NDArray for the source sentences,
the bucket key (padded source length), and a list of raw constraint lists, one for each sentence in the batch,
an NDArray of maximum output lengths for each sentence in the batch.
Each raw constraint list contains phrases in the form of lists of integers in the target language vocabulary.
:param trans_inputs: List of TranslatorInputs.
:return NDArray of source ids (shape=(batch_size, bucket_key, num_factors)),
bucket key, lexicon for vocabulary restriction, list of raw constraint
lists, and list of phrases to avoid, and an NDArray of maximum output
lengths.
"""
batch_size = len(trans_inputs)
bucket_key = data_io.get_bucket(max(len(inp.tokens) for inp in trans_inputs), self.buckets_source)
source = mx.nd.zeros((batch_size, bucket_key, self.num_source_factors), ctx=self.context)
restrict_lexicon = None # type: Optional[lexicon.TopKLexicon]
raw_constraints = [None] * batch_size # type: List[Optional[constrained.RawConstraintList]]
raw_avoid_list = [None] * batch_size # type: List[Optional[constrained.RawConstraintList]]
max_output_lengths = [] # type: List[int]
for j, trans_input in enumerate(trans_inputs):
num_tokens = len(trans_input)
max_output_lengths.append(self.models[0].get_max_output_length(data_io.get_bucket(num_tokens, self.buckets_source)))
source[j, :num_tokens, 0] = data_io.tokens2ids(trans_input.tokens, self.source_vocabs[0])
factors = trans_input.factors if trans_input.factors is not None else []
num_factors = 1 + len(factors)
if num_factors != self.num_source_factors:
logger.warning("Input %d factors, but model(s) expect %d", num_factors,
self.num_source_factors)
for i, factor in enumerate(factors[:self.num_source_factors - 1], start=1):
# fill in as many factors as there are tokens
source[j, :num_tokens, i] = data_io.tokens2ids(factor, self.source_vocabs[i])[:num_tokens]
# Check if vocabulary selection/restriction is enabled:
# - First, see if the translator input provides a lexicon (used for multiple lexicons)
# - If not, see if the translator itself provides a lexicon (used for single lexicon)
# - The same lexicon must be used for all inputs in the batch.
if trans_input.restrict_lexicon is not None:
if restrict_lexicon is not None and restrict_lexicon is not trans_input.restrict_lexicon:
logger.warning("Sentence %s: different restrict_lexicon specified, will overrule previous. "
"All inputs in batch must use same lexicon." % trans_input.sentence_id)
restrict_lexicon = trans_input.restrict_lexicon
elif self.restrict_lexicon is not None:
if isinstance(self.restrict_lexicon, dict):
# This code should not be reachable since the case is checked when creating
# translator inputs. It is included here to guarantee that the translator can
# handle any valid input regardless of whether it was checked at creation time.
logger.warning("Sentence %s: no restrict_lexicon specified for input when using multiple lexicons, "
"defaulting to first lexicon for entire batch." % trans_input.sentence_id)
restrict_lexicon = list(self.restrict_lexicon.values())[0]
else:
restrict_lexicon = self.restrict_lexicon
if trans_input.constraints is not None:
raw_constraints[j] = [data_io.tokens2ids(phrase, self.vocab_target) for phrase in
trans_input.constraints]
if trans_input.avoid_list is not None:
raw_avoid_list[j] = [data_io.tokens2ids(phrase, self.vocab_target) for phrase in
trans_input.avoid_list]
if any(self.unk_id in phrase for phrase in raw_avoid_list[j]):
logger.warning("Sentence %s: %s was found in the list of phrases to avoid; "
"this may indicate improper preprocessing.", trans_input.sentence_id, C.UNK_SYMBOL)
return source, bucket_key, restrict_lexicon, raw_constraints, raw_avoid_list, \
mx.nd.array(max_output_lengths, ctx=self.context, dtype='int32') | 0.007621 |
def update(self, friendly_name=values.unset, code_length=values.unset,
lookup_enabled=values.unset, skip_sms_to_landlines=values.unset,
dtmf_input_required=values.unset, tts_name=values.unset,
psd2_enabled=values.unset):
"""
Update the ServiceInstance
:param unicode friendly_name: A string to describe the verification service
:param unicode code_length: The length of the verification code to generate
:param bool lookup_enabled: Whether to perform a lookup with each verification
:param bool skip_sms_to_landlines: Whether to skip sending SMS verifications to landlines
:param bool dtmf_input_required: Whether to ask the user to press a number before delivering the verify code in a phone call
:param unicode tts_name: The name of an alternative text-to-speech service to use in phone calls
:param bool psd2_enabled: Whether to pass PSD2 transaction parameters when starting a verification
:returns: Updated ServiceInstance
:rtype: twilio.rest.verify.v2.service.ServiceInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
code_length=code_length,
lookup_enabled=lookup_enabled,
skip_sms_to_landlines=skip_sms_to_landlines,
dtmf_input_required=dtmf_input_required,
tts_name=tts_name,
psd2_enabled=psd2_enabled,
) | 0.008141 |
def _GetAppYamlHostname(application_path, open_func=open):
"""Build the hostname for this app based on the name in app.yaml.
Args:
application_path: A string with the path to the AppEngine application. This
should be the directory containing the app.yaml file.
open_func: Function to call to open a file. Used to override the default
open function in unit tests.
Returns:
A hostname, usually in the form of "myapp.appspot.com", based on the
application name in the app.yaml file. If the file can't be found or
there's a problem building the name, this will return None.
"""
try:
app_yaml_file = open_func(os.path.join(application_path or '.', 'app.yaml'))
config = yaml.safe_load(app_yaml_file.read())
except IOError:
# Couldn't open/read app.yaml.
return None
application = config.get('application')
if not application:
return None
if ':' in application:
# Don't try to deal with alternate domains.
return None
# If there's a prefix ending in a '~', strip it.
tilde_index = application.rfind('~')
if tilde_index >= 0:
application = application[tilde_index + 1:]
if not application:
return None
return '%s.appspot.com' % application | 0.011281 |
def create_analytic_backend(settings):
"""
Creates a new Analytics backend from the settings
:param settings: Dictionary of settings for the analytics backend
:returns: A backend object implementing the analytics api
>>>
    >>> analytics = create_analytic_backend({
>>> 'backend': 'analytics.backends.redis.Redis',
>>> 'settings': {
>>> 'defaults': {
>>> 'host': 'localhost',
>>> 'port': 6379,
>>> 'db': 0,
>>> },
>>> 'hosts': [{'db': 0}, {'db': 1}, {'host': 'redis.example.org'}]
>>> },
>>> })
"""
backend = settings.get('backend')
if isinstance(backend, basestring):
backend = import_string(backend)
elif backend:
backend = backend
else:
raise KeyError('backend')
return backend(settings.get("settings", {})) | 0.001129 |
def read_xml(self):
"""
read metadata from xml and set all the found properties.
:return: the root element of the xml
:rtype: ElementTree.Element
"""
with reading_ancillary_files(self):
root = super(ImpactLayerMetadata, self).read_xml()
if root is not None:
self._read_provenance_from_xml(root)
return root | 0.00495 |
def partition(self, dimension):
"""
Partition subspace into desired dimension.
:type dimension: int
:param dimension: Maximum dimension to use.
"""
# Take leftmost 'dimension' input basis vectors
for i, channel in enumerate(self.u):
if self.v[i].shape[1] < dimension:
raise IndexError('Channel is max dimension %s'
% self.v[i].shape[1])
self.data[i] = channel[:, 0:dimension]
self.dimension = dimension
return self | 0.003571 |
def minmax(low, high):
"""Test that the data items fall within range: low <= x <= high."""
def decorator(function):
"""Decorate a function with args."""
@functools.wraps(function)
def wrapper(*args, **kwargs):
"""Wrap the function."""
series = function(*args, **kwargs)
lo_pass = low <= series
hi_pass = series <= high
return lo_pass & hi_pass
return wrapper
return decorator | 0.002075 |
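A possible use of the `minmax` decorator, assuming the wrapped function returns something that supports elementwise comparison, such as a pandas Series (pandas is an assumption here, not a requirement of the decorator).

```python
import pandas as pd

@minmax(0, 100)
def percent_complete():
    # Stand-in for a real data source.
    return pd.Series([5, 50, 105])

print(percent_complete())   # boolean Series: [True, True, False]
```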
def tear_down(self):
"""
Tears down all temp files and directories.
"""
while len(self._temp_directories) > 0:
directory = self._temp_directories.pop()
shutil.rmtree(directory, ignore_errors=True)
while len(self._temp_files) > 0:
file = self._temp_files.pop()
try:
os.remove(file)
except OSError:
pass | 0.004619 |
def sync_readmes():
""" just copies README.md into README for pypi documentation """
print("syncing README")
with open("README.md", 'r') as reader:
file_text = reader.read()
with open("README", 'w') as writer:
writer.write(file_text) | 0.003774 |
def rename(client, old, new, force):
"""Rename the workflow named <old> to <new>."""
from renku.models.refs import LinkReference
LinkReference(client=client, name=_ref(old)).rename(_ref(new), force=force) | 0.00463 |
def quote(identifier,
html=HTML_STRING.match, valid_id=ID.match, dot_keywords=KEYWORDS):
"""Return DOT identifier from string, quote if needed.
>>> quote('')
'""'
>>> quote('spam')
'spam'
>>> quote('spam spam')
'"spam spam"'
>>> quote('-4.2')
'-4.2'
>>> quote('.42')
'.42'
>>> quote('<<b>spam</b>>')
'<<b>spam</b>>'
>>> quote(nohtml('<>'))
'"<>"'
"""
if html(identifier) and not isinstance(identifier, NoHtml):
pass
elif not valid_id(identifier) or identifier.lower() in dot_keywords:
return '"%s"' % identifier.replace('"', '\\"')
return identifier | 0.00152 |
def estimate_hessian(objective_func, parameters,
lower_bounds=None, upper_bounds=None,
step_ratio=2, nmr_steps=5,
max_step_sizes=None,
data=None, cl_runtime_info=None):
"""Estimate and return the upper triangular elements of the Hessian of the given function at the given parameters.
This calculates the Hessian using central difference (using a 2nd order Taylor expansion) with a Richardson
extrapolation over the proposed sequence of steps. If enough steps are given, we apply a Wynn epsilon extrapolation
on top of the Richardson extrapolated results. If more steps are left, we return the estimate with the lowest error,
taking into account outliers using a median filter.
The Hessian is evaluated at the steps:
.. math::
        \frac{\big(f(x + d_j e_j + d_k e_k) - f(x + d_j e_j - d_k e_k)\big)
              - \big(f(x - d_j e_j + d_k e_k) - f(x - d_j e_j - d_k e_k)\big)}
             {4\, d_j d_k}
where :math:`e_j` is a vector where element :math:`j` is one and the rest are zero
and :math:`d_j` is a scalar spacing :math:`steps_j`.
Steps are generated according to an exponentially diminishing ratio, defined as:
steps = max_step * step_ratio**-i, i = 0,1,..,nmr_steps-1.
Where the maximum step can be provided. For example, a maximum step of 2 with a step ratio of 2, computed for
4 steps gives: [2.0, 1.0, 0.5, 0.25]. If lower and upper bounds are given, we use as maximum step size the largest
step size that fits between the Hessian point and the boundaries.
The steps define the order of the estimation, with 2 steps resulting in a O(h^2) estimate, 3 steps resulting in a
O(h^4) estimate and 4 or more steps resulting in a O(h^6) derivative estimate.
Args:
objective_func (mot.lib.cl_function.CLFunction): The function we want to differentiate.
A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x, void* data);
The objective function has the same signature as the minimization function in MOT. For the numerical
hessian, the ``objective_list`` parameter is ignored.
parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems,
and p parameters
lower_bounds (list or None): a list of length (p,) for p parameters with the lower bounds.
Each element of the list can be a scalar or a vector (of the same length as the number
of problem instances). To disable bounds for this parameter use -np.inf.
upper_bounds (list or None): a list of length (p,) for p parameters with the upper bounds.
Each element of the list can be a scalar or a vector (of the same length as the number
of problem instances). To disable bounds for this parameter use np.inf.
step_ratio (float): the ratio at which the steps diminish.
nmr_steps (int): the number of steps we will generate. We will calculate the derivative for each of these
step sizes and extrapolate the best step size from among them. The minimum number of steps is 1.
max_step_sizes (float or ndarray or None): the maximum step size, or the maximum step size per parameter.
If None is given, we use 0.1 for all parameters. If a float is given, we use that for all parameters.
If a list is given, it should be of the same length as the number of parameters.
data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information
Returns:
ndarray: per problem instance a vector with the upper triangular elements of the Hessian matrix.
This array can hold NaN's, for elements where the Hessian failed to approximate.
"""
if len(parameters.shape) == 1:
parameters = parameters[None, :]
nmr_voxels = parameters.shape[0]
nmr_params = parameters.shape[1]
nmr_derivatives = nmr_params * (nmr_params + 1) // 2
initial_step = _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes)
kernel_data = {
'parameters': Array(parameters, ctype='mot_float_type'),
'initial_step': Array(initial_step, ctype='float'),
'derivatives': Zeros((nmr_voxels, nmr_derivatives), 'double'),
'errors': Zeros((nmr_voxels, nmr_derivatives), 'double'),
'x_tmp': LocalMemory('mot_float_type', nmr_params),
'data': data,
'scratch': LocalMemory('double', nmr_steps + (nmr_steps - 1) + nmr_steps)
}
hessian_kernel = SimpleCLFunction.from_string('''
void _numdiff_hessian(
global mot_float_type* parameters,
global float* initial_step,
global double* derivatives,
global double* errors,
local mot_float_type* x_tmp,
void* data,
local double* scratch){
if(get_local_id(0) == 0){
for(uint i = 0; i < ''' + str(nmr_params) + '''; i++){
x_tmp[i] = parameters[i];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
double f_x_input = ''' + objective_func.get_cl_function_name() + '''(x_tmp, data);
// upper triangle loop
uint coord_ind = 0;
for(int i = 0; i < ''' + str(nmr_params) + '''; i++){
for(int j = i; j < ''' + str(nmr_params) + '''; j++){
_numdiff_hessian_element(
data, x_tmp, f_x_input, i, j, initial_step,
derivatives + coord_ind, errors + coord_ind, scratch);
coord_ind++;
}
}
}
''', dependencies=[objective_func,
_get_numdiff_hessian_element_func(objective_func, nmr_steps, step_ratio)])
hessian_kernel.evaluate(kernel_data, nmr_voxels, use_local_reduction=True, cl_runtime_info=cl_runtime_info)
return kernel_data['derivatives'].get_data() | 0.005946 |
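A tiny numeric illustration of the step schedule described in the docstring above (plain Python, not part of the library code).

```python
max_step, step_ratio, nmr_steps = 2.0, 2, 4
steps = [max_step * step_ratio ** -i for i in range(nmr_steps)]
print(steps)   # [2.0, 1.0, 0.5, 0.25], matching the example in the docstring
```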
def _hide(self):
"""Hide the tray icon."""
self._icon.set_visible(False)
self._icon.disconnect(self._conn_left)
self._icon.disconnect(self._conn_right)
self._conn_left = None
self._conn_right = None | 0.00813 |
def ReturnLeasedCronJobs(self, jobs):
"""Makes leased cron jobs available for leasing again."""
errored_jobs = []
for returned_job in jobs:
existing_lease = self.cronjob_leases.get(returned_job.cron_job_id)
if existing_lease is None:
errored_jobs.append(returned_job)
continue
if (returned_job.leased_until != existing_lease[0] or
returned_job.leased_by != existing_lease[1]):
errored_jobs.append(returned_job)
continue
del self.cronjob_leases[returned_job.cron_job_id]
if errored_jobs:
raise ValueError("Some jobs could not be returned: %s" %
",".join(job.cron_job_id for job in errored_jobs)) | 0.009873 |
def vod_data(self, vid=None):
"""
Get the VOD data path and the default VOD ID
:return:
"""
page = self.session.http.get(self.url)
m = self._vod_re.search(page.text)
vod_data_url = m and urljoin(self.url, m.group(0))
if vod_data_url:
self.logger.debug("Found VOD data url: {0}", vod_data_url)
res = self.session.http.get(vod_data_url)
return self.session.http.json(res) | 0.004264 |
def _XYZvxvyvz(self,*args,**kwargs):
"""Calculate X,Y,Z,U,V,W"""
obs, ro, vo= self._parse_radec_kwargs(kwargs,vel=True)
thiso= self(*args,**kwargs)
if not len(thiso.shape) == 2: thiso= thiso.reshape((thiso.shape[0],1))
if len(thiso[:,0]) != 4 and len(thiso[:,0]) != 6: #pragma: no cover
raise AttributeError("orbit must track azimuth to use radeclbduvw functions")
elif len(thiso[:,0]) == 4: #planarOrbit
if isinstance(obs,(nu.ndarray,list)):
Xsun= nu.sqrt(obs[0]**2.+obs[1]**2.)
X,Y,Z = coords.galcencyl_to_XYZ(\
thiso[0,:],thiso[3,:]-nu.arctan2(obs[1],obs[0]),
nu.zeros_like(thiso[0]),
Xsun=Xsun/ro,Zsun=obs[2]/ro,_extra_rot=False).T
vX,vY,vZ = coords.galcencyl_to_vxvyvz(\
thiso[1,:],thiso[2,:],nu.zeros_like(thiso[0]),
thiso[3,:]-nu.arctan2(obs[1],obs[0]),
vsun=nu.array(# have to rotate
[obs[3]*obs[0]/Xsun+obs[4]*obs[1]/Xsun,
-obs[3]*obs[1]/Xsun+obs[4]*obs[0]/Xsun,
obs[5]])/vo,
Xsun=Xsun/ro,Zsun=obs[2]/ro,_extra_rot=False).T
else: #Orbit instance
obs.turn_physical_off()
if obs.dim() == 2:
X,Y,Z = coords.galcencyl_to_XYZ(\
thiso[0,:],thiso[3,:]-obs.phi(*args,**kwargs),
nu.zeros_like(thiso[0]),
Xsun=obs.R(*args,**kwargs),Zsun=0.,_extra_rot=False).T
vX,vY,vZ = coords.galcencyl_to_vxvyvz(\
thiso[1,:],thiso[2,:],nu.zeros_like(thiso[0]),
thiso[3,:]-obs.phi(*args,**kwargs),
vsun=nu.array([\
obs.vR(*args,**kwargs),obs.vT(*args,**kwargs),
0.]),
Xsun=obs.R(*args,**kwargs),Zsun=0.,_extra_rot=False).T
else:
X,Y,Z = coords.galcencyl_to_XYZ(\
thiso[0,:],thiso[3,:]-obs.phi(*args,**kwargs),
nu.zeros_like(thiso[0]),
Xsun=obs.R(*args,**kwargs),
Zsun=obs.z(*args,**kwargs),_extra_rot=False).T
vX,vY,vZ = coords.galcencyl_to_vxvyvz(\
thiso[1,:],thiso[2,:],nu.zeros_like(thiso[0]),
thiso[3,:]-obs.phi(*args,**kwargs),
vsun=nu.array([\
obs.vR(*args,**kwargs),
obs.vT(*args,**kwargs),
obs.vz(*args,**kwargs)]),
Xsun=obs.R(*args,**kwargs),
Zsun=obs.z(*args,**kwargs),_extra_rot=False).T
obs.turn_physical_on()
else: #FullOrbit
if isinstance(obs,(nu.ndarray,list)):
Xsun= nu.sqrt(obs[0]**2.+obs[1]**2.)
X,Y,Z = coords.galcencyl_to_XYZ(\
thiso[0,:],thiso[5,:]-nu.arctan2(obs[1],obs[0]),thiso[3,:],
Xsun=Xsun/ro,Zsun=obs[2]/ro).T
vX,vY,vZ = coords.galcencyl_to_vxvyvz(\
thiso[1,:],thiso[2,:],thiso[4,:],
thiso[5,:]-nu.arctan2(obs[1],obs[0]),
vsun=nu.array(# have to rotate
[obs[3]*obs[0]/Xsun+obs[4]*obs[1]/Xsun,
-obs[3]*obs[1]/Xsun+obs[4]*obs[0]/Xsun,
obs[5]])/vo,
Xsun=Xsun/ro,Zsun=obs[2]/ro).T
else: #Orbit instance
obs.turn_physical_off()
if obs.dim() == 2:
X,Y,Z = coords.galcencyl_to_XYZ(\
thiso[0,:],thiso[5,:]-obs.phi(*args,**kwargs),
thiso[3,:],
Xsun=obs.R(*args,**kwargs),Zsun=0.).T
vX,vY,vZ = coords.galcencyl_to_vxvyvz(\
thiso[1,:],thiso[2,:],thiso[4,:],
thiso[5,:]-obs.phi(*args,**kwargs),
vsun=nu.array([\
obs.vR(*args,**kwargs),obs.vT(*args,**kwargs),
0.]),
Xsun=obs.R(*args,**kwargs),Zsun=0.).T
else:
X,Y,Z = coords.galcencyl_to_XYZ(\
thiso[0,:],thiso[5,:]-obs.phi(*args,**kwargs),
thiso[3,:],
Xsun=obs.R(*args,**kwargs),
Zsun=obs.z(*args,**kwargs)).T
vX,vY,vZ = coords.galcencyl_to_vxvyvz(\
thiso[1,:],thiso[2,:],thiso[4,:],
thiso[5,:]-obs.phi(*args,**kwargs),
vsun=nu.array([\
obs.vR(*args,**kwargs),
obs.vT(*args,**kwargs),
obs.vz(*args,**kwargs)]),
Xsun=obs.R(*args,**kwargs),
Zsun=obs.z(*args,**kwargs)).T
obs.turn_physical_on()
return (X*ro,Y*ro,Z*ro,vX*vo,vY*vo,vZ*vo) | 0.034607 |
def _populate_union_type_attributes(self, env, data_type):
"""
Converts a forward reference of a union into a complete definition.
"""
parent_type = None
extends = data_type._ast_node.extends
if extends:
# A parent type must be fully defined and not just a forward
# reference.
parent_type = self._resolve_type(env, extends, True)
if isinstance(parent_type, Alias):
raise InvalidSpec(
'A union cannot extend an alias. '
'Use the canonical name instead.',
data_type._ast_node.lineno, data_type._ast_node.path)
if isinstance(parent_type, Nullable):
raise InvalidSpec(
'A union cannot extend a nullable type.',
data_type._ast_node.lineno, data_type._ast_node.path)
if not isinstance(parent_type, Union):
raise InvalidSpec(
'A union can only extend another union: '
'%s is not a union.' % quote(parent_type.name),
data_type._ast_node.lineno, data_type._ast_node.path)
api_type_fields = []
for stone_field in data_type._ast_node.fields:
if stone_field.name == 'other':
raise InvalidSpec(
"Union cannot define an 'other' field because it is "
"reserved as the catch-all field for open unions.",
stone_field.lineno, stone_field.path)
api_type_fields.append(self._create_union_field(env, stone_field))
catch_all_field = None
if data_type.closed:
if parent_type and not parent_type.closed:
# Due to the reversed super type / child type relationship for
# unions, a child type cannot be closed if its parent is open
# because the parent now has an extra field that is not
# recognized by the child if it were substituted in for it.
raise InvalidSpec(
"Union cannot be closed since parent type '%s' is open." % (
parent_type.name),
data_type._ast_node.lineno, data_type._ast_node.path)
else:
if not parent_type or parent_type.closed:
# Create a catch-all field
catch_all_field = UnionField(
name='other', data_type=Void(), doc=None,
ast_node=data_type._ast_node, catch_all=True)
api_type_fields.append(catch_all_field)
data_type.set_attributes(
data_type._ast_node.doc, api_type_fields, parent_type, catch_all_field) | 0.001453 |
def show_help(message=None):
"""Open an help message in the user's browser
:param message: An optional message object to display in the dialog.
:type message: Message.Message
"""
help_path = mktemp('.html')
with open(help_path, 'wb+') as f:
help_html = get_help_html(message)
f.write(help_html.encode('utf8'))
path_with_protocol = 'file://' + help_path
QDesktopServices.openUrl(QUrl(path_with_protocol)) | 0.002169 |
def parse(cls, parser, text, pos): # pylint: disable=W0613
"""Match simple values excluding some Keywords like 'and' and 'or'"""
if not text.strip():
return text, SyntaxError("Invalid value")
class Rule(object):
grammar = attr('value', SpiresSimpleValue), omit(re.compile(".*"))
try:
tree = pypeg2.parse(text, Rule, whitespace="")
except SyntaxError:
return text, SyntaxError("Expected %r" % cls)
else:
r = tree.value
if r.value.lower() in ('and', 'or', 'not'):
return text, SyntaxError("Invalid value %s" % r.value)
return text[len(r.value):], r | 0.002911 |
def touching(self, other):
""" Return true if this rectangle is touching the given shape. """
if self.top < other.bottom: return False
if self.bottom > other.top: return False
if self.left > other.right: return False
if self.right < other.left: return False
return True | 0.018809 |
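A minimal usage sketch of the overlap test above, treating the method as a standalone function borrowed onto a namedtuple stand-in; the real shape class in the source library is richer than this.

from collections import namedtuple

# Hypothetical stand-in exposing only the four edge attributes the method reads.
Rect = namedtuple("Rect", "left right bottom top")
Rect.touching = touching  # borrow the method above for the sketch

a = Rect(left=0, right=10, bottom=0, top=10)
b = Rect(left=5, right=15, bottom=5, top=15)
c = Rect(left=20, right=30, bottom=20, top=30)
assert a.touching(b)      # overlapping rectangles touch
assert not a.touching(c)  # fully separated rectangles do not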
def show(self):
"""Show the hidden spinner."""
thr_is_alive = self._spin_thread and self._spin_thread.is_alive()
if thr_is_alive and self._hide_spin.is_set():
# clear the hidden spinner flag
self._hide_spin.clear()
# clear the current line so the spinner is not appended to it
sys.stdout.write("\r")
self._clear_line() | 0.00495 |
def get_modules(folder, include_meta=False):
"""Finds modules (recursively) in folder
:param folder: root folder
:param include_meta: whether include meta files like (__init__ or
__version__)
:return: list of modules
"""
files = [
file
for file in _get_modules(folder)
if is_file(file) # just files
]
if not include_meta:
files = [
file
for file in files
if not Document(file).name.startswith("__")
]
return files | 0.001862 |
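A hedged usage sketch; `_get_modules`, `is_file`, and `Document` live elsewhere in the source module, so the exact paths returned are illustrative only.

# Collect module files under ./mypkg, skipping meta files such as __init__.py.
modules = get_modules("./mypkg")
# Keep the meta files as well:
all_modules = get_modules("./mypkg", include_meta=True)
for module_file in modules:
    print(module_file)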
def open_file(link, session=None, stream=True):
"""
Open local or remote file for reading.
:type link: pip._internal.index.Link or str
:type session: requests.Session
:param bool stream: Try to stream if remote, default True
:raises ValueError: If link points to a local directory.
:return: a context manager to the opened file-like object
"""
if not isinstance(link, six.string_types):
try:
link = link.url_without_fragment
except AttributeError:
raise ValueError("Cannot parse url from unkown type: {0!r}".format(link))
if not is_valid_url(link) and os.path.exists(link):
link = path_to_url(link)
if is_file_url(link):
# Local URL
local_path = url_to_path(link)
if os.path.isdir(local_path):
raise ValueError("Cannot open directory for read: {}".format(link))
else:
with io.open(local_path, "rb") as local_file:
yield local_file
else:
# Remote URL
headers = {"Accept-Encoding": "identity"}
if not session:
from requests import Session
session = Session()
with session.get(link, headers=headers, stream=stream) as resp:
try:
raw = getattr(resp, "raw", None)
result = raw if raw else resp
yield result
finally:
if raw:
conn = getattr(raw, "_connection")
if conn is not None:
conn.close()
result.close() | 0.001252 |
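The generator above is presumably wrapped with contextlib.contextmanager in the original module (the decorator sits outside this excerpt); under that assumption, a call site looks like this:

# Local path: converted to a file:// URL internally when it exists on disk.
with open_file("/tmp/requirements.txt") as fp:
    data = fp.read()

# Remote URL: streamed through an explicit requests Session.
from requests import Session
with open_file("https://example.com/archive.tar.gz", session=Session()) as fp:
    chunk = fp.read(8192)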
def update(state, host, cache_time=None, touch_periodic=False):
'''
Updates apt repos.
+ cache_time: cache updates for this many seconds
+ touch_periodic: touch ``/var/lib/apt/periodic/update-success-stamp`` after update
'''
# If cache_time check when apt was last updated, prevent updates if within time
if cache_time:
# Ubuntu provides this handy file
cache_info = host.fact.file(APT_UPDATE_FILENAME)
# Time on files is not tz-aware, and will be the same tz as the server's time,
# so we can safely remove the tzinfo from host.fact.date before comparison.
host_cache_time = host.fact.date.replace(tzinfo=None) - timedelta(seconds=cache_time)
if cache_info and cache_info['mtime'] and cache_info['mtime'] > host_cache_time:
return
yield 'apt-get update'
# Some apt systems (Debian) have the /var/lib/apt/periodic directory, but
# don't bother touching anything in there - so pyinfra does it, enabling
# cache_time to work.
if cache_time:
yield 'touch {0}'.format(APT_UPDATE_FILENAME) | 0.006335 |
def get_abbr_impl():
"""Return abbreviated implementation name."""
impl = platform.python_implementation()
if impl == 'PyPy':
return 'pp'
elif impl == 'Jython':
return 'jy'
elif impl == 'IronPython':
return 'ip'
elif impl == 'CPython':
return 'cp'
raise LookupError('Unknown Python implementation: ' + impl) | 0.002717 |
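The abbreviation is normally combined with the interpreter version to form a wheel tag; the companion helper is not part of this excerpt, so a simplified stand-in is sketched here.

import platform

def get_impl_ver():
    # Simplified stand-in for the real helper: ('3', '8', ...) -> '38'.
    return "".join(platform.python_version_tuple()[:2])

print(get_abbr_impl() + get_impl_ver())  # e.g. 'cp38' on CPython 3.8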
def set_info_page(self):
"""Set current info_page."""
if self.info_page is not None:
self.infowidget.setHtml(
self.info_page,
QUrl.fromLocalFile(self.css_path)
) | 0.008368 |
def triplifyGML(dpath="../data/fb/",fname="foo.gdf",fnamei="foo_interaction.gdf",
fpath="./fb/",scriptpath=None,uid=None,sid=None,fb_link=None,ego=True,umbrella_dir=None):
"""Produce a linked data publication tree from a standard GML file.
INPUTS:
======
=> the data directory path
=> the file name (fname) of the friendship network
=> the file name (fnamei) of the interaction network
=> the final path (fpath) for the tree of files to be created
=> a path to the script that is calling this function (scriptpath)
=> the numeric id (uid) of the facebook user or group of the network(s)
=> the numeric id (sid) of the facebook user or group of the network (s)
=> the facebook link (fb_link) of the user or group
=> the network is from a user (ego==True) or a group (ego==False)
OUTPUTS:
=======
the tree in the directory fpath."""
c("iniciado tripgml")
if sum(c.isdigit() for c in fname)==4:
year=re.findall(r".*(\d\d\d\d).gml",fname)[0][0]
B.datetime_snapshot=datetime.date(*[int(i) for i in (year)])
if sum(c.isdigit() for c in fname)==12:
day,month,year,hour,minute=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d)_(\d\d)(\d\d).gml",fname)[0]
B.datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)])
if sum(c.isdigit() for c in fname)==14:
day,month,year,hour,minute,second=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d)_(\d\d)(\d\d)(\d\d).gml",fname)[0]
B.datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute,second)])
elif sum(c.isdigit() for c in fname)==8:
day,month,year=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d).gml",fname)[0]
B.datetime_snapshot=datetime.date(*[int(i) for i in (year,month,day)])
    B.datetime_snapshot_=B.datetime_snapshot.isoformat()
B.fname=fname
B.fnamei=fnamei
B.name=fname.replace(".gml","_gml")
if fnamei:
B.namei=fnamei[:-4]
B.ego=ego
B.friendship=bool(fname)
B.interaction=bool(fnamei)
B.sid=sid
B.uid=uid
B.scriptpath=scriptpath
B.fb_link=fb_link
B.dpath=dpath
B.fpath=fpath
B.prefix="https://raw.githubusercontent.com/OpenLinkedSocialData/{}master/".format(umbrella_dir)
B.umbrella_dir=umbrella_dir
c("antes de ler")
#fnet=S.fb.readGML(dpath+fname) # return networkx graph
fnet=S.fb.readGML2(dpath+fname) # return networkx graph
# return fnet
c("depois de ler, antes de fazer rdf")
fnet_=rdfFriendshipNetwork(fnet) # return rdflib graph
if B.interaction:
inet=S.fb.readGML(dpath+fnamei) # return networkx graph
inet_=rdfInteractionNetwork(inet) # return rdflib graph
else:
inet_=0
meta=makeMetadata(fnet_,inet_) # return rdflib graph with metadata about the structure
c("depois de rdf, escrita em disco")
writeAllFB(fnet_,inet_,meta) # write linked data tree
c("cabo") | 0.028078 |
def _parse_common(text, **options):
"""
Tries to parse the string as a common datetime format.
:param text: The string to parse.
:type text: str
:rtype: dict or None
"""
m = COMMON.match(text)
has_date = False
year = 0
month = 1
day = 1
if not m:
raise ParserError("Invalid datetime string")
if m.group("date"):
# A date has been specified
has_date = True
year = int(m.group("year"))
if not m.group("monthday"):
# No month and day
month = 1
day = 1
else:
if options["day_first"]:
month = int(m.group("day"))
day = int(m.group("month"))
else:
month = int(m.group("month"))
day = int(m.group("day"))
if not m.group("time"):
return date(year, month, day)
# Grabbing hh:mm:ss
hour = int(m.group("hour"))
minute = int(m.group("minute"))
if m.group("second"):
second = int(m.group("second"))
else:
second = 0
# Grabbing subseconds, if any
microsecond = 0
if m.group("subsecondsection"):
# Limiting to 6 chars
subsecond = m.group("subsecond")[:6]
microsecond = int("{:0<6}".format(subsecond))
if has_date:
return datetime(year, month, day, hour, minute, second, microsecond)
return time(hour, minute, second, microsecond) | 0.000689 |
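The COMMON regex is defined outside this excerpt; assuming it accepts ISO-like strings, the helper returns a date, time, or datetime depending on which groups matched:

# Date only -> datetime.date(2016, 10, 6)
_parse_common("2016-10-06", day_first=False)
# Date plus time with subseconds -> datetime.datetime(..., microsecond=123456)
_parse_common("2016-10-06 12:34:56.123456", day_first=False)
# day_first=True swaps the day and month groups (e.g. for European input)
_parse_common("06-10-2016", day_first=True)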
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'score') and self.score is not None:
_dict['score'] = self.score
return _dict | 0.008889 |
def get_version(self, filename=None, version=-1):
"""Get a file from GridFS by ``"filename"``.
Returns a version of the file in GridFS whose filename matches
`filename` and whose metadata fields match the supplied keyword
arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.
Version numbering is a convenience atop the GridFS API provided
by MongoDB. If more than one file matches the query (either by
`filename` alone, by metadata fields, or by a combination of
both), then version ``-1`` will be the most recently uploaded
matching file, ``-2`` the second most recently
uploaded, etc. Version ``0`` will be the first version
uploaded, ``1`` the second version, etc. So if three versions
have been uploaded, then version ``0`` is the same as version
``-3``, version ``1`` is the same as version ``-2``, and
version ``2`` is the same as version ``-1``. Note that searching by
random (unindexed) meta data is not supported here.
Raises :class:`~gridfs.errors.NoFile` if no such version of
that file exists.
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `version` (optional): version of the file to get (defaults
to -1, the most recent version uploaded)
"""
query = {"filename": filename}
skip = abs(version)
if version < 0:
skip -= 1
myorder = DESCENDING("uploadDate")
else:
myorder = ASCENDING("uploadDate")
def ok(cursor):
if cursor:
return GridOut(self.__collection, cursor[0])
raise NoFile("no version %d for filename %r" % (version, filename))
return self.__files.find(query, filter=filter.sort(myorder), limit=1, skip=skip)\
.addCallback(ok) | 0.002087 |
def save_model(self, request, obj, form, change):
"""
Set the ID of the parent page if passed in via querystring, and
make sure the new slug propagates to all descendant pages.
"""
if change and obj._old_slug != obj.slug:
# _old_slug was set in PageAdminForm.clean_slug().
new_slug = obj.slug or obj.generate_unique_slug()
obj.slug = obj._old_slug
obj.set_slug(new_slug)
# Force parent to be saved to trigger handling of ordering and slugs.
parent = request.GET.get("parent")
if parent is not None and not change:
obj.parent_id = parent
obj.save()
super(PageAdmin, self).save_model(request, obj, form, change) | 0.002653 |
def edit_distance(seq1, seq2, action_function=lowest_cost_action, test=operator.eq):
"""Computes the edit distance between the two given sequences.
This uses the relatively fast method that only constructs
two columns of the 2d array for edits. This function actually uses four columns
because we track the number of matches too.
"""
m = len(seq1)
n = len(seq2)
# Special, easy cases:
if seq1 == seq2:
return 0, n
if m == 0:
return n, 0
if n == 0:
return m, 0
v0 = [0] * (n + 1) # The two 'error' columns
v1 = [0] * (n + 1)
m0 = [0] * (n + 1) # The two 'match' columns
m1 = [0] * (n + 1)
for i in range(1, n + 1):
v0[i] = i
for i in range(1, m + 1):
v1[0] = i
for j in range(1, n + 1):
cost = 0 if test(seq1[i - 1], seq2[j - 1]) else 1
# The costs
ins_cost = v1[j - 1] + 1
del_cost = v0[j] + 1
sub_cost = v0[j - 1] + cost
# Match counts
ins_match = m1[j - 1]
del_match = m0[j]
sub_match = m0[j - 1] + int(not cost)
action = action_function(ins_cost, del_cost, sub_cost, ins_match,
del_match, sub_match, cost)
if action in [EQUAL, REPLACE]:
v1[j] = sub_cost
m1[j] = sub_match
elif action == INSERT:
v1[j] = ins_cost
m1[j] = ins_match
elif action == DELETE:
v1[j] = del_cost
m1[j] = del_match
else:
raise Exception('Invalid dynamic programming option returned!')
# Copy the columns over
for i in range(0, n + 1):
v0[i] = v1[i]
m0[i] = m1[i]
return v1[n], m1[n] | 0.001618 |
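A quick usage sketch: with the default cost and equality functions the call returns a (distance, matches) pair, and it accepts any pair of sequences, not just strings.

distance, matches = edit_distance("kitten", "sitting")
print(distance, matches)  # 3 edits, 4 matching characters

# Token sequences work the same way.
distance, matches = edit_distance(["the", "quick", "fox"], ["the", "fox"])
print(distance, matches)  # 1 deletion, 2 matches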
def image_export(self, image_name, dest_url, remote_host=None):
"""Export the image to the specified location
        :param image_name: image name that can uniquely identify an image
        :param dest_url: the location of the exported image, e.g.
        file:///opt/images/export.img; currently only export to a remote server
        or to the local server's file system is supported
        :param remote_host: the server that the image will be exported to; if
        remote_host is None, the image will be stored in the dest_path on the
        local server; the format is username@IP, e.g. [email protected]
:returns a dictionary that contains the exported image info
{
'image_name': the image_name that exported
'image_path': the image_path after exported
'os_version': the os version of the exported image
'md5sum': the md5sum of the original image
}
"""
try:
return self._imageops.image_export(image_name, dest_url,
remote_host)
except exception.SDKBaseException:
LOG.error("Failed to export image '%s'" % image_name)
raise | 0.001708 |
def _load_values(self, db_key: str) -> dict:
"""Load values from the db at the specified key, db_key.
FIXME(BMo): Could also be extended to load scalar types (instead of
just list and hash)
"""
if self._db.type(db_key) == 'list':
db_values = self._db.lrange(db_key, 0, -1)
for i, value in enumerate(db_values):
try:
db_values[i] = ast.literal_eval(value)
except SyntaxError:
pass
except ValueError:
pass
else: # self._db.type == 'hash'
db_values = self._db.hgetall(db_key)
for _key, _value in db_values.items():
try:
db_values[_key] = ast.literal_eval(_value)
except SyntaxError:
pass
except ValueError:
pass
return db_values | 0.002083 |
def get_version():
"""
Get version without importing from elasticapm. This avoids any side effects
from importing while installing and/or building the module
:return: a string, indicating the version
"""
version_file = open(os.path.join("elasticapm", "version.py"), encoding="utf-8")
for line in version_file:
if line.startswith("__version__"):
version_tuple = ast.literal_eval(line.split(" = ")[1])
return ".".join(map(str, version_tuple))
return "unknown" | 0.003839 |
def compute(self, pairs, x=None, x_link=None):
"""Return continuous random values for each record pair.
Parameters
----------
pairs : pandas.MultiIndex
A pandas MultiIndex with the record pairs to compare. The indices
in the MultiIndex are indices of the DataFrame(s) to link.
x : pandas.DataFrame
The DataFrame to link. If `x_link` is given, the comparing is a
linking problem. If `x_link` is not given, the problem is one of
deduplication.
x_link : pandas.DataFrame, optional
The second DataFrame.
Returns
-------
pandas.Series, pandas.DataFrame, numpy.ndarray
The result of comparing record pairs (the features). Can be
a tuple with multiple pandas.Series, pandas.DataFrame,
numpy.ndarray objects.
"""
df_empty = pd.DataFrame(index=pairs)
return self._compute(
tuple([df_empty]),
tuple([df_empty])
) | 0.001921 |
def is_on(self):
"""
Get sensor state.
Assume offline or open (worst case).
"""
if self._type == 'Occupancy':
return self.status not in CONST.STATUS_ONLINE
return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE,
CONST.STATUS_CLOSED) | 0.0059 |
def imbound(clspatch, *args, **kwargs):
"""
:param clspatch:
:param args:
:param kwargs:
:return:
"""
# todo : add example
c = kwargs.pop('color', kwargs.get('edgecolor', None))
kwargs.update(facecolor='none', edgecolor=c)
return impatch(clspatch, *args, **kwargs) | 0.003279 |
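A hedged example toward the docstring's todo: `impatch` and the argument layout it expects are not shown in this excerpt, so the matplotlib Circle call below is an assumption.

import matplotlib.pyplot as plt
from matplotlib.patches import Circle

fig, ax = plt.subplots()
# Outline-only circle: facecolor is forced to 'none', and the popped
# 'color' keyword becomes the edge colour.
imbound(Circle, (0.5, 0.5), 0.25, color="red")
plt.show()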
def has_any_role(*items):
r"""A :func:`.check` that is added that checks if the member invoking the
command has **any** of the roles specified. This means that if they have
one out of the three roles specified, then this check will return `True`.
Similar to :func:`.has_role`\, the names or IDs passed in must be exact.
This check raises one of two special exceptions, :exc:`.MissingAnyRole` if the user
is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1.0
Raise :exc:`.MissingAnyRole` or :exc:`.NoPrivateMessage`
instead of generic :exc:`.CheckFailure`
Parameters
-----------
items: List[Union[:class:`str`, :class:`int`]]
An argument list of names or IDs to check that the member has roles wise.
Example
--------
.. code-block:: python3
@bot.command()
@commands.has_any_role('Library Devs', 'Moderators', 492212595072434186)
async def cool(ctx):
await ctx.send('You are cool indeed')
"""
def predicate(ctx):
if not isinstance(ctx.channel, discord.abc.GuildChannel):
raise NoPrivateMessage()
getter = functools.partial(discord.utils.get, ctx.author.roles)
if any(getter(id=item) is not None if isinstance(item, int) else getter(name=item) is not None for item in items):
return True
raise MissingAnyRole(items)
return check(predicate) | 0.003927 |
def Majority(k, n):
"""Return a DataSet with n k-bit examples of the majority problem:
k random bits followed by a 1 if more than half the bits are 1, else 0."""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for i in range(k)]
bits.append(int(sum(bits) > k/2))
examples.append(bits)
return DataSet(name="majority", examples=examples) | 0.002538 |
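A usage sketch assuming the aima-style DataSet exposes an `examples` attribute: each row holds k random bits followed by the majority label.

ds = Majority(11, 100)          # 100 examples of the 11-bit majority problem
bits, label = ds.examples[0][:-1], ds.examples[0][-1]
assert label == int(sum(bits) > 11 / 2)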
def topDownCompute(self, encoded):
"""See the function description in base.py"""
return EncoderResult(value=0, scalar=0,
encoding=numpy.zeros(self.n)) | 0.005464 |
def move_optimizer_to_cuda(optimizer):
"""
Move the optimizer state to GPU, if necessary.
After calling, any parameter specific state in the optimizer
will be located on the same device as the parameter.
"""
for param_group in optimizer.param_groups:
for param in param_group['params']:
if param.is_cuda:
param_state = optimizer.state[param]
for k in param_state.keys():
if isinstance(param_state[k], torch.Tensor):
param_state[k] = param_state[k].cuda(device=param.get_device()) | 0.003339 |
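A typical call site, sketched with a toy model: after loading an optimizer state dict that was saved on CPU into an optimizer for a CUDA model, the per-parameter state tensors still live on CPU until this helper moves them.

import torch

model = torch.nn.Linear(4, 2).cuda()            # requires a CUDA device
optimizer = torch.optim.Adam(model.parameters())
# optimizer.load_state_dict(checkpoint["optimizer"])  # hypothetical CPU checkpoint
move_optimizer_to_cuda(optimizer)               # exp_avg etc. now sit on the GPU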
def _rpc(self, method, *args):
"""Sends an rpc to the app.
Args:
method: str, The name of the method to execute.
args: any, The args of the method.
Returns:
The result of the rpc.
Raises:
ProtocolError: Something went wrong with the protocol.
ApiError: The rpc went through, however executed with errors.
"""
with self._lock:
apiid = next(self._counter)
data = {'id': apiid, 'method': method, 'params': args}
request = json.dumps(data)
self._client_send(request)
response = self._client_receive()
if not response:
raise ProtocolError(self._ad,
ProtocolError.NO_RESPONSE_FROM_SERVER)
result = json.loads(str(response, encoding='utf8'))
if result['error']:
raise ApiError(self._ad, result['error'])
if result['id'] != apiid:
raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)
if result.get('callback') is not None:
if self._event_client is None:
self._event_client = self._start_event_client()
return callback_handler.CallbackHandler(
callback_id=result['callback'],
event_client=self._event_client,
ret_value=result['result'],
method_name=method,
ad=self._ad)
return result['result'] | 0.001332 |
def _convert(cls, other, ignoreScalars=False):
'''
:other: Point or point equivalent
        :ignoreScalars: optional boolean
:return: Point
Class private method for converting 'other' into a Point
        subclass. If 'other' already is a Point subclass, nothing
is done. If ignoreScalars is True and other is a float or int
type, a TypeError exception is raised.
'''
if ignoreScalars:
if isinstance(other, (int, float)):
msg = "unable to convert {} to {}".format(other, cls.__name__)
raise TypeError(msg)
return cls(other) if not issubclass(type(other), cls) else other | 0.002899 |
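A hedged sketch of how the converter is meant to be called; the Point(x, y) constructor and its acceptance of tuples are assumptions, since only the conversion guard itself appears above.

Point._convert((3, 4))                 # non-Point input: wrapped via cls(other)
Point._convert(Point(1, 2))            # already a Point subclass: returned as-is
Point._convert(5, ignoreScalars=True)  # raises TypeError: scalars are rejected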
def _fuzzy_custom_query(issn, titles):
"""
    This method builds the list of journal-title filters that will be
    applied to the boolean search as a "should" similarity match.
    The filter list is collected from the journal's custom search
    template, whenever such a template exists.
"""
custom_queries = journal_titles.load(issn).get('should', [])
titles = [{'title': i} for i in titles if i not in [x['title'] for x in custom_queries]]
titles.extend(custom_queries)
for item in titles:
if len(item['title'].strip()) == 0:
continue
query = {
"fuzzy": {
"reference_source_cleaned": {
"value": utils.cleanup_string(item['title']),
"fuzziness": item.get('fuzziness', 3),
"max_expansions": 50
}
}
}
yield query | 0.003906 |
def update_port(port, name, admin_state_up=True, profile=None):
'''
Updates a port
CLI Example:
.. code-block:: bash
salt '*' neutron.update_port port-name network-name new-port-name
:param port: Port name or ID
:param name: Name of this port
:param admin_state_up: Set admin state up to true or false,
default: true (Optional)
:param profile: Profile to build on (Optional)
:return: Value of updated port information
'''
conn = _auth(profile)
return conn.update_port(port, name, admin_state_up) | 0.001764 |
def CreateBlockDeviceMap(self, image_id, instance_type):
"""
If you launch without specifying a manual device block mapping, you may
not get all the ephemeral devices available to the given instance type.
This will build one that ensures all available ephemeral devices are
mapped.
"""
# get the block device mapping stored with the image
image = self.ec2.get_image(image_id)
block_device_map = image.block_device_mapping
assert(block_device_map)
# update it to include the ephemeral devices
# max is 4... is it an error for instances with fewer than 4 ?
# see: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/
# InstanceStorage.html#StorageOnInstanceTypes
ephemeral_device_names = ['/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde']
for i, device_name in enumerate(ephemeral_device_names):
name = 'ephemeral%d' % (i)
bdt = blockdevicemapping.BlockDeviceType(ephemeral_name = name)
block_device_map[device_name] = bdt
return block_device_map | 0.018484 |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._state is not None:
return False
if self._time_start is not None:
return False
if self._time_end is not None:
return False
if self._error_message is not None:
return False
if self._scheduled_object is not None:
return False
if self._result_object is not None:
return False
if self._request_reference_split_the_bill is not None:
return False
return True | 0.00339 |
def removeApplicationManifest(self, pchApplicationManifestFullPath):
"""Removes an application manifest from the list to load when building the list of installed applications."""
fn = self.function_table.removeApplicationManifest
result = fn(pchApplicationManifestFullPath)
return result | 0.009375 |
def osCopy(self):
""" Triggers the OS "copy" keyboard shortcut """
k = Keyboard()
k.keyDown("{CTRL}")
k.type("c")
k.keyUp("{CTRL}") | 0.011696 |
def disease_comment(self, comment=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.DiseaseComment` objects in database
:param comment: Comment(s) to disease
:type comment: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.DiseaseComment`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.DiseaseComment`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.DiseaseComment)
q = self.get_model_queries(q, ((comment, models.DiseaseComment.comment),))
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | 0.005596 |
def makeArg(segID, N, CA, C, O, geo):
    '''Creates an Arginine residue'''
##R-Group
CA_CB_length=geo.CA_CB_length
C_CA_CB_angle=geo.C_CA_CB_angle
N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle
CB_CG_length=geo.CB_CG_length
CA_CB_CG_angle= geo.CA_CB_CG_angle
N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle
CG_CD_length=geo.CG_CD_length
CB_CG_CD_angle=geo.CB_CG_CD_angle
CA_CB_CG_CD_diangle=geo.CA_CB_CG_CD_diangle
CD_NE_length=geo.CD_NE_length
CG_CD_NE_angle=geo.CG_CD_NE_angle
CB_CG_CD_NE_diangle=geo.CB_CG_CD_NE_diangle
NE_CZ_length=geo.NE_CZ_length
CD_NE_CZ_angle=geo.CD_NE_CZ_angle
CG_CD_NE_CZ_diangle=geo.CG_CD_NE_CZ_diangle
CZ_NH1_length=geo.CZ_NH1_length
NE_CZ_NH1_angle=geo.NE_CZ_NH1_angle
CD_NE_CZ_NH1_diangle=geo.CD_NE_CZ_NH1_diangle
CZ_NH2_length=geo.CZ_NH2_length
NE_CZ_NH2_angle=geo.NE_CZ_NH2_angle
CD_NE_CZ_NH2_diangle=geo.CD_NE_CZ_NH2_diangle
carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle)
CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C")
carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle)
CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C")
carbon_d= calculateCoordinates(CA, CB, CG, CG_CD_length, CB_CG_CD_angle, CA_CB_CG_CD_diangle)
CD= Atom("CD", carbon_d, 0.0, 1.0, " ", " CD", 0, "C")
nitrogen_e= calculateCoordinates(CB, CG, CD, CD_NE_length, CG_CD_NE_angle, CB_CG_CD_NE_diangle)
NE= Atom("NE", nitrogen_e, 0.0, 1.0, " ", " NE", 0, "N")
carbon_z= calculateCoordinates(CG, CD, NE, NE_CZ_length, CD_NE_CZ_angle, CG_CD_NE_CZ_diangle)
CZ= Atom("CZ", carbon_z, 0.0, 1.0, " ", " CZ", 0, "C")
nitrogen_h1= calculateCoordinates(CD, NE, CZ, CZ_NH1_length, NE_CZ_NH1_angle, CD_NE_CZ_NH1_diangle)
NH1= Atom("NH1", nitrogen_h1, 0.0, 1.0, " ", " NH1", 0, "N")
nitrogen_h2= calculateCoordinates(CD, NE, CZ, CZ_NH2_length, NE_CZ_NH2_angle, CD_NE_CZ_NH2_diangle)
NH2= Atom("NH2", nitrogen_h2, 0.0, 1.0, " ", " NH2", 0, "N")
##Create Residue Data Structure
res= Residue((' ', segID, ' '), "ARG", ' ')
res.add(N)
res.add(CA)
res.add(C)
res.add(O)
res.add(CB)
res.add(CG)
res.add(CD)
res.add(NE)
res.add(CZ)
res.add(NH1)
res.add(NH2)
return res | 0.022563 |
def traverse(self):
"""Traverse the tree yielding the direction taken to a node, the
co-ordinates of that node and the directions leading from the Node.
Yields
------
(direction, (x, y), {:py:class:`~rig.routing_table.Routes`, ...})
Direction taken to reach a Node in the tree, the (x, y) co-ordinate
of that Node and routes leading to children of the Node.
"""
# A queue of (direction, node) to visit. The direction is the Links
# entry which describes the direction in which we last moved to reach
# the node (or None for the root).
to_visit = deque([(None, self)])
while to_visit:
direction, node = to_visit.popleft()
# Determine the set of directions we must travel to reach the
# children
out_directions = set()
for child_direction, child in node.children:
# Note that if the direction is unspecified, we simply
# (silently) don't add a route for that child.
if child_direction is not None:
out_directions.add(child_direction)
# Search the next steps of the route too
if isinstance(child, RoutingTree):
assert child_direction is not None
to_visit.append((child_direction, child))
# Yield the information pertaining to this Node
yield direction, node.chip, out_directions | 0.00132 |
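A sketch of consuming the generator, assuming `tree` is a populated rig RoutingTree: each yielded triple is the inbound direction (None for the root), the node's chip coordinate, and the set of outbound Routes.

for direction, (x, y), out_directions in tree.traverse():
    if direction is None:
        print("root at", (x, y), "routes out via", out_directions)
    else:
        print("reached", (x, y), "via", direction, "routes out via", out_directions)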
def _print_base64(self, base64_data):
"""
Pipe the binary directly to the label printer. Works under Linux
without requiring PySerial. This is not typically something you
should call directly, unless you have special needs.
@type base64_data: L{str}
@param base64_data: The base64 encoded string for the label to print.
"""
label_file = open(self.device, "w")
label_file.write(base64_data)
label_file.close() | 0.006024 |
def _add_qualified_edge_helper(self, u, v, relation, annotations, subject_modifier, object_modifier) -> str:
"""Add a qualified edge from the internal aspects of the parser."""
return self.graph.add_qualified_edge(
u,
v,
relation=relation,
evidence=self.control_parser.evidence,
citation=self.control_parser.citation.copy(),
annotations=annotations,
subject_modifier=subject_modifier,
object_modifier=object_modifier,
**{LINE: self.get_line_number()}
) | 0.005137 |
def refresh_from_pdb(self, pdb_state):
"""
Refresh Variable Explorer and Editor from a Pdb session,
after running any pdb command.
See publish_pdb_state and notify_spyder in spyder_kernels
"""
if 'step' in pdb_state and 'fname' in pdb_state['step']:
fname = pdb_state['step']['fname']
lineno = pdb_state['step']['lineno']
self.sig_pdb_step.emit(fname, lineno)
if 'namespace_view' in pdb_state:
self.sig_namespace_view.emit(ast.literal_eval(
pdb_state['namespace_view']))
if 'var_properties' in pdb_state:
self.sig_var_properties.emit(ast.literal_eval(
pdb_state['var_properties'])) | 0.002674 |
def go_to(self, url_or_text):
"""Go to page utl."""
if is_text_string(url_or_text):
url = QUrl(url_or_text)
else:
url = url_or_text
self.notebookwidget.load(url) | 0.009217 |