text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def sprinter(self):
"""
Called when parallelize is True.
This function will generate the file names in a directory tree by adding directories to a Queue and
continuously exploring directories in the Queue until the Queue is emptied.
Significantly faster than the crawler method for larger directory trees.
"""
self._printer('Multiprocess Walk')
# Loop through directories in case there is more than one (1)
for directory in self.directory:
self._get_root_files(directory) # Add files within the root directory if filepaths is empty
# acquire the list of paths
first_level_dirs = next(os.walk(directory))[1]
for path in first_level_dirs:
self.unsearched.put((directory, path))
self._printer('Pool Processing STARTED')
pool = Pool(self.pool_size)
pool.map_async(self.parallel_worker, range(self.pool_size))
pool.close()
self.unsearched.join()
self._printer('Pool Processing ENDED')
return self.filepaths | 0.0037 |
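A simplified, single-process sketch of the same queue-driven traversal used by `sprinter`, assuming only the standard library; the real method fans the queue out to a multiprocessing `Pool` of `parallel_worker`s instead of draining it inline.

```python
import os
import queue

def walk_with_queue(root):
    """Collect file paths by repeatedly draining a queue of directories."""
    filepaths = []
    unsearched = queue.Queue()
    unsearched.put(root)
    while not unsearched.empty():
        current = unsearched.get()
        try:
            entries = os.listdir(current)
        except OSError:
            continue  # unreadable directory; skip it
        for name in entries:
            full = os.path.join(current, name)
            if os.path.isdir(full):
                unsearched.put(full)   # explore this directory later
            else:
                filepaths.append(full)
    return filepaths

# Example: print(len(walk_with_queue('.')))
```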
def texture_array(self, size, components, data=None, *, alignment=1, dtype='f1') -> 'TextureArray':
'''
Create a :py:class:`TextureArray` object.
Args:
size (tuple): The ``(width, height, layers)`` of the texture.
components (int): The number of components 1, 2, 3 or 4.
data (bytes): Content of the texture. The size must be ``(width, height * layers)``
so each layer is stacked vertically.
Keyword Args:
alignment (int): The byte alignment 1, 2, 4 or 8.
dtype (str): Data type.
Returns:
:py:class:`TextureArray` object
'''
res = TextureArray.__new__(TextureArray)
res.mglo, res._glo = self.mglo.texture_array(size, components, data, alignment, dtype)
res._size = size
res._components = components
res._dtype = dtype
res.ctx = self
res.extra = None
return res | 0.004926 |
def add_event_handler(self, action):
"""Add a event handler into actions list
:param action: event handler to add
:type action: alignak.eventhandler.EventHandler
:return: None
"""
if action.uuid in self.actions:
logger.info("Already existing event handler: %s", action)
return
self.actions[action.uuid] = action
self.nb_event_handlers += 1 | 0.004662 |
def load_hdf(cls, filename, path='', ic=None):
"""
Loads stored ObservationTree from file.
You can provide the isochrone to use, or it will default to MIST.
TODO: saving and loading must be fixed! save ic type, bands, etc.
"""
store = pd.HDFStore(filename)
try:
samples = store[path+'/df']
attrs = store.get_storer(path+'/df').attrs
except:
store.close()
raise
df = store[path+'/df']
new = cls.from_df(df)
if ic is None:
ic = get_ichrone('mist')
new.define_models(ic, N=attrs.N, index=attrs.index)
new.spectroscopy = attrs.spectroscopy
new.parallax = attrs.parallax
store.close()
return new | 0.003836 |
def __select_autocomplete_identities(self, sources):
"""Select the identities used for autocompleting"""
MIN_PRIORITY = 99999999
checked = {}
for source in sources:
uids = api.unique_identities(self.db, source=source)
for uid in uids:
if uid.uuid in checked:
continue
max_priority = MIN_PRIORITY
selected = []
for identity in sorted(uid.identities, key=lambda x: x.id):
try:
priority = sources.index(identity.source)
if priority < max_priority:
selected = [identity]
max_priority = priority
elif priority == max_priority:
selected.append(identity)
except ValueError:
continue
checked[uid.uuid] = selected
identities = collections.OrderedDict(sorted(checked.items(),
key=lambda t: t[0]))
return identities | 0.001738 |
def clear_last_check(self):
"""Clear the checksum of the file."""
with db.session.begin_nested():
self.last_check = None
self.last_check_at = datetime.utcnow()
return self | 0.009132 |
def initialize(
self, config_file: str = "bmi_config.txt", initialize_indicators=True
):
""" Initialize the executable AnalysisGraph with a config file.
Args:
config_file
Returns:
AnalysisGraph
"""
self.t = 0.0
if not os.path.isfile(config_file):
self.create_bmi_config_file(config_file)
self.s0 = [
pd.read_csv(
config_file, index_col=0, header=None, error_bad_lines=False
)[1]
for _ in range(self.res)
]
self.s0_original = self.s0[0].copy(deep=True)
self.latent_state_vector = self.construct_default_initial_state()
for n in self.nodes(data=True):
rv = LatentVar(n[0])
n[1]["rv"] = rv
n[1]["update_function"] = self.default_update_function
rv.dataset = [1.0 for _ in range(self.res)]
rv.partial_t = self.s0[0][f"∂({n[0]})/∂t"]
if initialize_indicators:
for indicator in n[1]["indicators"].values():
indicator.samples = np.random.normal(
indicator.mean * np.array(n[1]["rv"].dataset),
scale=0.01,
) | 0.002366 |
def collate(self, graph):
"""
:type graph: VariantGraph
"""
# prepare the token index
self.token_index.prepare()
self.vertex_array = [None] * len(self.token_index.token_array)
# Build the variant graph for the first witness
# this is easy: generate a vertex for every token
first_witness = self.collation.witnesses[0]
tokens = first_witness.tokens()
token_to_vertex = self.merge(graph, first_witness.sigil, tokens)
# print("> token_to_vertex=", token_to_vertex)
self.update_token_position_to_vertex(token_to_vertex)
self.update_token_to_vertex_array(tokens, first_witness, self.token_position_to_vertex)
# align witness 2 - n
for x in range(1, len(self.collation.witnesses)):
witness = self.collation.witnesses[x]
tokens = witness.tokens()
# print("\nwitness", witness.sigil)
variant_graph_ranking = VariantGraphRanking.of(graph)
# print("> x =", x, ", variant_graph_ranking =", variant_graph_ranking.byRank)
variant_graph_ranks = list(set(map(lambda v: variant_graph_ranking.byVertex.get(v), graph.vertices())))
# we leave in the rank of the start vertex, but remove the rank of the end vertex
variant_graph_ranks.pop()
# now the vertical stuff
tokens_as_index_list = self.as_index_list(tokens)
match_cube = MatchCube(self.token_index, witness, self.vertex_array, variant_graph_ranking,
self.properties_filter)
# print("> match_cube.matches=", match_cube.matches)
self.fill_needleman_wunsch_table(variant_graph_ranks, tokens_as_index_list, match_cube)
aligned = self.align_matching_tokens(match_cube)
# print("> aligned=", aligned)
# print("self.token_index.token_array=", self.token_index.token_array)
# alignment = self.align_function(superbase, next_witness, token_to_vertex, match_cube)
# merge
witness_token_to_generated_vertex = self.merge(graph, witness.sigil, witness.tokens(), aligned)
# print("> witness_token_to_generated_vertex =", witness_token_to_generated_vertex)
token_to_vertex.update(witness_token_to_generated_vertex)
# print("> token_to_vertex =", token_to_vertex)
self.update_token_position_to_vertex(token_to_vertex, aligned)
witness_token_position_to_vertex = {}
for p in self.token_index.get_range_for_witness(witness.sigil):
# print("> p= ", p)
witness_token_position_to_vertex[p] = self.token_position_to_vertex[p]
self.update_token_to_vertex_array(tokens, witness, witness_token_position_to_vertex)
# print("> vertex_array =", self.vertex_array)
# print("actual")
# self._debug_edit_graph_table(self.table)
# print("expected")
# self._debug_edit_graph_table(self.table2)
# change superbase
# superbase = self.new_superbase
if self.detect_transpositions:
detector = TranspositionDetection(self)
detector.detect() | 0.004202 |
def delete_event_ticket_class(self, id, ticket_class_id, **data):
"""
DELETE /events/:id/ticket_classes/:ticket_class_id/
Deletes the ticket class. Returns ``{"deleted": true}``.
"""
return self.delete("/events/{0}/ticket_classes/{1}/".format(id, ticket_class_id), data=data) | 0.01548 |
def m_b(mbmb, scale, f, alphasMZ=0.1185, loop=3):
r"""Get running b quark mass in the MSbar scheme at the scale `scale`
in the theory with `f` dynamical quark flavours starting from $m_b(m_b)$"""
if scale == mbmb and f == 5:
return mbmb # nothing to do
_sane(scale, f)
alphas_mb = alpha_s(mbmb, 5, alphasMZ=alphasMZ, loop=loop)
crd = rundec.CRunDec()
if f == 5:
alphas_scale = alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop)
return crd.mMS2mMS(mbmb, alphas_mb, alphas_scale, f, loop)
elif f == 4:
crd.nfMmu.Mth = 4.8
crd.nfMmu.muth = 4.8
crd.nfMmu.nf = 5
return crd.mH2mL(mbmb, alphas_mb, mbmb, crd.nfMmu, scale, loop)
elif f == 3:
mc = 1.3
crd.nfMmu.Mth = 4.8
crd.nfMmu.muth = 4.8
crd.nfMmu.nf = 5
mbmc = crd.mH2mL(mbmb, alphas_mb, mbmb, crd.nfMmu, mc, loop)
crd.nfMmu.Mth = mc
crd.nfMmu.muth = mc
crd.nfMmu.nf = 4
alphas_mc = alpha_s(mc, 4, alphasMZ=alphasMZ, loop=loop)
return crd.mH2mL(mbmc, alphas_mc, mc, crd.nfMmu, scale, loop)
elif f == 6:
crd.nfMmu.Mth = 170
crd.nfMmu.muth = 170
crd.nfMmu.nf = 6
return crd.mL2mH(mbmb, alphas_mb, mbmb, crd.nfMmu, scale, loop)
else:
raise ValueError("Invalid input: f={}, scale={}".format(f, scale)) | 0.000734 |
def _get_pwned(prefix):
"""
Fetches a dict of all hash suffixes from Pwned Passwords for a
given SHA-1 prefix.
"""
try:
response = requests.get(
url=API_ENDPOINT.format(prefix),
headers={'User-Agent': USER_AGENT},
timeout=getattr(
settings,
'PWNED_PASSWORDS_API_TIMEOUT',
REQUEST_TIMEOUT,
),
)
response.raise_for_status()
except requests.RequestException as e:
# Gracefully handle timeouts and HTTP error response codes.
log.warning(
'Skipped Pwned Passwords check due to error: %r', e
)
return None
results = {}
for line in response.text.splitlines():
line_suffix, _, times = line.partition(':')
results[line_suffix] = int(times)
return results | 0.001157 |
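The function above takes a five-character SHA-1 prefix (the k-anonymity range query). A hedged sketch of how a caller might derive the prefix/suffix pair and read a breach count out of the returned dict; `fetch_suffixes` stands in for `_get_pwned`, and the canned lambda in the comment is illustrative only.

```python
import hashlib

def breach_count(password, fetch_suffixes):
    """Return how often `password` appears in the Pwned Passwords corpus.

    `fetch_suffixes` is any callable with the contract of `_get_pwned`:
    it takes a 5-character SHA-1 prefix and returns {suffix: count} or None.
    """
    digest = hashlib.sha1(password.encode('utf-8')).hexdigest().upper()
    prefix, suffix = digest[:5], digest[5:]
    results = fetch_suffixes(prefix)
    if results is None:          # API error was swallowed upstream
        return None
    return results.get(suffix, 0)

# Example with a canned response instead of a live HTTP call:
# breach_count('hunter2', lambda prefix: {'ABCDEF...': 17})  -> 0 (suffix not present)
```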
def save(self, *args):
""" Save cache to file using pickle.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function.
"""
with open(self.file_root + '.pkl', "wb") as f:
pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL) | 0.005181 |
def find_repositories_by_walking_and_following_symlinks(path):
"""Walk a tree and return a sequence of (directory, dotdir) pairs."""
repos = []
# This is for detecting symlink loops and escaping them. This is similar to
# http://stackoverflow.com/questions/36977259/avoiding-infinite-recursion-with-os-walk/36977656#36977656
def inode(path):
stats = os.stat(path)
return stats.st_dev, stats.st_ino
seen_inodes = {inode(path)}
for dirpath, dirnames, filenames in os.walk(path, followlinks=True):
inodes = [inode(os.path.join(dirpath, p)) for p in dirnames]
dirnames[:] = [p for p, i in zip(dirnames, inodes)
if i not in seen_inodes]
seen_inodes.update(inodes)
for dotdir in set(dirnames) & DOTDIRS:
repos.append((dirpath, dotdir))
return repos | 0.002326 |
def filter_image_sep2d(image, fh, fv, impl='numpy', padding=None):
"""Filter an image with a separable filter.
Parameters
----------
image : 2D array-like
The image to be filtered. It must have a real (vs. complex) dtype.
fh, fv : 1D array-like
Horizontal (axis 0) and vertical (axis 1) filters. Their sizes
can be at most the image sizes in the respective axes.
impl : {'numpy', 'pyfftw'}, optional
FFT backend to use. The ``pyfftw`` backend requires the
``pyfftw`` package to be installed. It is usually significantly
faster than the NumPy backend.
padding : positive int, optional
Amount of zeros added to the left and right of the image in all
axes before FFT. This helps avoid wraparound artifacts due to
large boundary values.
For ``None``, the padding is computed as ::
padding = min(max(len(fh), len(fv)) - 1, 64)
A padding of ``len(filt) - 1`` ensures that errors in FFT-based
convolutions are small. At the same time, the padding should not
be excessive to retain efficiency.
Returns
-------
filtered : 2D `numpy.ndarray`
The image filtered horizontally by ``fh`` and vertically by ``fv``.
It has the same shape as ``image``, and its dtype is
``np.result_type(image, fh, fv)``.
"""
# TODO: generalize for nD
impl, impl_in = str(impl).lower(), impl
if impl not in ('numpy', 'pyfftw'):
raise ValueError('`impl` {!r} not understood'
''.format(impl_in))
image = np.asarray(image)
if image.ndim != 2:
raise ValueError('`image` must be 2-dimensional, got image with '
'ndim={}'.format(image.ndim))
if image.size == 0:
raise ValueError('`image` cannot have size 0')
if not np.issubsctype(image.dtype, np.floating):
image = image.astype(float)
fh = np.asarray(fh).astype(image.dtype)
if fh.ndim != 1:
raise ValueError('`fh` must be one-dimensional')
elif fh.size == 0:
raise ValueError('`fh` cannot have size 0')
elif fh.size > image.shape[0]:
raise ValueError('`fh` can be at most `image.shape[0]`, got '
'{} > {}'.format(fh.size, image.shape[0]))
fv = np.asarray(fv).astype(image.dtype)
if fv.ndim != 1:
raise ValueError('`fv` must be one-dimensional')
elif fv.size == 0:
raise ValueError('`fv` cannot have size 0')
elif fv.size > image.shape[1]:
raise ValueError('`fv` can be at most `image.shape[1]`, got '
'{} > {}'.format(fv.size, image.shape[1]))
# Pad image with zeros
if padding is None:
padding = min(max(len(fh), len(fv)) - 1, 64)
if padding != 0:
image_padded = np.pad(image, padding, mode='constant')
else:
image_padded = image.copy() if impl == 'pyfftw' else image
# Prepare filters for the convolution
def prepare_for_fft(filt, n_new):
"""Return padded and shifted filter ready for FFT.
The filter is padded with zeros to the new size, and then shifted
such that the middle element of the old filter, i.e., the
one at index ``(len(filt) - 1) // 2`` ends up at index 0.
"""
mid = (len(filt) - 1) // 2
padded = np.zeros(n_new, dtype=filt.dtype)
padded[:len(filt) - mid] = filt[mid:]
padded[len(padded) - mid:] = filt[:mid]
return padded
fh = prepare_for_fft(fh, image_padded.shape[0])
fv = prepare_for_fft(fv, image_padded.shape[1])
# Perform the multiplication in Fourier space and apply inverse FFT
if impl == 'numpy':
image_ft = np.fft.rfftn(image_padded)
fh_ft = np.fft.fft(fh)
fv_ft = np.fft.rfft(fv)
image_ft *= fh_ft[:, None]
image_ft *= fv_ft[None, :]
# Important to specify the shape since `irfftn` cannot know the
# original shape
conv = np.fft.irfftn(image_ft, s=image_padded.shape)
if conv.dtype != image.dtype:
conv = conv.astype(image.dtype)
elif impl == 'pyfftw':
if not PYFFTW_AVAILABLE:
raise ValueError(
'`pyfftw` package is not available; you need to install it '
'to use the pyfftw backend')
import pyfftw
import multiprocessing
# Generate output arrays, for half-complex transform of image and
# vertical filter, and full FT of the horizontal filter
out_img_shape = (image_padded.shape[0], image_padded.shape[1] // 2 + 1)
out_img_dtype = np.result_type(image_padded, 1j)
out_img = np.empty(out_img_shape, out_img_dtype)
out_fh_shape = out_img_shape[0]
out_fh_dtype = np.result_type(fh, 1j)
fh_c = fh.astype(out_fh_dtype) # need to make this a C2C trafo
out_fh = np.empty(out_fh_shape, out_fh_dtype)
out_fv_shape = out_img_shape[1]
out_fv_dtype = np.result_type(fv, 1j)
out_fv = np.empty(out_fv_shape, out_fv_dtype)
# Perform the forward transforms of image and filters. We use
# the `FFTW_ESTIMATE` flag to not allow the planner to destroy
# the input.
plan = pyfftw.FFTW(image_padded, out_img, axes=(0, 1),
direction='FFTW_FORWARD',
flags=['FFTW_ESTIMATE'],
threads=multiprocessing.cpu_count())
plan(image_padded, out_img)
plan = pyfftw.FFTW(fh_c, out_fh, axes=(0,),
direction='FFTW_FORWARD',
flags=['FFTW_ESTIMATE'],
threads=multiprocessing.cpu_count())
plan(fh_c, out_fh)
plan = pyfftw.FFTW(fv, out_fv, axes=(0,),
direction='FFTW_FORWARD',
flags=['FFTW_ESTIMATE'],
threads=multiprocessing.cpu_count())
plan(fv, out_fv)
# Fourier space multiplication
out_img *= out_fh[:, None]
out_img *= out_fv[None, :]
# Inverse trafo
conv = image_padded # Overwrite
plan = pyfftw.FFTW(out_img.copy(), conv, axes=(0, 1),
direction='FFTW_BACKWARD',
flags=['FFTW_ESTIMATE'],
threads=multiprocessing.cpu_count())
plan(out_img, conv)
else:
raise ValueError('unsupported `impl` {!r}'.format(impl_in))
if padding:
return conv[padding:-padding, padding:-padding]
else:
return conv | 0.000151 |
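A standalone check of the `prepare_for_fft` helper defined inside the function above, assuming NumPy: for a 3-tap filter padded to length 8, the middle tap lands at index 0 and the left tap wraps to the end.

```python
import numpy as np

def prepare_for_fft(filt, n_new):
    """Zero-pad `filt` to length `n_new` and rotate so the middle tap is at index 0."""
    filt = np.asarray(filt)
    mid = (len(filt) - 1) // 2
    padded = np.zeros(n_new, dtype=filt.dtype)
    padded[:len(filt) - mid] = filt[mid:]
    padded[len(padded) - mid:] = filt[:mid]
    return padded

print(prepare_for_fft([1, 2, 3], 8))   # -> [2 3 0 0 0 0 0 1]
```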
def print_location(proc_obj):
"""Show where we are. GUI's and front-end interfaces often
use this to update displays. So it is helpful to make sure
we give at least some place that's located in a file.
"""
i_stack = proc_obj.curindex
if i_stack is None or proc_obj.stack is None:
return False
core_obj = proc_obj.core
dbgr_obj = proc_obj.debugger
intf_obj = dbgr_obj.intf[-1]
# Evaluation routines like "exec" don't show useful location
# info. In these cases, we will use the position before that in
# the stack. Hence the looping below, which in practice loops
# once and sometimes twice.
remapped_file = None
source_text = None
while i_stack >= 0:
frame_lineno = proc_obj.stack[i_stack]
i_stack -= 1
frame, lineno = frame_lineno
# # Next check to see that local variable breadcrumb exists and
# # has the magic dynamic value.
# # If so, it's us and we don't normally show this.
# if 'breadcrumb' in frame.f_locals:
# if self.run == frame.f_locals['breadcrumb']:
# break
filename = Mstack.frame2file(core_obj, frame, canonic=False)
if '<string>' == filename and dbgr_obj.eval_string:
remapped_file = filename
filename = pyficache.unmap_file(filename)
if '<string>' == filename:
remapped = cmdfns.source_tempfile_remap('eval_string',
dbgr_obj.eval_string)
pyficache.remap_file(filename, remapped)
filename = remapped
lineno = pyficache.unmap_file_line(filename, lineno)
pass
pass
elif '<string>' == filename:
source_text = deparse_fn(frame.f_code)
filename = "<string: '%s'>" % source_text
pass
else:
m = re.search('^<frozen (.*)>', filename)
if m and m.group(1) in pyficache.file2file_remap:
remapped_file = pyficache.file2file_remap[m.group(1)]
pass
elif filename in pyficache.file2file_remap:
remapped_file = pyficache.unmap_file(filename)
# FIXME: a remapped_file shouldn't be the same as its unmapped version
if remapped_file == filename:
remapped_file = None
pass
pass
elif m and m.group(1) in sys.modules:
remapped_file = m.group(1)
pyficache.remap_file(filename, remapped_file)
pass
opts = {
'reload_on_change' : proc_obj.settings('reload'),
'output' : proc_obj.settings('highlight')
}
if 'style' in proc_obj.debugger.settings:
opts['style'] = proc_obj.settings('style')
pyficache.update_cache(filename)
line = pyficache.getline(filename, lineno, opts)
if not line:
if (not source_text and
filename.startswith("<string: ") and proc_obj.curframe.f_code):
# Deparse the code object into a temp file and remap the line from code
# into the corresponding line of the tempfile
co = proc_obj.curframe.f_code
temp_filename, name_for_code = deparse_and_cache(co, proc_obj.errmsg)
lineno = 1
# _, lineno = pyficache.unmap_file_line(temp_filename, lineno, True)
if temp_filename:
filename = temp_filename
pass
else:
# FIXME:
if source_text:
lines = source_text.split("\n")
temp_name='string-'
else:
# try with good ol linecache and consider fixing pyficache
lines = linecache.getlines(filename)
temp_name = filename
if lines:
# FIXME: DRY code with version in cmdproc.py print_location
prefix = os.path.basename(temp_name).split('.')[0]
fd = tempfile.NamedTemporaryFile(suffix='.py',
prefix=prefix,
delete=False)
with fd:
fd.write(''.join(lines))
remapped_file = fd.name
pyficache.remap_file(remapped_file, filename)
fd.close()
pass
line = linecache.getline(filename, lineno,
proc_obj.curframe.f_globals)
if not line:
m = re.search('^<frozen (.*)>', filename)
if m and m.group(1):
remapped_file = m.group(1)
try_module = sys.modules.get(remapped_file)
if (try_module and inspect.ismodule(try_module) and
hasattr(try_module, '__file__')):
remapped_file = sys.modules[remapped_file].__file__
pyficache.remap_file(filename, remapped_file)
line = linecache.getline(remapped_file, lineno,
proc_obj.curframe.f_globals)
else:
remapped_file = m.group(1)
code = proc_obj.curframe.f_code
filename, line = cmdfns.deparse_getline(code, remapped_file,
lineno, opts)
pass
pass
try:
match, reason = Mstack.check_path_with_frame(frame, filename)
if not match:
if filename not in warned_file_mismatches:
proc_obj.errmsg(reason)
warned_file_mismatches.add(filename)
except:
pass
fn_name = frame.f_code.co_name
last_i = frame.f_lasti
print_source_location_info(intf_obj.msg, filename, lineno, fn_name,
remapped_file = remapped_file,
f_lasti = last_i)
if line and len(line.strip()) != 0:
if proc_obj.event:
print_source_line(intf_obj.msg, lineno, line,
proc_obj.event2short[proc_obj.event])
pass
if '<string>' != filename: break
pass
if proc_obj.event in ['return', 'exception']:
val = proc_obj.event_arg
intf_obj.msg('R=> %s' % proc_obj._saferepr(val))
pass
return True | 0.002661 |
def count_subgraph_sizes(graph: BELGraph, annotation: str = 'Subgraph') -> Counter[int]:
"""Count the number of nodes in each subgraph induced by an annotation.
:param annotation: The annotation to group by and compare. Defaults to 'Subgraph'
:return: A dictionary from {annotation value: number of nodes}
"""
return count_dict_values(group_nodes_by_annotation(graph, annotation)) | 0.007481 |
def raise_302(instance, location):
"""Abort the current request with a 302 (Found) response code. Sets the
Location header correctly. If the location does not start with a slash,
the path of the current request is prepended.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 302
"""
_set_location(instance, location)
instance.response.status = 302
raise ResponseException(instance.response) | 0.001792 |
def get_query_batch_request(self, batch_id, job_id=None):
""" Fetch the request sent for the batch. Note should only used for query batches """
if not job_id:
job_id = self.lookup_job_id(batch_id)
url = self.endpoint + "/job/{}/batch/{}/request".format(job_id, batch_id)
resp = requests.get(url, headers=self.headers())
self.check_status(resp)
return resp.text | 0.009501 |
def _extract_alphabet(self, grammar):
"""
Extract an alphabet from the given grammar.
"""
alphabet = set([])
for terminal in grammar.Terminals:
alphabet |= set([x for x in terminal])
self.alphabet = list(alphabet) | 0.007326 |
def update_from_item(self, item):
"""
Update from i3status output. returns if item has changed.
"""
if not self.is_time_module:
# correct the output
# Restore the name/instance.
item["name"] = self.name
item["instance"] = self.instance
# change color good/bad is set specifically for module
if "color" in item and item["color"] in self.color_map:
item["color"] = self.color_map[item["color"]]
# have we updated?
is_updated = self.item != item
self.item = item
else:
# If no timezone or a minute has passed update timezone
t = time()
if self.time_zone_check_due < t:
# If we are late for our timezone update then schedule the next
# update to happen when we next get new data from i3status
interval = self.i3status.update_interval
if not self.set_time_zone(item):
# we had an issue with an invalid time zone probably due to
# suspending. re check the time zone when we next can.
self.time_zone_check_due = 0
elif self.time_zone_check_due and (
t - self.time_zone_check_due > 5 + interval
):
self.time_zone_check_due = 0
else:
# Check again in 30 mins. We do this in case the timezone
# used has switched to/from summer time
self.time_zone_check_due = ((int(t) // 1800) * 1800) + 1800
if not self.time_started:
self.time_started = True
self.i3status.py3_wrapper.timeout_queue_add(self)
is_updated = False
# update time to be shown
return is_updated | 0.001047 |
def read_namespaced_pod_disruption_budget(self, name, namespace, **kwargs):
"""
read the specified PodDisruptionBudget
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_disruption_budget(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodDisruptionBudget (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1beta1PodDisruptionBudget
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_disruption_budget_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_pod_disruption_budget_with_http_info(name, namespace, **kwargs)
return data | 0.005416 |
def from_ase_atoms(cls, atoms):
"""Create an instance of the own class from an ase molecule
Args:
molecule (:class:`ase.atoms.Atoms`):
Returns:
Cartesian:
"""
return cls(atoms=atoms.get_chemical_symbols(), coords=atoms.positions) | 0.00678 |
def _get_result_paths(self, data):
"""Gets the results for a run of bwa index.
bwa index outputs 5 files when the index is created. The filename
prefix will be the same as the input fasta, unless overridden with
the -p option, and the 5 extensions are listed below:
.amb
.ann
.bwt
.pac
.sa
and these extensions (including the period) are the keys of the
dictionary that is returned.
"""
# determine the names of the files. The name will be the same as the
# input fasta file unless overridden with the -p option
if self.Parameters['-p'].isOn():
prefix = self.Parameters['-p'].Value
else:
prefix = data['fasta_in']
# the 5 output file suffixes
suffixes = ['.amb', '.ann', '.bwt', '.pac', '.sa']
out_files = {}
for suffix in suffixes:
out_files[suffix] = ResultPath(prefix + suffix, IsWritten=True)
return out_files | 0.001955 |
def save(self):
"""
Write data to user config file.
"""
with open(self._user_config_file, 'w', encoding='utf-8') as f:
self.write(f) | 0.011364 |
def addSignal(self, s):
"""
Adds a L{Signal} to the interface
"""
if s.nargs == -1:
s.nargs = len([a for a in marshal.genCompleteTypes(s.sig)])
self.signals[s.name] = s
self._xml = None | 0.008163 |
def dynamic_presence(self):
"""
Determine presence based on bed heating level and end presence
time reported by the api.
Idea originated from Alex Lee Yuk Cheung SmartThings Code.
"""
# self.heating_stats()
if not self.presence:
if self.heating_level > 50:
# Can likely make this better
if not self.now_heating:
self.presence = True
elif self.heating_level - self.target_heating_level >= 8:
self.presence = True
elif self.heating_level > 25:
# Catch rising edge
if self.past_heating_level(0) - self.past_heating_level(1) >= 2 \
and self.past_heating_level(1) - self.past_heating_level(2) >= 2 \
and self.past_heating_level(2) - self.past_heating_level(3) >= 2:
# Values are increasing so we are likely in bed
if not self.now_heating:
self.presence = True
elif self.heating_level - self.target_heating_level >= 8:
self.presence = True
elif self.presence:
if self.heating_level <= 15:
# Failsafe, very slow
self.presence = False
elif self.heating_level < 50:
if self.past_heating_level(0) - self.past_heating_level(1) < 0 \
and self.past_heating_level(1) - self.past_heating_level(2) < 0 \
and self.past_heating_level(2) - self.past_heating_level(3) < 0:
# Values are decreasing so we are likely out of bed
self.presence = False
# Last seen can lag real-time by up to 35min so this is
# mostly a backup to using the heat values.
# seen_delta = datetime.fromtimestamp(time.time()) \
# - datetime.strptime(self.last_seen, '%Y-%m-%dT%H:%M:%S')
# _LOGGER.debug('%s Last seen time delta: %s', self.side,
# seen_delta.total_seconds())
# if self.presence and seen_delta.total_seconds() > 2100:
# self.presence = False
_LOGGER.debug('%s Presence Results: %s', self.side, self.presence) | 0.003492 |
def is_valid_python(tkn: str) -> bool:
"""Determine whether tkn is a valid python identifier
:param tkn:
:return:
"""
try:
root = ast.parse(tkn)
except SyntaxError:
return False
return len(root.body) == 1 and isinstance(root.body[0], ast.Expr) and isinstance(root.body[0].value, ast.Name) | 0.006006 |
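A self-contained illustration of the same `ast`-based check: only a single bare identifier parses into one `ast.Expr` wrapping an `ast.Name`, so only `'foo'` passes below.

```python
import ast

for tkn in ('foo', 'foo.bar', '1 + 1', 'class'):
    try:
        root = ast.parse(tkn)
        ok = (len(root.body) == 1 and isinstance(root.body[0], ast.Expr)
              and isinstance(root.body[0].value, ast.Name))
    except SyntaxError:
        ok = False
    print(tkn, ok)   # only 'foo' prints True
```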
def treat(request_body):
"""
Treat a notification and guarantee its authenticity.
:param request_body: The request body in plain text.
:type request_body: string
:return: A safe APIResource
:rtype: APIResource
"""
# Python 3+ support
if isinstance(request_body, six.binary_type):
request_body = request_body.decode('utf-8')
try:
data = json.loads(request_body)
except ValueError:
raise exceptions.UnknownAPIResource('Request body is malformed JSON.')
unsafe_api_resource = APIResource.factory(data)
try:
consistent_api_resource = unsafe_api_resource.get_consistent_resource()
except AttributeError:
raise exceptions.UnknownAPIResource('The API resource provided is invalid.')
return consistent_api_resource | 0.002463 |
async def prover_get_credential(wallet_handle: int,
cred_id: str) -> str:
"""
Gets human readable credential by the given id.
:param wallet_handle: wallet handler (created by open_wallet).
:param cred_id: Identifier by which requested credential is stored in the wallet
:return: credential json
{
"referent": string, // cred_id in the wallet
"attrs": {"key1":"raw_value1", "key2":"raw_value2"},
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
}
"""
logger = logging.getLogger(__name__)
logger.debug("prover_get_credential: >>> wallet_handle: %r, cred_id: %r",
wallet_handle,
cred_id)
if not hasattr(prover_get_credential, "cb"):
logger.debug("prover_get_credential: Creating callback")
prover_get_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_cred_id = c_char_p(cred_id.encode('utf-8'))
credentials_json = await do_call('indy_prover_get_credential',
c_wallet_handle,
c_cred_id,
prover_get_credential.cb)
res = credentials_json.decode()
logger.debug("prover_get_credential: <<< res: %r", res)
return res | 0.002063 |
def admin_permission_factory(admin_view):
"""Default factory for creating a permission for an admin.
It tries to load a :class:`invenio_access.permissions.Permission`
instance if `invenio_access` is installed.
Otherwise, it loads a :class:`flask_principal.Permission` instance.
:param admin_view: Instance of administration view which is currently being
protected.
:returns: Permission instance.
"""
try:
pkg_resources.get_distribution('invenio-access')
from invenio_access import Permission
except pkg_resources.DistributionNotFound:
from flask_principal import Permission
return Permission(action_admin_access) | 0.001456 |
def subscribe(self, name, params=[], callback=None):
"""Subscribe to a collection
Arguments:
name - the name of the publication
params - the subscription parameters
Keyword Arguments:
callback - a function callback that returns an error (if exists)"""
self._wait_for_connect()
def subscribed(error, sub_id):
if error:
self._remove_sub_by_id(sub_id)
if callback:
callback(error.get('reason'))
return
if callback:
callback(None)
self.emit('subscribed', name)
if name in self.subscriptions:
raise MeteorClientException('Already subscribed to {}'.format(name))
sub_id = self.ddp_client.subscribe(name, params, subscribed)
self.subscriptions[name] = {
'id': sub_id,
'params': params
} | 0.002141 |
def prepare_project(project, project_dir, binaries, ips, urls):
"""
Generates blacklists / whitelists
"""
# Get Various Lists / Project Waivers
lists = get_lists.GetLists()
# Get file name black list and project waivers
file_audit_list, file_audit_project_list = lists.file_audit_list(project)
# Get file content black list and project waivers
flag_list, ignore_list = lists.file_content_list(project)
# Get File Ignore Lists
file_ignore = lists.file_ignore()
ignore_directories = lists.ignore_directories(project)
# Get URL Ignore Lists
url_ignore = lists.url_ignore(project)
# Get IP Ignore Lists
ip_ignore = lists.ip_ignore(project)
# Get Binary Ignore Lists
hashlist = get_lists.GetLists()
if binaries or ips or urls:
try:
apikey = os.environ["VT_KEY"]
except KeyError:
logger.error("Please set your virustotal.com API key as an environment variable")
sys.exit(1)
try:
vt_rate_type = config.get('config', 'vt_rate_type')
except six.moves.configparser.NoSectionError:
logger.error("A config section is required for vt_rate_type with a public | private option")
sys.exit(1)
pattern = re.compile(r'\bpublic\b|\bprivate\b')
if not pattern.match(vt_rate_type):
logger.error("Unrecognized %s option for vt_rate_type", vt_rate_type)
sys.exit(1)
# Perform rudimentary scans
scan_file(project, project_dir, binaries, ips, urls, file_audit_list,
file_audit_project_list, flag_list, ignore_list, hashlist,
file_ignore, ignore_directories, url_ignore, ip_ignore, apikey) | 0.002316 |
def get_offsets(self):
"""
returns
-------
a dictionary with these entries
header_start:
byte offset from beginning of the file to the start
of the header
data_start:
byte offset from beginning of the file to the start
of the data section
data_end:
byte offset from beginning of the file to the end
of the data section
Note these are also in the information dictionary, which
you can access with get_info()
"""
return dict(
header_start=self._info['header_start'],
data_start=self._info['data_start'],
data_end=self._info['data_end'],
) | 0.002695 |
def sanitize(s,
normalize_whitespace=True,
normalize_unicode=True,
form='NFKC',
enforce_encoding=True,
encoding='utf-8'):
"""Normalize a string
Args:
s (unicode string): input unicode string
normalize_whitespace (bool): if True, normalize all whitespace to single spaces (including newlines),
strip whitespace at start/end
normalize_unicode (bool): if True, normalize unicode form to 'form'
form (str): unicode form
enforce_encoding (bool): if True, encode string to target encoding and re-decode, ignoring errors
and stripping all characters not part of the encoding
encoding (str): target encoding for the above
Returns:
str: unicode output string
"""
if enforce_encoding:
s = s.encode(encoding, errors='ignore').decode(encoding, errors='ignore')
if normalize_unicode:
s = unicodedata.normalize(form, s)
if normalize_whitespace:
s = re.sub(r'\s+', ' ', s).strip()
return s | 0.004437 |
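An illustrative call against the function above (assuming it is in scope): NFKC folds the circled digit to `1` and the ideographic space to a plain space, and the whitespace pass collapses the newline and tab before stripping.

```python
# assumes the sanitize() defined above is importable/in scope
messy = 'Café\u3000①\n\tvalue'
print(sanitize(messy))   # -> 'Café 1 value'
```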
def write(self, psd_data_or_future, time_start, time_stop, samples):
"""Write PSD of one frequency hop"""
try:
# Wait for result of future
f_array, pwr_array = psd_data_or_future.result()
except AttributeError:
f_array, pwr_array = psd_data_or_future
try:
step = f_array[1] - f_array[0]
row = [
time_stop.strftime('%Y-%m-%d'), time_stop.strftime('%H:%M:%S'),
f_array[0], f_array[-1] + step, step, samples
]
row += list(pwr_array)
self.output.write('{}\n'.format(', '.join(str(x) for x in row)))
self.output.flush()
except Exception as e:
logging.exception('Error writing to output file:') | 0.002561 |
def _position_for_dispense(self, location=None, clearance=0.5):
"""
Position this :any:`Pipette` for an dispense
"""
if location:
if isinstance(location, Placeable):
location = location.bottom(min(location.z_size(), clearance))
self.move_to(location) | 0.006192 |
def createCircle(self, cx, cy, r, strokewidth=1, stroke='black', fill='none'):
"""
Creates a circle
@type cx: string or int
@param cx: starting x-coordinate
@type cy: string or int
@param cy: starting y-coordinate
@type r: string or int
@param r: radius
@type strokewidth: string or int
@param strokewidth: width of the pen used to draw
@type stroke: string (either css constants like "black" or numerical values like "#FFFFFF")
@param stroke: color with which to draw the outer limits
@type fill: string (either css constants like "black" or numerical values like "#FFFFFF")
@param fill: color with which to fill the element (default: no filling)
@return: a circle object
"""
style_dict = {'fill':fill, 'stroke-width':strokewidth, 'stroke':stroke}
myStyle = StyleBuilder(style_dict)
c = Circle(cx, cy, r)
c.set_style(myStyle.getStyle())
return c | 0.010669 |
def date_time_this_decade(
self,
before_now=True,
after_now=False,
tzinfo=None):
"""
Gets a DateTime object for the decade year.
:param before_now: include days in current decade before today
:param after_now: include days in current decade after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime
"""
now = datetime.now(tzinfo)
this_decade_start = datetime(
now.year - (now.year % 10), 1, 1, tzinfo=tzinfo)
next_decade_start = datetime(
min(this_decade_start.year + 10, MAXYEAR), 1, 1, tzinfo=tzinfo)
if before_now and after_now:
return self.date_time_between_dates(
this_decade_start, next_decade_start, tzinfo)
elif not before_now and after_now:
return self.date_time_between_dates(now, next_decade_start, tzinfo)
elif not after_now and before_now:
return self.date_time_between_dates(this_decade_start, now, tzinfo)
else:
return now | 0.001711 |
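A standalone illustration of the decade boundaries the method computes, using only the standard library: for any `now` in 2024, the window is [2020-01-01, 2030-01-01).

```python
from datetime import datetime, MAXYEAR

now = datetime(2024, 6, 15)
this_decade_start = datetime(now.year - (now.year % 10), 1, 1)
next_decade_start = datetime(min(this_decade_start.year + 10, MAXYEAR), 1, 1)
print(this_decade_start, next_decade_start)   # 2020-01-01 00:00:00 2030-01-01 00:00:00
```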
def floyd_warshall_get_path(self, distance, nextn, i, j):
'''
API:
floyd_warshall_get_path(self, distance, nextn, i, j):
Description:
Finds shortest path between i and j using distance and nextn
dictionaries.
Pre:
(1) distance and nextn are outputs of floyd_warshall method.
(2) The graph does not have a negative cycle, i.e.
distance[(i,i)] >= 0 for all nodes i.
Return:
Returns the list of nodes on the path from i to j, ie. [i,...,j]
'''
if distance[(i,j)]=='infinity':
return None
k = nextn[(i,j)]
path = self.floyd_warshall_get_path
if i==k:
return [i, j]
else:
return path(distance, nextn, i,k) + [k] + path(distance, nextn, k,j) | 0.010689 |
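The method above consumes `distance` and `nextn` dictionaries keyed by node pairs. A self-contained sketch of the standard Floyd–Warshall algorithm that produces such pair-keyed dictionaries, using the common "next hop" convention for reconstruction; the snippet's own `nextn` uses an intermediate-node convention, so this is an analogous illustration rather than a drop-in producer for it.

```python
def floyd_warshall_paths(nodes, edges):
    """edges: {(u, v): weight}. Returns (distance, nexthop) keyed by node pairs."""
    INF = float('inf')
    distance = {(i, j): (0 if i == j else edges.get((i, j), INF))
                for i in nodes for j in nodes}
    nexthop = {(i, j): (j if distance[(i, j)] < INF else None)
               for i in nodes for j in nodes}
    for k in nodes:
        for i in nodes:
            for j in nodes:
                if distance[(i, k)] + distance[(k, j)] < distance[(i, j)]:
                    distance[(i, j)] = distance[(i, k)] + distance[(k, j)]
                    nexthop[(i, j)] = nexthop[(i, k)]
    return distance, nexthop

def reconstruct(nexthop, i, j):
    """Read the shortest path i -> j back out of the nexthop table."""
    if nexthop[(i, j)] is None:
        return None
    path = [i]
    while i != j:
        i = nexthop[(i, j)]
        path.append(i)
    return path

dist, nxt = floyd_warshall_paths(['a', 'b', 'c'],
                                 {('a', 'b'): 1, ('b', 'c'): 2, ('a', 'c'): 10})
print(reconstruct(nxt, 'a', 'c'))   # ['a', 'b', 'c']
```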
async def arun_process(path: Union[Path, str], target: Callable, *,
args: Tuple[Any]=(),
kwargs: Dict[str, Any]=None,
callback: Callable[[Set[Tuple[Change, str]]], Awaitable]=None,
watcher_cls: Type[AllWatcher]=PythonWatcher,
debounce=400,
min_sleep=100):
"""
Run a function in a subprocess using multiprocessing.Process, restart it whenever files change in path.
"""
watcher = awatch(path, watcher_cls=watcher_cls, debounce=debounce, min_sleep=min_sleep)
start_process = partial(_start_process, target=target, args=args, kwargs=kwargs)
process = await watcher.run_in_executor(start_process)
reloads = 0
async for changes in watcher:
callback and await callback(changes)
await watcher.run_in_executor(_stop_process, process)
process = await watcher.run_in_executor(start_process)
reloads += 1
return reloads | 0.012821 |
def subscribe(self, topic, qos=0):
# type: (str, int) -> Tuple[int, int]
"""
Subscribe to a certain topic.
:param topic: a string specifying the subscription topic to
subscribe to.
:param qos: the desired quality of service level for the subscription.
Defaults to 0.
:rtype: (int, int)
:result: (result, mid)
A topic is a UTF-8 string, which is used by the broker to filter
messages for each connected client. A topic consists of one or more
topic levels. Each topic level is separated by a forward slash
(topic level separator).
The function returns a tuple (result, mid), where result is
MQTT_ERR_SUCCESS to indicate success or (MQTT_ERR_NO_CONN, None) if the
client is not currently connected. mid is the message ID for the
subscribe request. The mid value can be used to track the subscribe
request by checking against the mid argument in the on_subscribe()
callback if it is defined.
**Topic example:** `myhome/groundfloor/livingroom/temperature`
"""
# TODO: add support for list of topics
# don't subscribe if already subscribed
# try to subscribe
result, mid = self.client.subscribe(topic=topic, qos=qos)
# if successful add to topics
if result == MQTT_ERR_SUCCESS:
self.topics[topic] = TopicQos(topic=topic, qos=qos)
logger.debug('Subscribed to topic: {0}, qos: {1}'
.format(topic, qos))
else:
logger.error('Error {0} subscribing to topic: {1}'
.format(result, topic))
return (result, mid) | 0.001723 |
def modify_karma(self, words):
"""
Given a regex object, look through the groups and modify karma
as necessary
"""
# 'user': karma
k = defaultdict(int)
if words:
# For loop through all of the group members
for word_tuple in words:
word = word_tuple[0]
ending = word[-1]
# This will either end with a - or a +; if it's a -, subtract 1
# karma, if it ends with a +, add 1 karma
change = -1 if ending == '-' else 1
# Now strip the ++ or -- from the end
if '-' in ending:
word = word.rstrip('-')
elif '+' in ending:
word = word.rstrip('+')
# Check if surrounded by parens, if so, remove them
if word.startswith('(') and word.endswith(')'):
word = word[1:-1]
# Finally strip whitespace
word = word.strip()
# Add the user to the dict
if word:
k[word] += change
return k | 0.001736 |
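An illustrative input for the method above: a `findall` with capturing groups yields tuples whose first element carries the trailing `++`/`--`, which is the shape the loop expects via `word_tuple[0]`. The regex here is a hypothetical stand-in, not the bot's actual pattern.

```python
import re

# Hypothetical pattern: a parenthesised phrase or a bare token, followed by
# ++ or --. Two groups so findall() yields tuples, matching word_tuple[0] usage.
KARMA_RE = re.compile(r'((?:\([^)]+\)|[^\s()]+)(\+\+|--))')

words = KARMA_RE.findall('coffee++ (monday mornings)-- alice++ alice++')
print([w[0] for w in words])
# -> ['coffee++', '(monday mornings)--', 'alice++', 'alice++']
# Feeding `words` into modify_karma() would tally:
# {'coffee': 1, 'monday mornings': -1, 'alice': 2}
```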
def do_map(input_dict, feature_format, positive_class=None):
'''
Maps a new example to a set of features.
'''
# Context of the unseen example(s)
train_context = input_dict['train_ctx']
test_context = input_dict['test_ctx']
# Currently known examples & background knowledge
features = input_dict['features']
format = input_dict['output_format']
evaluations = domain_map(features, feature_format, train_context,
test_context, format=format,
positive_class=positive_class)
return {'evaluations': evaluations} | 0.001642 |
def _verify_time_range(payload_dict):
"""Verifies the issued at and expiration from a JWT payload.
Makes sure the current time (in UTC) falls between the issued at and
expiration for the JWT (with some skew allowed for via
``CLOCK_SKEW_SECS``).
Args:
payload_dict: dict, A dictionary containing a JWT payload.
Raises:
AppIdentityError: If there is no ``'iat'`` field in the payload
dictionary.
AppIdentityError: If there is no ``'exp'`` field in the payload
dictionary.
AppIdentityError: If the JWT expiration is too far in the future (i.e.
if the expiration would imply a token lifetime
longer than what is allowed.)
AppIdentityError: If the token appears to have been issued in the
future (up to clock skew).
AppIdentityError: If the token appears to have expired in the past
(up to clock skew).
"""
# Get the current time to use throughout.
now = int(time.time())
# Make sure issued at and expiration are in the payload.
issued_at = payload_dict.get('iat')
if issued_at is None:
raise AppIdentityError(
'No iat field in token: {0}'.format(payload_dict))
expiration = payload_dict.get('exp')
if expiration is None:
raise AppIdentityError(
'No exp field in token: {0}'.format(payload_dict))
# Make sure the expiration gives an acceptable token lifetime.
if expiration >= now + MAX_TOKEN_LIFETIME_SECS:
raise AppIdentityError(
'exp field too far in future: {0}'.format(payload_dict))
# Make sure (up to clock skew) that the token wasn't issued in the future.
earliest = issued_at - CLOCK_SKEW_SECS
if now < earliest:
raise AppIdentityError('Token used too early, {0} < {1}: {2}'.format(
now, earliest, payload_dict))
# Make sure (up to clock skew) that the token isn't already expired.
latest = expiration + CLOCK_SKEW_SECS
if now > latest:
raise AppIdentityError('Token used too late, {0} > {1}: {2}'.format(
now, latest, payload_dict)) | 0.000447 |
def kerr_lightring_velocity(chi):
"""Return the velocity at the Kerr light ring"""
# If chi > 0.9996, the algorithm cannot solve the function
if chi >= 0.9996:
return brentq(kerr_lightring, 0, 0.8, args=(0.9996,))
else:
return brentq(kerr_lightring, 0, 0.8, args=(chi,)) | 0.003333 |
def list_calculations(db, job_type, user_name):
"""
Yield a summary of past calculations.
:param db: a :class:`openquake.server.dbapi.Db` instance
:param job_type: 'hazard' or 'risk'
:param user_name: a user name
"""
jobs = db('SELECT *, %s FROM job WHERE user_name=?x '
'AND job_type=?x ORDER BY start_time' % JOB_TYPE,
user_name, job_type)
out = []
if len(jobs) == 0:
out.append('None')
else:
out.append('job_id | status | start_time | '
' description')
for job in jobs:
descr = job.description
start_time = job.start_time
out.append('%6d | %10s | %s | %s' % (
job.id, job.status, start_time, descr))
return out | 0.001247 |
def _from_rest_ignore(model, props):
""" Purge fields that are completely unknown """
fields = model.all_fields
for prop in list(props.keys()):
if prop not in fields:
del props[prop] | 0.004762 |
def deactivate_version(self, service_id, version_number):
"""Deactivate the current version."""
content = self._fetch("/service/%s/version/%d/deactivate" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content) | 0.024896 |
def publish_model(cursor, model, publisher, message):
"""Publishes the ``model`` and return its ident_hash."""
publishers = publisher
if isinstance(publishers, list) and len(publishers) > 1:
raise ValueError("Only one publisher is allowed. '{}' "
"were given: {}"
.format(len(publishers), publishers))
module_ident, ident_hash = _insert_metadata(cursor, model,
publisher, message)
for resource in getattr(model, 'resources', []):
_insert_resource_file(cursor, module_ident, resource)
if isinstance(model, Document):
html = bytes(cnxepub.DocumentContentFormatter(model))
sha1 = hashlib.new('sha1', html).hexdigest()
cursor.execute("SELECT fileid FROM files WHERE sha1 = %s", (sha1,))
try:
fileid = cursor.fetchone()[0]
except TypeError:
file_args = {
'media_type': 'text/html',
'data': psycopg2.Binary(html),
}
cursor.execute("""\
insert into files (file, media_type)
VALUES (%(data)s, %(media_type)s)
returning fileid""", file_args)
fileid = cursor.fetchone()[0]
args = {
'module_ident': module_ident,
'filename': 'index.cnxml.html',
'fileid': fileid,
}
cursor.execute("""\
INSERT INTO module_files
(module_ident, fileid, filename)
VALUES
(%(module_ident)s, %(fileid)s, %(filename)s)""", args)
elif isinstance(model, Binder):
tree = cnxepub.model_to_tree(model)
tree = _insert_tree(cursor, tree)
return ident_hash | 0.000574 |
def draw(self, **kwargs):
"""
Creates the bar chart of the cross-validated scores generated from the
fit method and places a dashed horizontal line that represents the
average value of the scores.
"""
color = kwargs.pop("color", "b")
width = kwargs.pop("width", 0.3)
linewidth = kwargs.pop("linewidth", 1)
xvals = np.arange(1, len(self.cv_scores_) + 1, 1)
self.ax.bar(xvals, self.cv_scores_, width=width)
self.ax.axhline(
self.cv_scores_mean_, color=color,
label="Mean score = {:0.3f}".format(self.cv_scores_mean_),
linestyle='--', linewidth=linewidth
)
return self.ax | 0.002813 |
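A minimal matplotlib sketch of the same chart outside the visualizer class, with hypothetical fold scores; the dashed horizontal line marks the mean, mirroring the `axhline` call above.

```python
import numpy as np
import matplotlib.pyplot as plt

cv_scores = np.array([0.82, 0.79, 0.85, 0.81, 0.80])   # hypothetical fold scores
xvals = np.arange(1, len(cv_scores) + 1)

fig, ax = plt.subplots()
ax.bar(xvals, cv_scores, width=0.3)
ax.axhline(cv_scores.mean(), color='b', linestyle='--', linewidth=1,
           label='Mean score = {:0.3f}'.format(cv_scores.mean()))
ax.legend()
plt.show()
```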
def ignore(self, tube):
"""Remove the given tube from the watchlist.
:param tube: Name of tube to remove from the watchlist
If all tubes are :func:`ignore()` d, beanstalk will auto-add "default" to the watchlist
to prevent the list from being empty. See :func:`watch()` for more information.
"""
with self._sock_ctx() as socket:
if tube not in self._watchlist:
raise KeyError(tube)
if tube != 'default':
self.desired_watchlist.remove(tube)
if tube in self._watchlist:
self._send_message('ignore {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.remove(tube)
if not self._watchlist:
self._watchlist.add('default') | 0.004854 |
def build_menus(self, order_cats=[]):
""" 生成全局内存菜单"""
menus = [{'category': _cat, 'items': []} for _cat in order_cats]
for path in self.routes:
route = self.routes[path]
for menu in menus:
if route['category'] == menu['category']:
menu['items'].append(route)
return menus | 0.00551 |
def to_fits(self, filename, wavelengths=None, **kwargs):
"""Write the reddening law to a FITS file.
:math:`R(V)` column is automatically named 'Av/E(B-V)'.
Parameters
----------
filename : str
Output filename.
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for sampling.
If not a Quantity, assumed to be in Angstrom.
If `None`, ``self.waveset`` is used.
kwargs : dict
Keywords accepted by :func:`~synphot.specio.write_fits_spec`.
"""
w, y = self._get_arrays(wavelengths)
kwargs['flux_col'] = 'Av/E(B-V)'
kwargs['flux_unit'] = self._internal_flux_unit
# No need to trim/pad zeroes, unless user chooses to do so.
if 'pad_zero_ends' not in kwargs:
kwargs['pad_zero_ends'] = False
if 'trim_zero' not in kwargs:
kwargs['trim_zero'] = False
# There are some standard keywords that should be added
# to the extension header.
bkeys = {'tdisp1': 'G15.7', 'tdisp2': 'G15.7'}
if 'expr' in self.meta:
bkeys['expr'] = (self.meta['expr'], 'synphot expression')
if 'ext_header' in kwargs:
kwargs['ext_header'].update(bkeys)
else:
kwargs['ext_header'] = bkeys
specio.write_fits_spec(filename, w, y, **kwargs) | 0.001395 |
async def release(self):
"Releases the already acquired lock"
expected_token = self.local.token
if expected_token is None:
raise LockError("Cannot release an unlocked lock")
self.local.token = None
await self.do_release(expected_token) | 0.006969 |
def get_views(self, path, year=None, month=None, day=None, hour=None):
""" Get the number of views for a Telegraph article
:param path: Path to the Telegraph page
:param year: Required if month is passed. If passed, the number of
page views for the requested year will be returned
:param month: Required if day is passed. If passed, the number of
page views for the requested month will be returned
:param day: Required if hour is passed. If passed, the number of
page views for the requested day will be returned
:param hour: If passed, the number of page views for
the requested hour will be returned
"""
return self._telegraph.method('getViews', path=path, values={
'year': year,
'month': month,
'day': day,
'hour': hour
}) | 0.002137 |
def get_default_database(self):
"""DEPRECATED - Get the database named in the MongoDB connection URI.
>>> uri = 'mongodb://host/my_database'
>>> client = MongoClient(uri)
>>> db = client.get_default_database()
>>> assert db.name == 'my_database'
>>> db = client.get_database()
>>> assert db.name == 'my_database'
Useful in scripts where you want to choose which database to use
based only on the URI in a configuration file.
.. versionchanged:: 3.5
Deprecated, use :meth:`get_database` instead.
"""
warnings.warn("get_default_database is deprecated. Use get_database "
"instead.", DeprecationWarning, stacklevel=2)
if self.__default_database_name is None:
raise ConfigurationError('No default database defined')
return self[self.__default_database_name] | 0.002188 |
def _get_field_schema(self):
"""
Get a list of all of the default fields for this query type.
If data is available in the monitor type, a list of field definitions
will be returned ahead of the actual data, providing insight into
the available fields. If no data is available in a monitor, this will
block on recv().
:return: list of dictionary fields with the field schema
"""
self.update_format(DetailedFormat())
for fields in self.execute():
if 'fields' in fields:
return fields['fields'] | 0.004934 |
def get_request_data(self, path, action, body=None):
"""Get the default data and status code of the given path + action request.
Args:
path: path of the request.
action: action of the request(get, post, delete...)
body: body sent, used to sent it back for post request.
Returns:
A tuple with the default response data and status code
In case of default status_code, use 0
"""
body = body or ''
path_name, path_spec = self.get_path_spec(path)
response = {}
# Get all status code
if path_spec is not None and action in path_spec.keys():
for status_code in path_spec[action]['responses'].keys():
resp = path_spec[action]['responses'][status_code]
try:
response[int(status_code)] = self.get_response_example(resp)
except ValueError:
response[status_code] = self.get_response_example(resp)
# If there is no status_code add a default 400
if response == {}:
response[400] = ''
return response | 0.003457 |
def later(timeout, actor, method, *args, **kwargs):
'''
Sets a timer that will call the *method* of the *actor* past *timeout*
seconds.
See example in :ref:`sample_inter`
:param int timeout: seconds until the method is called.
:param Proxy actor: actor to which make the call after *time* seconds.
:param Str. method: method of the *actor* to be called.
:param list args: arguments for *method*.
:return: manager of the later (Timer in thread,
Greenlet in green_thread)
'''
call = getattr(actor, method, None)
if not callable(call):
raise IntervalError("later: The actor %s does not have the method \
%s." % (actor.get_id(), method))
if call.__class__.__name__ in ["TellWrapper", "TellRefWrapper"]:
# As with the interval, args have already been dumped.
if call.__class__.__name__ == "TellRefWrapper":
call.__call__ = TellWrapper.__call__
return intervals.later(timeout, call, *args, **kwargs)
else:
raise IntervalError("The callable for the later must be a tell \
method of the actor.") | 0.00086 |
def from_url(cls, url, **kwargs):
"""
Creates an instance of the KubeConfig class from a single URL (useful
for interacting with kubectl proxy).
"""
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": url,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self | 0.002778 |
def guess_depth(self, root_dir):
"""
Try to guess the depth of a directory repository (i.e. whether it has
sub-folders for multiple subjects or visits, depending on where files
and/or derived label files are found in the hierarchy of
sub-directories under the root dir.
Parameters
----------
root_dir : str
Path to the root directory of the repository
"""
deepest = -1
for path, dirs, files in os.walk(root_dir):
depth = self.path_depth(path)
filtered_files = self._filter_files(files, path)
if filtered_files:
logger.info("Guessing depth of directory repository at '{}' is"
" {} due to unfiltered files ('{}') in '{}'"
.format(root_dir, depth,
"', '".join(filtered_files), path))
return depth
if self.PROV_DIR in dirs:
depth_to_return = max(depth - 1, 0)
logger.info("Guessing depth of directory repository at '{}' is"
"{} due to \"Derived label file\" in '{}'"
.format(root_dir, depth_to_return, path))
return depth_to_return
if depth >= self.MAX_DEPTH:
logger.info("Guessing depth of directory repository at '{}' is"
" {} as '{}' is already at maximum depth"
.format(root_dir, self.MAX_DEPTH, path))
return self.MAX_DEPTH
try:
for fpath in chain(filtered_files,
self._filter_dirs(dirs, path)):
Fileset.from_path(fpath)
except ArcanaError:
pass
else:
if depth > deepest:
deepest = depth
if deepest == -1:
raise ArcanaRepositoryError(
"Could not guess depth of '{}' repository as did not find "
"a valid session directory within sub-directories."
.format(root_dir))
return deepest | 0.000912 |
def delete_all_objects(self, async_=False):
"""
Deletes all objects from this container.
By default the call will block until all objects have been deleted. By
passing True for the 'async_' parameter, this method will not block, and
instead return an object that can be used to follow the progress of the
deletion. When deletion is complete the bulk deletion object's
'results' attribute will be populated with the information returned
from the API call. In synchronous mode this is the value that is
returned when the call completes. It is a dictionary with the following
keys:
deleted - the number of objects deleted
not_found - the number of objects not found
status - the HTTP return status code. '200 OK' indicates success
errors - a list of any errors returned by the bulk delete call
"""
nms = self.list_object_names(full_listing=True)
return self.object_manager.delete_all_objects(nms, async_=async_) | 0.002825 |
def abort(*args, **kwargs):
"""
Usage:
return abort("...") => will sys.exit() by default
return abort("...", fatal=True) => Will sys.exit()
# Not fatal, but will log/print message:
return abort("...", fatal=False) => Will return False
return abort("...", fatal=(False, None)) => Will return None
return abort("...", fatal=(False, -1)) => Will return -1
# Not fatal, will not log/print any message:
return abort("...", fatal=None) => Will return None
return abort("...", fatal=(None, None)) => Will return None
return abort("...", fatal=(None, -1)) => Will return -1
:param args: Args passed through for error reporting
:param kwargs: Args passed through for error reporting
:return: kwargs["return_value"] (default: -1) to signify failure to non-fatal callers
"""
code = kwargs.pop("code", 1)
logger = kwargs.pop("logger", LOG.error if code else LOG.info)
fatal = kwargs.pop("fatal", True)
return_value = fatal
if isinstance(fatal, tuple) and len(fatal) == 2:
fatal, return_value = fatal
if logger and fatal is not None and args:
if logging.root.handlers:
logger(*args, **kwargs)
else:
sys.stderr.write("%s\n" % formatted_string(*args))
if fatal:
if isinstance(fatal, type) and issubclass(fatal, BaseException):
raise fatal(code)
if AbortException is not None:
if isinstance(AbortException, type) and issubclass(AbortException, BaseException):
raise AbortException(code)
return AbortException(code)
return return_value | 0.001786 |
def check_inlet(self, helper):
"""
check the Inlets of Raritan PDUs
"""
# walk the data
try:
inlet_values = self.sess.walk_oid(self.oids['oid_inlet_value'])
inlet_units = self.sess.walk_oid(self.oids['oid_inlet_unit'])
inlet_digits = self.sess.walk_oid(self.oids['oid_inlet_digits'])
inlet_states = self.sess.walk_oid(self.oids['oid_inlet_state'])
inlet_warning_uppers = self.sess.walk_oid(self.oids['oid_inlet_warning_upper'])
inlet_critical_uppers = self.sess.walk_oid(self.oids['oid_inlet_critical_upper'])
inlet_critical_lowers = self.sess.walk_oid(self.oids['oid_inlet_critical_lower'])
inlet_warning_lowers = self.sess.walk_oid(self.oids['oid_inlet_warning_lower'])
except health_monitoring_plugins.SnmpException as e:
helper.exit(summary=str(e), exit_code=unknown, perfdata='')
# just print the summary, that the inlet sensors are checked
helper.add_summary("Inlet")
        # all lists must have the same length; if not, something went wrong. That makes it easier and we need fewer loops
# translate the data in human readable units with help of the dicts
for x in range(len(inlet_values)):
inlet_unit = units[int(inlet_units[x].val)]
inlet_digit = inlet_digits[x].val
inlet_state = states[int(inlet_states[x].val)]
inlet_value = real_value(inlet_values[x].val, inlet_digit)
inlet_warning_upper = real_value(inlet_warning_uppers[x].val, inlet_digit)
inlet_critical_upper = real_value(inlet_critical_uppers[x].val, inlet_digit)
inlet_warning_lower = real_value(inlet_warning_lowers[x].val, inlet_digit)
inlet_critical_lower = real_value(inlet_critical_lowers[x].val, inlet_digit)
if inlet_state != "normal":
# we don't want to use the thresholds. we rely on the state value of the device
helper.add_summary("%s %s is %s" % (inlet_value, inlet_unit, inlet_state))
helper.status(critical)
# we always want to see the values in the long output and in the perf data
helper.add_summary("%s %s" % (inlet_value, inlet_unit))
helper.add_long_output("%s %s: %s" % (inlet_value, inlet_unit, inlet_state))
helper.add_metric("Sensor " + str(x) + " -%s-" % inlet_unit, inlet_value,
inlet_warning_lower +\
":" + inlet_warning_upper, inlet_critical_lower + ":" +\
inlet_critical_upper, "", "", "") | 0.007474 |
def split_data(data, squeeze=False):
"""
    Split a 1D or 2D array into two parts along the last axis
    Parameters
    ----------
    data : 1D or 2D array to split along its last axis
    squeeze : bool, squeeze results to remove unnecessary dimensions
"""
vdata = np.atleast_2d(data)
nr_freqs = int(vdata.shape[1] / 2)
part1 = vdata[:, 0:nr_freqs]
part2 = vdata[:, nr_freqs:]
if(squeeze):
part1 = part1.squeeze()
part2 = part2.squeeze()
return part1, part2 | 0.002212 |
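A small self-contained illustration of the same last-axis split with plain numpy; the toy array is an assumption, not data from the original project.
import numpy as np
data = np.arange(8).reshape(2, 4)      # [[0 1 2 3], [4 5 6 7]]
half = data.shape[1] // 2              # split point on the last axis
part1, part2 = data[:, :half], data[:, half:]
print(part1)   # [[0 1], [4 5]]
print(part2)   # [[2 3], [6 7]]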
def _compute_primary_smooths(self):
"""Compute fixed-span smooths with all of the default spans."""
for span in DEFAULT_SPANS:
smooth = smoother.perform_smooth(self.x, self.y, span)
self._primary_smooths.append(smooth) | 0.007752 |
def _set_group_best(self, v, load=False):
"""
Setter method for group_best, mapped from YANG variable /routing_system/route_map/content/match/additional_paths/advertise_set/group_best (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_group_best is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_group_best() directly.
YANG Description: BGP Add-Path advertise group-best path
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="group-best", rest_name="group-best", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'BGP Add-Path advertise group-best path', u'code-name': u'additional-paths-advertise-set-group-best', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """group_best must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="group-best", rest_name="group-best", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'BGP Add-Path advertise group-best path', u'code-name': u'additional-paths-advertise-set-group-best', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='empty', is_config=True)""",
})
self.__group_best = t
if hasattr(self, '_set'):
self._set() | 0.005414 |
def process_class_docstrings(app, what, name, obj, options, lines):
"""
For those classes for which we use ::
:template: autosummary/class_without_autosummary.rst
the documented attributes/methods have to be listed in the class
docstring. However, if one of those lists is empty, we use 'None',
which then generates warnings in sphinx / ugly html output.
This "autodoc-process-docstring" event connector removes that part
from the processed docstring.
"""
if what == "class":
joined = '\n'.join(lines)
templates = [
""".. rubric:: Attributes
.. autosummary::
:toctree:
None
""",
""".. rubric:: Methods
.. autosummary::
:toctree:
None
"""
]
for template in templates:
if template in joined:
joined = joined.replace(template, '')
lines[:] = joined.split('\n') | 0.001099 |
def _parse_date_time_time_zone(self, date_time_time_zone):
""" Parses and convert to protocol timezone a dateTimeTimeZone resource
This resource is a dict with a date time and a windows timezone
This is a common structure on Microsoft apis so it's included here.
"""
if date_time_time_zone is None:
return None
local_tz = self.protocol.timezone
if isinstance(date_time_time_zone, dict):
try:
timezone = pytz.timezone(
get_iana_tz(date_time_time_zone.get(self._cc('timeZone'), 'UTC')))
except pytz.UnknownTimeZoneError:
timezone = local_tz
date_time = date_time_time_zone.get(self._cc('dateTime'), None)
try:
date_time = timezone.localize(parse(date_time)) if date_time else None
except OverflowError as e:
log.debug('Could not parse dateTimeTimeZone: {}. Error: {}'.format(date_time_time_zone, str(e)))
date_time = None
if date_time and timezone != local_tz:
date_time = date_time.astimezone(local_tz)
else:
# Outlook v1.0 api compatibility (fallback to datetime string)
try:
date_time = local_tz.localize(parse(date_time_time_zone)) if date_time_time_zone else None
except Exception as e:
log.debug('Could not parse dateTimeTimeZone: {}. Error: {}'.format(date_time_time_zone, str(e)))
date_time = None
return date_time | 0.004433 |
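The localize-then-convert step used above can be sketched on its own; the timezone names and timestamp below are illustrative assumptions, and pytz/dateutil are assumed to be installed.
import pytz
from dateutil.parser import parse
local_tz = pytz.timezone('UTC')
source_tz = pytz.timezone('Europe/Berlin')
dt = source_tz.localize(parse('2019-05-01T10:00:00'))  # make the naive datetime aware
print(dt.astimezone(local_tz))                         # 2019-05-01 08:00:00+00:00 (CEST is UTC+2)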
def get_abs_file_path(self, rel_file_path,
quit_on_error=None, check_relative_to_path=True):
"""
        Returns the absolute file path of the given [relative] file path,
        resolved relative to either this script or the config file.
May throw a RuntimeError if quit_on_error is True.
:param str rel_file_path: relative file path
:param bool quit_on_error: determines if the script may throw an
exception
:return str: absolute file path of the given relative file path
:raises RuntimeError: if the file path does not exist and
quit_on_error is True
"""
if self.cfg_file_path is not None and \
check_relative_to_path and \
not self.cfg.section('Files')['relative_to_start_processes_file']:
script_dir = os.path.dirname(self.cfg_file_path)
else:
# absolute dir this script is in
script_dir = os.path.dirname(__file__)
abs_file_path = os.path.abspath(
os.path.join(script_dir, rel_file_path))
if not os.path.exists(abs_file_path):
self.log.error(abs_file_path + " does not exist.")
if quit_on_error is True:
raise RuntimeError("Imported file not found. Quit.")
return abs_file_path | 0.002901 |
def update_timestep(self, modelparams, expparams):
r"""
Returns a set of model parameter vectors that is the update of an
input set of model parameter vectors, such that the new models are
conditioned on a particular experiment having been performed.
By default, this is the trivial function
:math:`\vec{x}(t_{k+1}) = \vec{x}(t_k)`.
:param np.ndarray modelparams: Set of model parameter vectors to be
updated.
:param np.ndarray expparams: An experiment parameter array describing
the experiment that was just performed.
:return np.ndarray: Array of shape
``(n_models, n_modelparams, n_experiments)`` describing the update
of each model according to each experiment.
"""
return np.tile(modelparams, (expparams.shape[0],1,1)).transpose((1,2,0)) | 0.010045 |
def reply_message(self, message_url, body):
"""回复某条站内消息
:param message_url: 该条消息的页面 URL
:param body: 内容(不能超过 1024 个字符)
"""
id = re.findall(r'(\d+)/?$', message_url)[0]
api = 'http://www.shanbay.com/api/v1/message/%s/reply/'
url = api % id
data = {
'body': body
}
response = self.request(url, 'post', data=data)
return response.json()['status_code'] == 0 | 0.004405 |
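A one-off check of the id-extraction regex used above, with a made-up message URL.
import re
url = 'http://www.shanbay.com/message/12345/'   # hypothetical message page URL
print(re.findall(r'(\d+)/?$', url)[0])          # '12345'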
def _encrypt(cipher, key, data, iv, padding):
"""
Encrypts plaintext
:param cipher:
A kSecAttrKeyType* value that specifies the cipher to use
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
The padding mode to use, specified as a kSecPadding*Key value - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
if not isinstance(key, byte_cls):
raise TypeError(pretty_message(
'''
key must be a byte string, not %s
''',
type_name(key)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if cipher != Security.kSecAttrKeyTypeRC4 and not isinstance(iv, byte_cls):
raise TypeError(pretty_message(
'''
iv must be a byte string, not %s
''',
type_name(iv)
))
if cipher != Security.kSecAttrKeyTypeRC4 and not padding:
raise ValueError('padding must be specified')
cf_dict = None
cf_key = None
cf_data = None
cf_iv = None
sec_key = None
sec_transform = None
try:
cf_dict = CFHelpers.cf_dictionary_from_pairs([(Security.kSecAttrKeyType, cipher)])
cf_key = CFHelpers.cf_data_from_bytes(key)
cf_data = CFHelpers.cf_data_from_bytes(data)
error_pointer = new(CoreFoundation, 'CFErrorRef *')
sec_key = Security.SecKeyCreateFromData(cf_dict, cf_key, error_pointer)
handle_cf_error(error_pointer)
sec_transform = Security.SecEncryptTransformCreate(sec_key, error_pointer)
handle_cf_error(error_pointer)
if cipher != Security.kSecAttrKeyTypeRC4:
Security.SecTransformSetAttribute(sec_transform, Security.kSecModeCBCKey, null(), error_pointer)
handle_cf_error(error_pointer)
Security.SecTransformSetAttribute(sec_transform, Security.kSecPaddingKey, padding, error_pointer)
handle_cf_error(error_pointer)
cf_iv = CFHelpers.cf_data_from_bytes(iv)
Security.SecTransformSetAttribute(sec_transform, Security.kSecIVKey, cf_iv, error_pointer)
handle_cf_error(error_pointer)
Security.SecTransformSetAttribute(
sec_transform,
Security.kSecTransformInputAttributeName,
cf_data,
error_pointer
)
handle_cf_error(error_pointer)
ciphertext = Security.SecTransformExecute(sec_transform, error_pointer)
handle_cf_error(error_pointer)
return CFHelpers.cf_data_to_bytes(ciphertext)
finally:
if cf_dict:
CoreFoundation.CFRelease(cf_dict)
if cf_key:
CoreFoundation.CFRelease(cf_key)
if cf_data:
CoreFoundation.CFRelease(cf_data)
if cf_iv:
CoreFoundation.CFRelease(cf_iv)
if sec_key:
CoreFoundation.CFRelease(sec_key)
if sec_transform:
CoreFoundation.CFRelease(sec_transform) | 0.001992 |
def _load_converted_gssha_data_from_lsm(self, gssha_var, lsm_var, load_type, time_step=None):
"""
This function loads data from LSM and converts to GSSHA format
"""
if 'radiation' in gssha_var:
conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type]
if gssha_var.startswith('direct_radiation') and not isinstance(lsm_var, basestring):
                # direct_radiation = (1-DIFFUSIVE_FRACTION)*global_radiation
global_radiation_var, diffusive_fraction_var = lsm_var
global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor)
diffusive_fraction = self._load_lsm_data(diffusive_fraction_var)
if gssha_var.endswith("cc"):
diffusive_fraction /= 100.0
self.data = ((1-diffusive_fraction)*global_radiation)
elif gssha_var.startswith('diffusive_radiation') and not isinstance(lsm_var, basestring):
                # diffusive_radiation = DIFFUSIVE_FRACTION*global_radiation
global_radiation_var, diffusive_fraction_var = lsm_var
global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor)
diffusive_fraction = self._load_lsm_data(diffusive_fraction_var)
if gssha_var.endswith("cc"):
diffusive_fraction /= 100
self.data = (diffusive_fraction*global_radiation)
elif isinstance(lsm_var, basestring):
self.data = self._load_lsm_data(lsm_var, self.netcdf_attributes[gssha_var]['conversion_factor'][load_type])
else:
raise ValueError("Invalid LSM variable ({0}) for GSSHA variable {1}".format(lsm_var, gssha_var))
elif gssha_var == 'relative_humidity' and not isinstance(lsm_var, str):
##CONVERSION ASSUMPTIONS:
##1) These equations are for liquid water and are less accurate below 0 deg C
##2) Not adjusting the pressure for the fact that the temperature
## and moisture measurements are given at 2 m AGL.
##Neither of these should have a significant impact on RH values
##given the uncertainty in the model values themselves.
specific_humidity_var, pressure_var, temperature_var = lsm_var
specific_humidity = self._load_lsm_data(specific_humidity_var)
pressure = self._load_lsm_data(pressure_var)
temperature = self._load_lsm_data(temperature_var)
##To compute the relative humidity at 2m,
##given T, Q (water vapor mixing ratio) at 2 m and PSFC (surface pressure):
##Es(saturation vapor pressure in Pa)
##Qs(saturation mixing ratio)=(0.622*es)/(PSFC-es)
##RH = 100*Q/Qs
es = esat(temperature)
self.data = 100 * specific_humidity/((0.622*es)/(pressure-es))
elif gssha_var == 'relative_humidity_dew':
# https://software.ecmwf.int/wiki/display/CKB/Do+ERA+datasets+contain+parameters+for+near-surface+humidity
# temperature in Kelvin
# RH = 100 * es(Td)/es(T)
dew_point_temp_var, temperature_var = lsm_var
dew_point_temp = self._load_lsm_data(dew_point_temp_var)
temperature = self._load_lsm_data(temperature_var)
self.data = 100 * esat(dew_point_temp)/esat(temperature)
elif gssha_var == 'wind_speed' and not isinstance(lsm_var, str):
# WRF: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables
u_vector_var, v_vector_var = lsm_var
conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type]
u_vector = self._load_lsm_data(u_vector_var, conversion_factor)
v_vector = self._load_lsm_data(v_vector_var, conversion_factor)
self.data = (xu.sqrt(u_vector**2 + v_vector**2))
elif 'precipitation' in gssha_var and not isinstance(lsm_var, str):
# WRF: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables
rain_c_var, rain_nc_var = lsm_var
conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type]
rain_c = self._load_lsm_data(rain_c_var, conversion_factor)
rain_nc = self._load_lsm_data(rain_nc_var, conversion_factor)
self.data = rain_c + rain_nc
else:
self.data = self._load_lsm_data(lsm_var,
self.netcdf_attributes[gssha_var]['conversion_factor'][load_type],
self.netcdf_attributes[gssha_var].get('calc_4d_method'),
self.netcdf_attributes[gssha_var].get('calc_4d_dim'),
time_step=time_step)
conversion_function = self.netcdf_attributes[gssha_var].get('conversion_function')
if conversion_function:
self.data.values = self.netcdf_attributes[gssha_var]['conversion_function'][load_type](self.data.values)
if 'precipitation' in gssha_var:
# NOTE: Precipitation is converted from mm/s to mm/hr
# with the conversion factor when it is a rate.
if 'units' in self.data.attrs:
if self.data.attrs['units'] == 'm':
# convert from m to mm
self.data.values *= 1000
if load_type == 'ascii' or load_type == 'netcdf':
# CONVERT TO INCREMENTAL
if gssha_var == 'precipitation_acc':
self.data.values = np.lib.pad(self.data.diff(self.lsm_time_dim).values,
((1, 0), (0, 0), (0, 0)),
'constant',
constant_values=0)
# CONVERT PRECIP TO RADAR (mm/hr) IN FILE
if gssha_var == 'precipitation_inc' or gssha_var == 'precipitation_acc':
# convert from mm to mm/hr
time_step_hours = np.diff(self.xd[self.lsm_time_var].values)[0]/np.timedelta64(1, 'h')
self.data.values /= time_step_hours
# convert to dataset
gssha_data_var_name = self.netcdf_attributes[gssha_var]['gssha_name']
self.data = self.data.to_dataset(name=gssha_data_var_name)
self.data.rename(
{
self.lsm_lon_dim: 'x',
self.lsm_lat_dim: 'y',
self.lsm_lon_var: 'lon',
self.lsm_lat_var: 'lat'
},
inplace=True
)
self.data.attrs = {'proj4': self.xd.lsm.projection.ExportToProj4()}
self.data[gssha_data_var_name].attrs = {
'standard_name': self.netcdf_attributes[gssha_var]['standard_name'],
'long_name': self.netcdf_attributes[gssha_var]['long_name'],
'units': self.netcdf_attributes[gssha_var]['units'][load_type],
} | 0.004925 |
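A hedged numeric check of the relative-humidity relation quoted in the comments above (RH = 100*Q/Qs with Qs = 0.622*es/(PSFC-es)). The Tetens approximation stands in for the module's own esat(), and the sample values are assumptions.
import numpy as np
def esat_tetens(t_kelvin):
    # Saturation vapour pressure in Pa (Tetens approximation, liquid water)
    t_c = t_kelvin - 273.15
    return 610.78 * np.exp(17.27 * t_c / (t_c + 237.3))
q = 0.010             # specific humidity, kg/kg
pressure = 101325.0   # surface pressure, Pa
temperature = 293.15  # 20 deg C
es = esat_tetens(temperature)
qs = 0.622 * es / (pressure - es)
print(100.0 * q / qs)   # roughly 68% relative humidity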
def verify(verified_entity, verification_key):
"""
    The method is expected to raise errors on failure
    :param verified_entity: the entity being verified
    :param verification_key: the verification key
:return:
"""
verification = get_object_or_none(Verification, verified_entity=verified_entity)
if verification is None:
raise ServerError(VerificationHandler.STATUS_VERIFICATION_NOT_FOUND)
if not verification.verify(verification_key):
raise ServerError(VerificationHandler.STATUS_INVALID_VERIFICATION_KEY)
verification.verified = True
verification.save() | 0.00821 |
def add_const(features):
"""
Prepend the constant feature 1 as first feature and return the modified
feature set.
Parameters
----------
features : ndarray or DataFrame
"""
content = np.empty((features.shape[0], features.shape[1] + 1), dtype='float64')
content[:, 0] = 1.
if isinstance(features, np.ndarray):
content[:, 1:] = features
return content
content[:, 1:] = features.iloc[:, :].values
cols = ['Constant'] + features.columns.tolist()
return pd.DataFrame(data=content, index=features.index, columns=cols, dtype='float64') | 0.005034 |
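A minimal, self-contained sketch of the constant-prepend step for the ndarray case; the toy feature matrix is an assumption.
import numpy as np
features = np.arange(6, dtype='float64').reshape(3, 2)
content = np.empty((features.shape[0], features.shape[1] + 1), dtype='float64')
content[:, 0] = 1.          # constant column first
content[:, 1:] = features   # original features after it
print(content)              # first column is all ones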
def register_service(
self,
clazz,
service,
properties,
send_event=True,
factory=False,
prototype=False,
):
# type: (Union[List[Any], type, str], object, dict, bool, bool, bool) -> ServiceRegistration
"""
Registers a service
:param clazz: Class or Classes (list) implemented by this service
:param service: The service instance
:param properties: The services properties (dictionary)
:param send_event: If not, doesn't trigger a service registered event
:param factory: If True, the given service is a service factory
:param prototype: If True, the given service is a prototype service
factory (the factory argument is considered True)
:return: A ServiceRegistration object
:raise BundleException: An error occurred while registering the service
"""
return self.__framework.register_service(
self.__bundle,
clazz,
service,
properties,
send_event,
factory,
prototype,
) | 0.004344 |
def input(self, _in, out, **kw):
"""Input filtering."""
args = [self.binary or 'cleancss'] + self.rebase_opt
if self.extra_args:
args.extend(self.extra_args)
self.subprocess(args, out, _in) | 0.008584 |
def expand_path(path):
'''
Given a compressed path, return a new path that has all the segments
in it interpolated.
'''
expanded = []
if len(path) < 2:
return expanded
for i in range(len(path)-1):
expanded += bresenham(path[i], path[i + 1])
expanded += [path[:-1]]
return expanded | 0.003012 |
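bresenham above is an external helper; a hypothetical stand-in (standard integer Bresenham, endpoint excluded, as expand_path appears to assume) could look like this.
def bresenham(p0, p1):
    # Grid points on the segment from p0 up to (but excluding) p1.
    (x0, y0), (x1, y1) = p0, p1
    dx, dy = abs(x1 - x0), -abs(y1 - y0)
    sx = 1 if x0 < x1 else -1
    sy = 1 if y0 < y1 else -1
    err = dx + dy
    points = []
    while (x0, y0) != (x1, y1):
        points.append((x0, y0))
        e2 = 2 * err
        if e2 >= dy:
            err += dy
            x0 += sx
        if e2 <= dx:
            err += dx
            y0 += sy
    return points
print(bresenham((0, 0), (3, 2)))   # [(0, 0), (1, 1), (2, 1)]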
def make_url_fetcher(dispatcher=None,
next_fetcher=weasyprint.default_url_fetcher):
"""Return an function suitable as a ``url_fetcher`` in WeasyPrint.
You generally don’t need to call this directly.
If ``dispatcher`` is not provided, :func:`make_flask_url_dispatcher`
is called to get one. This requires a request context.
    Otherwise, it must be a callable that takes a URL and returns either
    ``None`` or a ``(wsgi_callable, base_url, path)`` tuple. For ``None``,
    ``next_fetcher`` is used. (By default, fetch normally over the network.)
For a tuple the request is made at the WSGI level.
``wsgi_callable`` must be a Flask application or another WSGI callable.
``base_url`` is the root URL for the application while ``path``
is the path within the application.
Typically ``base_url + path`` is equal or equivalent to the passed URL.
"""
if dispatcher is None:
dispatcher = make_flask_url_dispatcher()
def flask_url_fetcher(url):
redirect_chain = set()
while 1:
result = dispatcher(url)
if result is None:
return next_fetcher(url)
app, base_url, path = result
client = Client(app, response_wrapper=Response)
if isinstance(path, unicode):
                # TODO: double-check this. Apparently Werkzeug %-unquotes bytes
# but not Unicode URLs. (IRI vs. URI or something.)
path = path.encode('utf8')
response = client.get(path, base_url=base_url)
if response.status_code == 200:
return dict(
string=response.data,
mime_type=response.mimetype,
encoding=response.charset,
redirected_url=url)
# The test client can follow redirects, but do it ourselves
# to get access to the redirected URL.
elif response.status_code in (301, 302, 303, 305, 307):
redirect_chain.add(url)
url = response.location
if url in redirect_chain:
raise ClientRedirectError('loop detected')
else:
raise ValueError('Flask-WeasyPrint got HTTP status %s for %s%s'
% (response.status, base_url, path))
return flask_url_fetcher | 0.000419 |
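Hedged usage sketch: inside a Flask request context, the fetcher returned above can be handed to WeasyPrint's documented url_fetcher argument; render_pdf_bytes and the surrounding wiring are assumptions for illustration.
import weasyprint
def render_pdf_bytes(html_string):
    # Assumes an active Flask request context so the default dispatcher can be built.
    fetcher = make_url_fetcher()
    return weasyprint.HTML(string=html_string, url_fetcher=fetcher).write_pdf()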
def connect_inputs(self, datas):
"""
Connects input ``Pipers`` to "datas" input data in the correct order
determined, by the ``Piper.ornament`` attribute and the ``Dagger._cmp``
function.
It is assumed that the input data is in the form of an iterator and
that all inputs have the same number of input items. A pipeline will
**deadlock** otherwise.
Arguments:
- datas (sequence of sequences) An ordered sequence of inputs for
all input ``Pipers``.
"""
start_pipers = self.get_inputs()
self.log.debug('%s trying to connect inputs in the order %s' % \
(repr(self), repr(start_pipers)))
for piper, data in izip(start_pipers, datas):
piper.connect([data])
        self.log.debug('%s successfully connected inputs' % repr(self)) | 0.012141 |
def pmdec(self,*args,**kwargs):
"""
NAME:
pmdec
PURPOSE:
return proper motion in declination (in mas/yr)
INPUT:
t - (optional) time at which to get pmdec (can be Quantity)
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
in the Galactocentric frame
(in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantities)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
OUTPUT:
pm_dec(t) in mas/yr
HISTORY:
2011-02-24 - Written - Bovy (NYU)
"""
out= self._orb.pmdec(*args,**kwargs)
if len(out) == 1: return out[0]
else: return out | 0.010899 |
def data_iterator_cache(uri,
batch_size,
shuffle=False,
rng=None,
normalize=True,
with_memory_cache=True,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_cache
Get data from the cache directory.
Cache files are read from the local file system.
For example,
.. code-block:: python
batch = data_iterator_cache('CACHE_DIR', batch_size, shuffle=True)
Args:
uri (str): Location of directory with cache files.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
normalize (bool): If True, each sample in the data gets normalized by a factor of 255.
Default is True.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = CacheDataSource(uri,
shuffle=shuffle,
rng=rng,
normalize=normalize)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks) | 0.002233 |
def lessThan(self, value):
"""
Sets the operator type to Query.Op.LessThan and sets the
value to the inputted value.
:param value <variant>
:return <Query>
:sa lessThan
:usage |>>> from orb import Query as Q
|>>> query = Q('test').lessThan(1)
|>>> print query
|test less_than 1
"""
newq = self.copy()
newq.setOp(Query.Op.LessThan)
newq.setValue(value)
return newq | 0.010292 |
def Jobs(self, crawlId=None):
"""
Create a JobClient for listing and creating jobs.
The JobClient inherits the confId from the Nutch client.
:param crawlId: crawlIds to use for this client. If not provided, will be generated
by nutch.defaultCrawlId()
:return: a JobClient
"""
crawlId = crawlId if crawlId else defaultCrawlId()
return JobClient(self.server, crawlId, self.confId) | 0.006623 |
def make_at_least_n_items_valid(flag_list, n):
"""
tries to make at least min(len(flag_list, n) items True in flag_list
Args:
flag_list (list): list of booleans
n (int): number of items to ensure are True
CommandLine:
python -m utool.util_dev --test-make_at_least_n_items_valid
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> # build test data
>>> flag_list = [False, True, False, False, False, False, False, True]
>>> n = 5
>>> # execute function
>>> flag_list = make_at_least_n_items_valid(flag_list, n)
>>> # verify results
>>> result = str(flag_list)
>>> print(result)
[ True True True True False False False True]
"""
flag_list = np.array(flag_list)
num_valid = flag_list.sum()
# Find how many places we need to make true
num_extra = min(len(flag_list) - num_valid, n - num_valid)
# make_at_least_n_items_valid
# Add in some extra daids to show if there are not enough
for index in range(len(flag_list)):
if num_extra <= 0:
break
if not flag_list[index]:
flag_list[index] = True
num_extra -= 1
return flag_list | 0.000788 |
def paintEvent(self, event):
"""
Overloads the paint event to handle painting pointers for the popup \
mode.
:param event | <QPaintEvent>
"""
# use the base technique for the dialog mode
if self.currentMode() == XPopupWidget.Mode.Dialog:
super(XPopupWidget, self).paintEvent(event)
return
# setup the coloring options
palette = self.palette()
with XPainter(self) as painter:
pen = QPen(palette.color(palette.Window).darker(130))
pen.setWidthF(1.75)
painter.setPen(pen)
painter.setRenderHint(painter.Antialiasing)
painter.setBrush(palette.color(palette.Window))
painter.drawPath(self.borderPath()) | 0.007273 |
def to_xdr_object(self):
"""Creates an XDR Transaction object that represents this
:class:`Transaction`.
"""
source_account = account_xdr_object(self.source)
memo = self.memo.to_xdr_object()
operations = [o.to_xdr_object() for o in self.operations]
ext = Xdr.nullclass()
ext.v = 0
return Xdr.types.Transaction(source_account, self.fee, self.sequence,
self.time_bounds, memo, operations, ext) | 0.003992 |
def select(self, space, key=None, **kwargs) -> _MethodRet:
"""
Select request coroutine.
Examples:
.. code-block:: pycon
>>> await conn.select('tester')
<Response sync=3 rowcount=2 data=[
<TarantoolTuple id=1 name='one'>,
<TarantoolTuple id=2 name='two'>
]>
>>> res = await conn.select('_space', ['tester'], index='name')
>>> res.data
[<TarantoolTuple id=512
owner=1
name='tester'
engine='memtx'
field_count=0
flags={}
format=[
{'name': 'id', 'type': 'unsigned'},
{'name': 'name', 'type': 'string'}
]>]
:param space: space id or space name.
:param key: key to select
:param offset: offset to use
:param limit: limit to use
:param index: index id or name
:param iterator: one of the following
* iterator id (int number),
* :class:`asynctnt.Iterator` object
* string with an iterator name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.select(space, key, **kwargs) | 0.001259 |
def initialize(self, runtime=None):
"""Initializes this manager.
A manager is initialized once at the time of creation.
arg: runtime (osid.OsidRuntimeManager): the runtime
environment
raise: CONFIGURATION_ERROR - an error with implementation
configuration
raise: ILLEGAL_STATE - this manager has already been
initialized by the OsidRuntime
raise: NullArgument - runtime is null
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
implementation notes: In addition to loading its runtime
configuration an implementation may create shared resources such
as connection pools to be shared among all sessions of this
service and released when this manager is closed. Providers must
thread-protect any data stored in the manager. To maximize
interoperability, providers should not honor a second call to
initialize() and must set an ILLEGAL_STATE error.
"""
if self._runtime is not None:
raise IllegalState()
self._runtime = runtime
config = runtime.get_configuration()
parameter_id = Id('parameter:hostName@dlkit_service')
host = config.get_value_by_parameter(parameter_id).get_string_value()
if host is not None:
self._host = host
parameter_id = Id('parameter:appKey@dlkit_service')
app_key = config.get_value_by_parameter(parameter_id).get_string_value()
if app_key is not None:
self._app_key = app_key | 0.001819 |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(HttpdCollector, self).get_default_config()
config.update({
'path': 'httpd',
'urls': ['localhost http://localhost:8080/server-status?auto']
})
return config | 0.00597 |
def list_users(self, objectmask=None, objectfilter=None):
"""Lists all users on an account
:param string objectmask: Used to overwrite the default objectmask.
:param dictionary objectfilter: If you want to use an objectfilter.
:returns: A list of dictionaries that describe each user
Example::
result = mgr.list_users()
"""
if objectmask is None:
objectmask = """mask[id, username, displayName, userStatus[name], hardwareCount, virtualGuestCount,
email, roles]"""
return self.account_service.getUsers(mask=objectmask, filter=objectfilter) | 0.006033 |
def _process_items(cls, vals):
"Processes list of items assigning unique paths to each."
if type(vals) is cls:
return vals.data
elif not isinstance(vals, (list, tuple)):
vals = [vals]
items = []
counts = defaultdict(lambda: 1)
cls._unpack_paths(vals, items, counts)
items = cls._deduplicate_items(items)
return items | 0.00495 |
def templates(self) -> List['Template']:
"""Return a list of templates as template objects."""
_lststr = self._lststr
_type_to_spans = self._type_to_spans
return [
Template(_lststr, _type_to_spans, span, 'Template')
for span in self._subspans('Template')] | 0.006431 |
def _get_to_many_relationship_value(self, obj, column):
"""
        Get the resulting data for a one-to-many or many-to-many relationship
        :param obj obj: The instance we manage
        :param dict column: The column description dictionary
:returns: The associated value
"""
related_key = column.get('related_key', None)
related = getattr(obj, column['__col__'].key)
value = {}
if related:
total = len(related)
for index, rel_obj in enumerate(related):
if related_key:
compiled_res = self._get_formatted_val(
rel_obj, related_key, column
)
else:
compiled_res = column['__prop__'].compile_obj(
rel_obj
)
value['item_%d' % index] = compiled_res
value[str(index)] = compiled_res
value["_" + str(index)] = compiled_res
if index == 0:
value['first'] = compiled_res
if index == total - 1:
value['last'] = compiled_res
return value | 0.002483 |
def to_meshcode(lat, lon, level):
"""緯度経度から指定次の地域メッシュコードを算出する。
Args:
lat: 世界測地系の緯度(度単位)
lon: 世界測地系の経度(度単位)
level: 地域メッシュコードの次数
1次(80km四方):1
40倍(40km四方):40000
20倍(20km四方):20000
16倍(16km四方):16000
2次(10km四方):2
8倍(8km四方):8000
5倍(5km四方):5000
4倍(4km四方):4000
2.5倍(2.5km四方):2500
2倍(2km四方):2000
3次(1km四方):3
4次(500m四方):4
5次(250m四方):5
6次(125m四方):6
Return:
指定次の地域メッシュコード
"""
if not 0 <= lat < 66.66:
raise ValueError('the latitude is out of bound.')
if not 100 <= lon < 180:
raise ValueError('the longitude is out of bound.')
# reminder of latitude and longitude by its unit in degree of mesh level.
rem_lat_lv0 = lambda lat: lat
rem_lon_lv0 = lambda lon: lon % 100
rem_lat_lv1 = lambda lat: rem_lat_lv0(lat) % _unit_lat_lv1()
rem_lon_lv1 = lambda lon: rem_lon_lv0(lon) % _unit_lon_lv1()
rem_lat_40000 = lambda lat: rem_lat_lv1(lat) % _unit_lat_40000()
rem_lon_40000 = lambda lon: rem_lon_lv1(lon) % _unit_lon_40000()
rem_lat_20000 = lambda lat: rem_lat_40000(lat) % _unit_lat_20000()
rem_lon_20000 = lambda lon: rem_lon_40000(lon) % _unit_lon_20000()
rem_lat_16000 = lambda lat: rem_lat_lv1(lat) % _unit_lat_16000()
rem_lon_16000 = lambda lon: rem_lon_lv1(lon) % _unit_lon_16000()
rem_lat_lv2 = lambda lat: rem_lat_lv1(lat) % _unit_lat_lv2()
rem_lon_lv2 = lambda lon: rem_lon_lv1(lon) % _unit_lon_lv2()
rem_lat_8000 = lambda lat: rem_lat_lv1(lat) % _unit_lat_8000()
rem_lon_8000 = lambda lon: rem_lon_lv1(lon) % _unit_lon_8000()
rem_lat_5000 = lambda lat: rem_lat_lv2(lat) % _unit_lat_5000()
rem_lon_5000 = lambda lon: rem_lon_lv2(lon) % _unit_lon_5000()
rem_lat_4000 = lambda lat: rem_lat_8000(lat) % _unit_lat_4000()
rem_lon_4000 = lambda lon: rem_lon_8000(lon) % _unit_lon_4000()
rem_lat_2500 = lambda lat: rem_lat_5000(lat) % _unit_lat_2500()
rem_lon_2500 = lambda lon: rem_lon_5000(lon) % _unit_lon_2500()
rem_lat_2000 = lambda lat: rem_lat_lv2(lat) % _unit_lat_2000()
rem_lon_2000 = lambda lon: rem_lon_lv2(lon) % _unit_lon_2000()
rem_lat_lv3 = lambda lat: rem_lat_lv2(lat) % _unit_lat_lv3()
rem_lon_lv3 = lambda lon: rem_lon_lv2(lon) % _unit_lon_lv3()
rem_lat_lv4 = lambda lat: rem_lat_lv3(lat) % _unit_lat_lv4()
rem_lon_lv4 = lambda lon: rem_lon_lv3(lon) % _unit_lon_lv4()
rem_lat_lv5 = lambda lat: rem_lat_lv4(lat) % _unit_lat_lv5()
rem_lon_lv5 = lambda lon: rem_lon_lv4(lon) % _unit_lon_lv5()
rem_lat_lv6 = lambda lat: rem_lat_lv5(lat) % _unit_lat_lv6()
rem_lon_lv6 = lambda lon: rem_lon_lv5(lon) % _unit_lon_lv6()
def meshcode_lv1(lat, lon):
ab = int(rem_lat_lv0(lat) / _unit_lat_lv1())
cd = int(rem_lon_lv0(lon) / _unit_lon_lv1())
return str(ab) + str(cd)
def meshcode_40000(lat, lon):
e = int(rem_lat_lv1(lat) / _unit_lat_40000())*2 + int(rem_lon_lv1(lon) / _unit_lon_40000()) + 1
return meshcode_lv1(lat, lon) + str(e)
def meshcode_20000(lat, lon):
f = int(rem_lat_40000(lat) / _unit_lat_20000())*2 + int(rem_lon_40000(lon) / _unit_lon_20000()) + 1
g = 5
return meshcode_40000(lat, lon) + str(f) + str(g)
def meshcode_16000(lat, lon):
e = int(rem_lat_lv1(lat) / _unit_lat_16000())*2
f = int(rem_lon_lv1(lon) / _unit_lon_16000())*2
g = 7
return meshcode_lv1(lat, lon) + str(e) + str(f) + str(g)
def meshcode_lv2(lat, lon):
e = int(rem_lat_lv1(lat) / _unit_lat_lv2())
f = int(rem_lon_lv1(lon) / _unit_lon_lv2())
return meshcode_lv1(lat, lon) + str(e) + str(f)
def meshcode_8000(lat, lon):
e = int(rem_lat_lv1(lat) / _unit_lat_8000())
f = int(rem_lon_lv1(lon) / _unit_lon_8000())
g = 6
return meshcode_lv1(lat, lon) + str(e) + str(f) + str(g)
def meshcode_5000(lat, lon):
g = int(rem_lat_lv2(lat) / _unit_lat_5000())*2 + int(rem_lon_lv2(lon) / _unit_lon_5000()) + 1
return meshcode_lv2(lat, lon) + str(g)
def meshcode_4000(lat, lon):
h = int(rem_lat_8000(lat) / _unit_lat_4000())*2 + int(rem_lon_8000(lon) / _unit_lon_4000()) + 1
i = 7
return meshcode_8000(lat, lon) + str(h) + str(i)
def meshcode_2500(lat, lon):
h = int(rem_lat_5000(lat) / _unit_lat_2500())*2 + int(rem_lon_5000(lon) / _unit_lon_2500()) + 1
i = 6
return meshcode_5000(lat, lon) + str(h) + str(i)
def meshcode_2000(lat, lon):
g = int(rem_lat_lv2(lat) / _unit_lat_2000())*2
h = int(rem_lon_lv2(lon) / _unit_lon_2000())*2
i = 5
return meshcode_lv2(lat, lon) + str(g) + str(h) + str(i)
def meshcode_lv3(lat, lon):
g = int(rem_lat_lv2(lat) / _unit_lat_lv3())
h = int(rem_lon_lv2(lon) / _unit_lon_lv3())
return meshcode_lv2(lat, lon) + str(g) + str(h)
def meshcode_lv4(lat, lon):
i = int(rem_lat_lv3(lat) / _unit_lat_lv4())*2 + int(rem_lon_lv3(lon) / _unit_lon_lv4()) + 1
return meshcode_lv3(lat, lon) + str(i)
def meshcode_lv5(lat, lon):
j = int(rem_lat_lv4(lat) / _unit_lat_lv5())*2 + int(rem_lon_lv4(lon) / _unit_lon_lv5()) + 1
return meshcode_lv4(lat, lon) + str(j)
def meshcode_lv6(lat, lon):
k = int(rem_lat_lv5(lat) / _unit_lat_lv6())*2 + int(rem_lon_lv5(lon) / _unit_lon_lv6()) + 1
return meshcode_lv5(lat, lon) + str(k)
if level == 1:
return meshcode_lv1(lat, lon)
if level == 40000:
return meshcode_40000(lat, lon)
if level == 20000:
return meshcode_20000(lat, lon)
if level == 16000:
return meshcode_16000(lat, lon)
if level == 2:
return meshcode_lv2(lat, lon)
if level == 8000:
return meshcode_8000(lat, lon)
if level == 5000:
return meshcode_5000(lat, lon)
if level == 4000:
return meshcode_4000(lat, lon)
if level == 2500:
return meshcode_2500(lat, lon)
if level == 2000:
return meshcode_2000(lat, lon)
if level == 3:
return meshcode_lv3(lat, lon)
if level == 4:
return meshcode_lv4(lat, lon)
if level == 5:
return meshcode_lv5(lat, lon)
if level == 6:
return meshcode_lv6(lat, lon)
raise ValueError("the level is unsupported.") | 0.005994 |
def load(cls, filename, format=None):
""" Return an instance of the class that is saved in the file with the
given filename in the specified format.
"""
if format is None:
# try to derive protocol from file extension
format = format_from_extension(filename)
with file(filename, 'rbU') as fp:
obj = cls.load_from_file_object(fp, format)
obj.filename = filename
return obj | 0.004219 |
def findWhere(self, attrs=None):
"""
Convenience version of a common use case of `find`:
getting the first object
containing specific `key:value` pairs.
"""
return self._wrap(self._clean.where(attrs, True)) | 0.007874 |
def asin(x):
"""
Inverse sine
"""
if isinstance(x, UncertainFunction):
mcpts = np.arcsin(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.arcsin(x) | 0.005 |
def _valid_ip(ip_address):
'''
Check if the IP address is valid and routable
Return either True or False
'''
try:
address = ipaddress.IPv4Address(ip_address)
except ipaddress.AddressValueError:
return False
if address.is_unspecified or \
address.is_loopback or \
address.is_link_local or \
address.is_multicast or \
address.is_reserved:
return False
return True | 0.004425 |
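A quick, standalone check of the same routability rules with the standard-library ipaddress module; the sample addresses are illustrative.
import ipaddress
def is_routable_ipv4(ip):
    try:
        addr = ipaddress.IPv4Address(ip)
    except ipaddress.AddressValueError:
        return False
    return not (addr.is_unspecified or addr.is_loopback or addr.is_link_local
                or addr.is_multicast or addr.is_reserved)
for ip in ('8.8.8.8', '127.0.0.1', '169.254.1.1', '999.1.1.1'):
    print(ip, is_routable_ipv4(ip))   # True, False, False, False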