| text (string, length 78–104k) | score (float64, 0–0.18) |
| --- | --- |
def search(query, model):
""" Performs a search query and returns the object ids """
query = query.strip()
LOGGER.debug(query)
sqs = SearchQuerySet()
results = sqs.raw_search('{}*'.format(query)).models(model)
if not results:
results = sqs.raw_search('*{}'.format(query)).models(model)
if not results:
results = sqs.raw_search('*{}*'.format(query)).models(model)
return [o.pk for o in results] | 0.002262 |
def _on_message(self, bus, message): # pylint: disable=unused-argument
"""When a message is received from Gstreamer."""
if message.type == Gst.MessageType.EOS:
self.stop()
elif message.type == Gst.MessageType.ERROR:
self.stop()
err, _ = message.parse_error()
_LOGGER.error('%s', err) | 0.005618 |
def remap_link_target(path, absolute=False):
""" remap a link target to a static URL if it's prefixed with @ """
if path.startswith('@'):
# static resource
return static_url(path[1:], absolute=absolute)
if absolute:
# absolute-ify whatever the URL is
return urllib.parse.urljoin(flask.request.url, path)
return path | 0.00274 |
def spl(self):
"""
Sound Pressure Level - defined as 20 * log10(p/p0),
where p is the RMS of the sound wave in Pascals and p0 is
20 micro Pascals.
Since we would need to know calibration information about the
microphone used to record the sound in order to transform
the PCM values of this audiosegment into Pascals, we can't really
give an accurate SPL measurement.
However, we can give a reasonable guess that can certainly be used
to compare two sounds taken from the same microphone set up.
Be wary about using this to compare sounds taken under different recording
conditions however, except as a simple approximation.
Returns a scalar float representing the dB SPL of this audiosegment.
"""
arr = self.to_numpy_array()
if len(arr) == 0:
return 0.0
else:
rms = self.rms
ratio = rms / P_REF_PCM
return 20.0 * np.log10(ratio + 1E-9) | 0.002933 |
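For reference, a minimal numpy sketch of the same dB computation, assuming a hypothetical PCM-domain reference level `P_REF_PCM` (the real constant is defined by the surrounding library):

```python
import numpy as np

P_REF_PCM = 20.0  # assumed reference level; the library defines the real value

# 440 Hz sine sampled at 8 kHz, scaled to a 16-bit-ish amplitude
samples = 1000.0 * np.sin(2 * np.pi * 440 * np.arange(8000) / 8000.0)
rms = np.sqrt(np.mean(samples ** 2))
spl_db = 20.0 * np.log10(rms / P_REF_PCM + 1e-9)
print(round(spl_db, 1))  # roughly 31 dB relative to the assumed reference
```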
def build_intent(self, intent_name):
"""Builds an Intent object of the given name"""
# TODO: contexts
is_fallback = self.assist._intent_fallbacks[intent_name]
contexts = self.assist._required_contexts[intent_name]
events = self.assist._intent_events[intent_name]
new_intent = Intent(intent_name, fallback_intent=is_fallback, contexts=contexts, events=events)
self.build_action(new_intent)
self.build_user_says(new_intent) # TODO
return new_intent | 0.00578 |
def parse_args_and_kwargs(self, cmdline):
'''
cmdline: list
returns tuple of: args (list), kwargs (dict)
'''
# Parse args and kwargs
args = []
kwargs = {}
if len(cmdline) > 1:
for item in cmdline[1:]:
if '=' in item:
(key, value) = item.split('=', 1)
kwargs[key] = value
else:
args.append(item)
return (args, kwargs) | 0.004049 |
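A quick standalone illustration of the same splitting rule (the command name itself is skipped, and anything containing `=` becomes a keyword argument; `split('=', 1)` keeps later `=` characters in the value):

```python
def parse_args_and_kwargs(cmdline):
    # same convention as above: cmdline[0] is the command, the rest are args/kwargs
    args, kwargs = [], {}
    for item in cmdline[1:]:
        if '=' in item:
            key, value = item.split('=', 1)
            kwargs[key] = value
        else:
            args.append(item)
    return args, kwargs

print(parse_args_and_kwargs(['cmd.run', 'ls', 'cwd=/tmp', 'env=PATH=/bin']))
# (['ls'], {'cwd': '/tmp', 'env': 'PATH=/bin'})
```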
def render_profile_data(self, as_parsed):
"""Render the chosen profile entry, as it was parsed."""
try:
return deep_map(self._render_profile_data, as_parsed)
except RecursionException:
raise DbtProfileError(
'Cycle detected: Profile input has a reference to itself',
project=as_parsed
) | 0.005291 |
def dirs(self, before=time.time(), exited=True):
"""
Provide a generator of container state directories.
If exited is None, all are returned. If it is False, unexited
containers are returned. If it is True, only exited containers are
returned.
"""
timestamp = iso(before)
root = os.path.join(self.root, "start-time")
os.chdir(root)
by_t = (d for d in glob.iglob("????-??-??T*.*Z") if d < timestamp)
if exited is None:
def predicate(directory):
return True
else:
def predicate(directory):
exit = os.path.join(directory, "exit")
return os.path.exists(exit) is exited
return (os.path.join(root, d) for d in by_t if predicate(d)) | 0.002491 |
def make_cookie_content(name, load, sign_key, domain=None, path=None,
timestamp="", enc_key=None, max_age=0,
sign_alg='SHA256'):
"""
Create and return a cookie's content.
If you only provide a `sign_key`, an HMAC gets added to the cookie's value
and this is checked when the cookie is parsed again.
If you provide both `sign_key` and `enc_key`, the cookie gets protected
by using AEAD encryption. This provides both a MAC over the whole cookie
and encrypts the `load` in a single step.
The `sign_key` and `enc_key` parameters should be byte strings of at least
16 bytes length each. Those are used as cryptographic keys.
:param name: Cookie name
:type name: text
:param load: Cookie load
:type load: text
:param sign_key: A sign_key key for payload signing
:type sign_key: A :py:class:`cryptojwt.jwk.hmac.SYMKey` instance
:param domain: The domain of the cookie
:param path: The path specification for the cookie
:param timestamp: A time stamp
:type timestamp: text
:param enc_key: The key to use for payload encryption.
:type enc_key: A :py:class:`cryptojwt.jwk.hmac.SYMKey` instance
:param max_age: The time in seconds for when a cookie will be deleted
:type max_age: int
:return: A SimpleCookie instance
"""
if not timestamp:
timestamp = str(int(time.time()))
_cookie_value = sign_enc_payload(load, timestamp, sign_key=sign_key,
enc_key=enc_key, sign_alg=sign_alg)
content = {name: {"value": _cookie_value}}
if path is not None:
content[name]["path"] = path
if domain is not None:
content[name]["domain"] = domain
content[name]['httponly'] = True
if max_age:
content[name]["expires"] = in_a_while(seconds=max_age)
return content | 0.000534 |
def ms_bot_framework(self) -> dict:
"""Returns MS Bot Framework compatible state of the Button instance.
Creates MS Bot Framework CardAction (button) with postBack value return.
Returns:
control_json: MS Bot Framework representation of Button state.
"""
card_action = {}
card_action['type'] = 'postBack'
card_action['title'] = self.name
card_action['value'] = self.callback
return card_action | 0.006073 |
def __parse(self, stream):
"""Parse Sorting Hat stream"""
if not stream:
raise InvalidFormatError(cause="stream cannot be empty or None")
json = self.__load_json(stream)
self.__parse_organizations(json)
self.__parse_identities(json)
self.__parse_blacklist(json) | 0.006173 |
def Ribbon(line1, line2, c="m", alpha=1, res=(200, 5)):
"""Connect two lines to generate the surface inbetween.
.. hint:: |ribbon| |ribbon.py|_
"""
if isinstance(line1, Actor):
line1 = line1.coordinates()
if isinstance(line2, Actor):
line2 = line2.coordinates()
ppoints1 = vtk.vtkPoints() # Generate the polyline1
ppoints1.SetData(numpy_to_vtk(line1, deep=True))
lines1 = vtk.vtkCellArray()
lines1.InsertNextCell(len(line1))
for i in range(len(line1)):
lines1.InsertCellPoint(i)
poly1 = vtk.vtkPolyData()
poly1.SetPoints(ppoints1)
poly1.SetLines(lines1)
ppoints2 = vtk.vtkPoints() # Generate the polyline2
ppoints2.SetData(numpy_to_vtk(line2, deep=True))
lines2 = vtk.vtkCellArray()
lines2.InsertNextCell(len(line2))
for i in range(len(line2)):
lines2.InsertCellPoint(i)
poly2 = vtk.vtkPolyData()
poly2.SetPoints(ppoints2)
poly2.SetLines(lines2)
# build the lines
lines1 = vtk.vtkCellArray()
lines1.InsertNextCell(poly1.GetNumberOfPoints())
for i in range(poly1.GetNumberOfPoints()):
lines1.InsertCellPoint(i)
polygon1 = vtk.vtkPolyData()
polygon1.SetPoints(ppoints1)
polygon1.SetLines(lines1)
lines2 = vtk.vtkCellArray()
lines2.InsertNextCell(poly2.GetNumberOfPoints())
for i in range(poly2.GetNumberOfPoints()):
lines2.InsertCellPoint(i)
polygon2 = vtk.vtkPolyData()
polygon2.SetPoints(ppoints2)
polygon2.SetLines(lines2)
mergedPolyData = vtk.vtkAppendPolyData()
mergedPolyData.AddInputData(polygon1)
mergedPolyData.AddInputData(polygon2)
mergedPolyData.Update()
rsf = vtk.vtkRuledSurfaceFilter()
rsf.CloseSurfaceOff()
rsf.SetRuledModeToResample()
rsf.SetResolution(res[0], res[1])
rsf.SetInputData(mergedPolyData.GetOutput())
rsf.Update()
actor = Actor(rsf.GetOutput(), c=c, alpha=alpha)
settings.collectable_actors.append(actor)
return actor | 0.000502 |
def as_url(cls, api=None, name_prefix='', url_prefix=''):
""" Generate url for resource.
:return RegexURLPattern: Django URL
"""
url_prefix = url_prefix and "%s/" % url_prefix
name_prefix = name_prefix and "%s-" % name_prefix
url_regex = '^%s%s/?$' % (
url_prefix, cls._meta.url_regex.lstrip('^').rstrip('/$'))
url_regex = url_regex.replace('//', '/')
url_name = '%s%s' % (name_prefix, cls._meta.url_name)
return url(url_regex, cls.as_view(api=api), name=url_name) | 0.003623 |
def __to_file(self, message_no):
""" Write a single message to file """
filename = self.__create_file_name(message_no)
try:
with codecs.open(filename, mode='w',
encoding=self.messages[message_no].encoding)\
as file__:
file__.write(self.messages[message_no].output)
except IOError as excep:
print('Unable to open the file \'{0}\' for writing. The '
      'following exception was raised:'.format(filename))
print(excep)
print('Exiting!')
sys.exit(2)
return filename | 0.003125 |
def create_session(self):
"""Create a session on the frontier silicon device."""
req_url = '%s/%s' % (self.__webfsapi, 'CREATE_SESSION')
sid = yield from self.__session.get(req_url, params=dict(pin=self.pin),
timeout = self.timeout)
text = yield from sid.text(encoding='utf-8')
doc = objectify.fromstring(text)
return doc.sessionId.text | 0.009346 |
def create_textview(self, wrap_mode=Gtk.WrapMode.WORD_CHAR, justify=Gtk.Justification.LEFT, visible=True, editable=True):
"""
Function creates a text view with wrap_mode
and justification
"""
text_view = Gtk.TextView()
text_view.set_wrap_mode(wrap_mode)
text_view.set_editable(editable)
if not editable:
text_view.set_cursor_visible(False)
else:
text_view.set_cursor_visible(visible)
text_view.set_justification(justify)
return text_view | 0.005464 |
def mydot(A, B):
r"""Dot-product that can handle dense and sparse arrays
Parameters
----------
A : numpy ndarray or scipy sparse matrix
The first factor
B : numpy ndarray or scipy sparse matrix
The second factor
Returns
C : numpy ndarray or scipy sparse matrix
The dot-product of A and B
"""
if issparse(A):
return A.dot(B)
elif issparse(B):
return (B.T.dot(A.T)).T
else:
return np.dot(A, B) | 0.004107 |
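A hedged usage sketch with scipy; the transposed form in the dense-times-sparse branch keeps the multiplication on the sparse operand:

```python
import numpy as np
from scipy.sparse import csr_matrix, issparse

def mydot(A, B):
    if issparse(A):
        return A.dot(B)
    elif issparse(B):
        return (B.T.dot(A.T)).T  # keep the product on the sparse operand
    else:
        return np.dot(A, B)

dense = np.arange(9.0).reshape(3, 3)
sparse = csr_matrix(np.eye(3))
print(np.allclose(mydot(dense, sparse), dense))  # True
print(np.allclose(mydot(sparse, dense), dense))  # True
```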
def sort_direction(self):
"""
Return the direction in which the linked table is sorted by
this column ("asc" or "desc"), or None if this column is unsorted.
"""
if self.table._meta.order_by == self.name:
return "asc"
elif self.table._meta.order_by == ("-" + self.name):
return "desc"
else:
return None | 0.005051 |
def is_mastercard(n):
"""Checks if credit card number fits the mastercard format."""
n, length = str(n), len(str(n))
if length >= 16 and length <= 19:
if ''.join(n[:2]) in strings_between(51, 56):
return True
return False | 0.003876 |
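A self-contained variant of the same check, assuming `strings_between(51, 56)` yields the two-digit prefixes '51' through '55' (the classic MasterCard IIN range):

```python
def is_mastercard(n):
    s = str(n)
    # classic 51-55 prefix range and 16-19 digit length, as in the check above
    return 16 <= len(s) <= 19 and s[:2] in {'51', '52', '53', '54', '55'}

print(is_mastercard('5500005555555559'))  # True
print(is_mastercard('4111111111111111'))  # False (Visa prefix)
```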
def _license_from_classifiers(data):
"""try to get a license from the classifiers"""
classifiers = data.get('classifiers', [])
found_license = None
for c in classifiers:
if c.startswith("License :: OSI Approved :: "):
found_license = c.replace("License :: OSI Approved :: ", "")
return found_license | 0.00295 |
def is_locked(self, key):
"""
Checks the lock for the specified key. If the lock is acquired, it returns ``true``. Otherwise, it returns ``false``.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), the key that is checked for lock
:return: (bool), ``true`` if lock is acquired, ``false`` otherwise.
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_is_locked_codec, key_data, key=key_data) | 0.007474 |
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None):
""" Helper function for element-wise operation.
The function will perform numpy-like broadcasting if needed and call different functions.
Parameters
--------
lhs : NDArray or numeric value
Left-hand side operand.
rhs : NDArray or numeric value
Right-hand operand,
fn_array : function
Function to be called if both lhs and rhs are of ``NDArray`` type.
fn_scalar : function
Function to be called if both lhs and rhs are numeric values.
lfn_scalar : function
Function to be called if lhs is ``NDArray`` while rhs is numeric value
rfn_scalar : function
Function to be called if lhs is numeric value while rhs is ``NDArray``;
if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar
Returns
--------
NDArray
result array
"""
if isinstance(lhs, numeric_types):
if isinstance(rhs, numeric_types):
return fn_scalar(lhs, rhs)
else:
if rfn_scalar is None:
# commutative function
return lfn_scalar(rhs, float(lhs))
else:
return rfn_scalar(rhs, float(lhs))
elif isinstance(rhs, numeric_types):
return lfn_scalar(lhs, float(rhs))
elif isinstance(rhs, NDArray):
return fn_array(lhs, rhs)
else:
raise TypeError('type %s not supported' % str(type(rhs))) | 0.001976 |
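A minimal stand-in for the same dispatch pattern, using plain numpy arrays instead of mxnet NDArrays (an assumption made for illustration only); note how the commutative case reuses the scalar-on-the-right path with swapped operands:

```python
import numbers
import numpy as np

def add_helper(lhs, rhs):
    if isinstance(lhs, numbers.Number):
        if isinstance(rhs, numbers.Number):
            return lhs + rhs        # fn_scalar: both operands are plain numbers
        return rhs + float(lhs)     # commutative, so reuse the array-plus-scalar path
    if isinstance(rhs, numbers.Number):
        return lhs + float(rhs)     # lfn_scalar: array on the left, scalar on the right
    return np.add(lhs, rhs)         # fn_array: both operands are arrays

print(add_helper(2, np.ones(3)))          # [3. 3. 3.]
print(add_helper(np.ones(3), np.ones(3))) # [2. 2. 2.]
```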
def waveguide(path,
points,
finish,
bend_radius,
number_of_points=0.01,
direction=None,
layer=0,
datatype=0):
'''
Easy waveguide creation tool with absolute positioning.
path : starting `gdspy.Path`
points : coordinates along which the waveguide will travel
finish : end point of the waveguide
bend_radius : radius of the turns in the waveguide
number_of_points : same as in `path.turn`
direction : starting direction
layer : GDSII layer number
datatype : GDSII datatype number
Return `path`.
'''
if direction is not None:
path.direction = direction
axis = 0 if path.direction[1] == 'x' else 1
points.append(finish[(axis + len(points)) % 2])
n = len(points)
if points[0] > (path.x, path.y)[axis]:
path.direction = ['+x', '+y'][axis]
else:
path.direction = ['-x', '-y'][axis]
for i in range(n):
path.segment(
abs(points[i] - (path.x, path.y)[axis]) - bend_radius,
layer=layer,
datatype=datatype)
axis = 1 - axis
if i < n - 1:
goto = points[i + 1]
else:
goto = finish[axis]
if (goto > (path.x, path.y)[axis]) ^ ((path.direction[0] == '+') ^
(path.direction[1] == 'x')):
bend = 'l'
else:
bend = 'r'
path.turn(
bend_radius,
bend,
number_of_points=number_of_points,
layer=layer,
datatype=datatype)
return path.segment(
abs(finish[axis] - (path.x, path.y)[axis]),
layer=layer,
datatype=datatype) | 0.000548 |
def commit(self, commit):
'''
.. seealso:: :attr:`commit`
'''
c = self.commit
if c:
if not commit:
commit = c[0]
if commit in c:
self._checkout(treeish=commit) | 0.007813 |
def cluster(args):
"""
%prog cluster prefix fastqfiles
Use `vsearch` to remove duplicate reads. This routine is heavily influenced
by PyRAD: <https://github.com/dereneaton/pyrad>.
"""
p = OptionParser(cluster.__doc__)
add_consensus_options(p)
p.set_align(pctid=95)
p.set_outdir()
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
prefix = args[0]
fastqfiles = args[1:]
cpus = opts.cpus
pctid = opts.pctid
mindepth = opts.mindepth
minlength = opts.minlength
fastafile, qualfile = fasta(fastqfiles + ["--seqtk",
"--outdir={0}".format(opts.outdir),
"--outfile={0}".format(prefix + ".fasta")])
prefix = op.join(opts.outdir, prefix)
pf = prefix + ".P{0}".format(pctid)
derepfile = prefix + ".derep"
if need_update(fastafile, derepfile):
derep(fastafile, derepfile, minlength, cpus)
userfile = pf + ".u"
notmatchedfile = pf + ".notmatched"
if need_update(derepfile, userfile):
cluster_smallmem(derepfile, userfile, notmatchedfile,
minlength, pctid, cpus)
clustfile = pf + ".clust"
if need_update((derepfile, userfile, notmatchedfile), clustfile):
makeclust(derepfile, userfile, notmatchedfile, clustfile,
mindepth=mindepth)
clustSfile = pf + ".clustS"
if need_update(clustfile, clustSfile):
parallel_musclewrap(clustfile, cpus)
statsfile = pf + ".stats"
if need_update(clustSfile, statsfile):
makestats(clustSfile, statsfile, mindepth=mindepth) | 0.001197 |
def prepare_communication (self):
"""
Find the subdomain rank (tuple) for each processor and
determine the neighbor info.
"""
nsd_ = self.nsd
if nsd_<1:
print('Number of space dimensions is %d, nothing to do' %nsd_)
return
self.subd_rank = [-1,-1,-1]
self.subd_lo_ix = [-1,-1,-1]
self.subd_hi_ix = [-1,-1,-1]
self.lower_neighbors = [-1,-1,-1]
self.upper_neighbors = [-1,-1,-1]
num_procs = self.num_procs
my_id = self.my_id
num_subds = 1
for i in range(nsd_):
num_subds = num_subds*self.num_parts[i]
if my_id==0:
print("# subds=", num_subds)
# should check num_subds against num_procs
offsets = [1, 0, 0]
# find the subdomain rank
self.subd_rank[0] = my_id%self.num_parts[0]
if nsd_>=2:
offsets[1] = self.num_parts[0]
self.subd_rank[1] = my_id//offsets[1]
if nsd_==3:
offsets[1] = self.num_parts[0]
offsets[2] = self.num_parts[0]*self.num_parts[1]
self.subd_rank[1] = (my_id%offsets[2])//self.num_parts[0]
self.subd_rank[2] = my_id//offsets[2]
print("my_id=%d, subd_rank: "%my_id, self.subd_rank)
if my_id==0:
print("offsets=", offsets)
# find the neighbor ids
for i in range(nsd_):
rank = self.subd_rank[i]
if rank>0:
self.lower_neighbors[i] = my_id-offsets[i]
if rank<self.num_parts[i]-1:
self.upper_neighbors[i] = my_id+offsets[i]
k = self.global_num_cells[i]//self.num_parts[i]
m = self.global_num_cells[i]%self.num_parts[i]
ix = rank*k+max(0,rank+m-self.num_parts[i])
self.subd_lo_ix[i] = ix
ix = ix+k
if rank>=(self.num_parts[i]-m):
ix = ix+1 # load balancing
if rank<self.num_parts[i]-1:
ix = ix+1 # one cell of overlap
self.subd_hi_ix[i] = ix
print("subd_rank:",self.subd_rank,\
"lower_neig:", self.lower_neighbors, \
"upper_neig:", self.upper_neighbors)
print("subd_rank:",self.subd_rank,"subd_lo_ix:", self.subd_lo_ix, \
"subd_hi_ix:", self.subd_hi_ix) | 0.016367 |
def disable(self):
"""
Disable the plugin.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.disable_plugin(self.name)
self.reload() | 0.00738 |
def load_data(self, data_np):
"""
Load raw numpy data into the viewer.
"""
image = AstroImage.AstroImage(logger=self.logger)
image.set_data(data_np)
self.set_image(image) | 0.009132 |
def tilt_axes(self):
"""The current tilt along the (X, Y) axes of the tablet's
current logical orientation, in degrees off the tablet's Z axis
and whether they have changed in this event.
That is, if the tool is perfectly orthogonal to the tablet,
the tilt angle is 0. When the top tilts towards the logical top/left
of the tablet, the x/y tilt angles are negative, if the top tilts
towards the logical bottom/right of the tablet, the x/y tilt angles
are positive.
If these axes do not exist on the current tool, this property returns
((0, 0), :obj:`False`).
Returns:
((float, float), bool): The current value of the axes in degrees
and whether it has changed.
"""
tilt_x = self._libinput.libinput_event_tablet_tool_get_tilt_x(
self._handle)
tilt_y = self._libinput.libinput_event_tablet_tool_get_tilt_y(
self._handle)
x_changed = self._libinput. \
libinput_event_tablet_tool_tilt_x_has_changed(self._handle)
y_changed = self._libinput. \
libinput_event_tablet_tool_tilt_y_has_changed(self._handle)
return (tilt_x, tilt_y), x_changed or y_changed | 0.022686 |
def intersect_ranges(self, starts, stops):
"""Intersect with a set of ranges.
Parameters
----------
starts : array_like, int
Range start values.
stops : array_like, int
Range stop values.
Returns
-------
idx : SortedIndex
Examples
--------
>>> import allel
>>> import numpy as np
>>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
>>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35],
... [100, 120]])
>>> starts = ranges[:, 0]
>>> stops = ranges[:, 1]
>>> idx.intersect_ranges(starts, stops)
<SortedIndex shape=(3,) dtype=int64>
[6, 11, 35]
"""
loc = self.locate_ranges(starts, stops, strict=False)
return self.compress(loc, axis=0) | 0.002301 |
def stop(self):
"""
::
POST /:login/machines/:id?action=stop
Initiate shutdown of the remote machine.
"""
action = {'action': 'stop'}
j, r = self.datacenter.request('POST', self.path, params=action)
r.raise_for_status() | 0.013115 |
def export(self, class_name, method_name, **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
"""
# Arguments:
self.class_name = class_name
self.method_name = method_name
# Estimator:
est = self.estimator
self.hidden_activation = est.activation
self.n_layers = est.n_layers_
self.n_hidden_layers = est.n_layers_ - 2
self.n_inputs = len(est.coefs_[0])
self.n_outputs = est.n_outputs_
self.hidden_layer_sizes = est.hidden_layer_sizes
if isinstance(self.hidden_layer_sizes, int):
self.hidden_layer_sizes = [self.hidden_layer_sizes]
self.hidden_layer_sizes = list(self.hidden_layer_sizes)
self.layer_units = \
[self.n_inputs] + self.hidden_layer_sizes + [est.n_outputs_]
# Weights:
self.coefficients = est.coefs_
# Bias:
self.intercepts = est.intercepts_
if self.target_method == 'predict':
return self.predict() | 0.002119 |
def move_item(self, item, origin, destination):
"""
Moves an item from one cluster to another cluster.
:param item: the item to be moved.
:param origin: the originating cluster.
:param destination: the target cluster.
"""
if self.equality:
item_index = 0
for i, element in enumerate(origin):
if self.equality(element, item):
item_index = i
break
else:
item_index = origin.index(item)
destination.append(origin.pop(item_index)) | 0.003384 |
def serialize_upload(name, storage, url):
"""
Serialize uploaded file by name and storage. Namespaced by the upload url.
"""
if isinstance(storage, LazyObject):
# Unwrap lazy storage class
storage._setup()
cls = storage._wrapped.__class__
else:
cls = storage.__class__
return signing.dumps({
'name': name,
'storage': '%s.%s' % (cls.__module__, cls.__name__)
}, salt=url) | 0.002242 |
def equal(self, line1, line2):
'''
return true if exactly equal or if equal but modified,
otherwise return false
return type: BooleanPlus
'''
eqLine = line1 == line2
if eqLine:
return BooleanPlus(True, False)
else:
unchanged_count = self.count_similar_words(line1, line2)
similarity_percent = (
(2.0 * unchanged_count) /
(len(line1.split()) + len(line2.split()))
)
if similarity_percent >= 0.50:
return BooleanPlus(True, True)
return BooleanPlus(False, False) | 0.003106 |
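The `count_similar_words` helper is not shown above; a simplistic set-overlap stand-in is enough to illustrate the 50% similarity threshold being applied:

```python
def is_modified_but_equal(line1, line2):
    # simplistic stand-in for count_similar_words: count shared unique words
    shared = len(set(line1.split()) & set(line2.split()))
    similarity = (2.0 * shared) / (len(line1.split()) + len(line2.split()))
    return similarity >= 0.50

print(is_modified_but_equal("x = compute(a, b)", "y = compute(a, b)"))  # True
print(is_modified_but_equal("x = compute(a, b)", "return None"))        # False
```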
def _create_graph(self, return_target_sources=None):
"""
Create a DiGraph out of the existing edge map.
:param return_target_sources: Used for making up those missing returns
:returns: A networkx.DiGraph() object
"""
if return_target_sources is None:
# We set it to a defaultdict in order to be consistent with the
# actual parameter.
return_target_sources = defaultdict(list)
cfg = networkx.DiGraph()
# The corner case: add a node to the graph if there is only one block
if len(self._nodes) == 1:
cfg.add_node(self._nodes[next(iter(self._nodes.keys()))])
# Adding edges
for tpl, targets in self._exit_targets.items():
basic_block = self._nodes[tpl] # Cannot fail :)
for ex, jumpkind in targets:
if ex in self._nodes:
target_bbl = self._nodes[ex]
cfg.add_edge(basic_block, target_bbl, jumpkind=jumpkind)
# Add edges for possibly missing returns
if basic_block.addr in return_target_sources:
for src_irsb_key in \
return_target_sources[basic_block.addr]:
cfg.add_edge(self._nodes[src_irsb_key],
basic_block, jumpkind="Ijk_Ret")
else:
# Debugging output
def addr_formalize(addr):
if addr is None:
return "None"
else:
return "%#08x" % addr
s = "(["
for addr in ex[:-1]:
s += addr_formalize(addr) + ", "
s += "] %s)" % addr_formalize(ex[-1])
l.warning("Key %s does not exist.", s)
return cfg | 0.00205 |
def log_uniform(low, high, size:Optional[List[int]]=None)->FloatOrTensor:
"Draw 1 or shape=`size` random floats from uniform dist: min=log(`low`), max=log(`high`)."
res = uniform(log(low), log(high), size)
return exp(res) if size is None else res.exp_() | 0.022642 |
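The same idea in plain numpy: draw uniformly in log space and exponentiate, which makes each decade between `low` and `high` equally likely:

```python
import numpy as np

low, high = 1e-4, 1e-1
samples = np.exp(np.random.uniform(np.log(low), np.log(high), size=10_000))
assert low <= samples.min() and samples.max() <= high
# roughly a third of the samples fall in each decade [1e-4, 1e-3), [1e-3, 1e-2), [1e-2, 1e-1)
print(np.histogram(np.log10(samples), bins=3)[0])
```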
def get_image_registry_url(self, image_name):
"""
Helper function for obtain registry url of image from it's name
:param image_name: str, short name of an image, example:
- conu:0.5.0
:return: str, image registry url, example:
- 172.30.1.1:5000/myproject/conu:0.5.0
"""
c = self._oc_command(["get", "is", image_name,
"--output=jsonpath=\'{ .status.dockerImageRepository }\'"])
try:
internal_registry_name = run_cmd(c, return_output=True)
except subprocess.CalledProcessError as ex:
raise ConuException("oc get is failed: %s" % ex)
logger.info("Image registry url: %s", internal_registry_name)
return internal_registry_name.replace("'", "").replace('"', '') | 0.003676 |
def update_input(filelist, ivmlist=None, removed_files=None):
"""
Removes files flagged to be removed from the input filelist.
Removes the corresponding ivm files if present.
"""
newfilelist = []
if removed_files == []:
return filelist, ivmlist
else:
sci_ivm = list(zip(filelist, ivmlist))
for f in removed_files:
    sci_ivm = [t for t in sci_ivm if t[0] != f]
ivmlist = [el[1] for el in sci_ivm]
newfilelist = [el[0] for el in sci_ivm]
return newfilelist, ivmlist | 0.005319 |
def list_cands(candsfile, threshold=0.):
""" Prints candidate info in time order above some threshold """
loc, prop, d0 = pc.read_candidates(candsfile, snrmin=threshold, returnstate=True)
if 'snr2' in d0['features']:
snrcol = d0['features'].index('snr2')
elif 'snr1' in d0['features']:
snrcol = d0['features'].index('snr1')
dmindcol = d0['featureind'].index('dmind')
if len(loc):
snrs = prop[:, snrcol]
times = pc.int2mjd(d0, loc)
times = times - times[0]
logger.info('Getting candidates...')
logger.info('candnum: loc, SNR, DM (pc/cm3), time (s; rel)')
for i in range(len(loc)):
logger.info("%d: %s, %.1f, %.1f, %.1f" % (i, str(loc[i]), prop[i, snrcol], np.array(d0['dmarr'])[loc[i,dmindcol]], times[i])) | 0.004944 |
def session_from_client_secrets_file(client_secrets_file, scopes, **kwargs):
"""Creates a :class:`requests_oauthlib.OAuth2Session` instance from a
Google-format client secrets file.
Args:
client_secrets_file (str): The path to the `client secrets`_ .json
file.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new
oauthlib session and the validated client configuration.
.. _client secrets:
https://developers.google.com/api-client-library/python/guide
/aaa_client_secrets
"""
with open(client_secrets_file, 'r') as json_file:
client_config = json.load(json_file)
return session_from_client_config(client_config, scopes, **kwargs) | 0.001054 |
def run(self):
""" Spawns the metric reporting thread """
self.thr = threading.Thread(target=self.collect_and_report)
self.thr.daemon = True
self.thr.name = "Instana Metric Collection"
self.thr.start() | 0.008299 |
def tf2zpk(b, a):
"""Return zero, pole, gain (z,p,k) representation from a numerator,
denominator representation of a linear filter.
Convert transfer function filter parameters to zero-pole-gain form.
:param ndarray b: numerator polynomial.
:param ndarray a: denominator polynomial.
:return:
* z : ndarray Zeros of the transfer function.
* p : ndarray Poles of the transfer function.
* k : float System gain.
If some values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
.. doctest::
>>> import scipy.signal
>>> from spectrum.transfer import tf2zpk
>>> [b, a] = scipy.signal.butter(3.,.4)
>>> z, p ,k = tf2zpk(b,a)
.. seealso:: :func:`zpk2tf`
.. note:: wrapper of scipy function tf2zpk
"""
import scipy.signal
z,p,k = scipy.signal.tf2zpk(b, a)
return z,p,k | 0.005192 |
def include_file(filename, lineno, local_first):
""" Performs a file inclusion (#include) in the preprocessor.
Writes down that "filename" was included at the current file,
at line <lineno>.
If local_first is True, then it will first search the file in the
local path before looking for it in the include path chain.
This is used when doing a #include "filename".
"""
global CURRENT_DIR
filename = search_filename(filename, lineno, local_first)
if filename not in INCLUDED.keys():
INCLUDED[filename] = []
if len(CURRENT_FILE) > 0: # Added from which file, line
INCLUDED[filename].append((CURRENT_FILE[-1], lineno))
CURRENT_FILE.append(filename)
CURRENT_DIR = os.path.dirname(filename)
return LEXER.include(filename) | 0.001264 |
def _build_path(self):
'''
Constructs the actual request URL with accompanying query if any.
Returns:
None: But does modify self.path, which contains the final
request path sent to the server.
'''
if not self.path:
self.path = '/'
if self.uri_parameters:
self.path = self.path + ';' + requote_uri(self.uri_parameters)
if self.query:
self.path = (self.path + '?' + self.query)
if self.params:
try:
if self.query:
self.path = self.path + self._dict_to_query(
self.params, base_query=True)
else:
self.path = self.path + self._dict_to_query(self.params)
except AttributeError:
self.path = self.path + '?' + self.params
self.path = requote_uri(self.path)
self.req_url = urlunparse(
(self.scheme, self.host, (self.path or ''), '', '', '')) | 0.001938 |
def add_robot(self, controller):
"""Add a robot controller"""
# connect to the controller
# -> this is to support module robots
controller.on_mode_change(self._on_robot_mode_change)
self.robots.append(controller) | 0.007905 |
def format_meta_lines(cls, meta, labels, offset, **kwargs):
'''Return all information from a given meta dictionary in a list of lines'''
lines = []
# Name and underline
name = meta['package_name']
if 'version' in meta:
name += '-' + meta['version']
if 'custom_location' in kwargs:
name += ' ({loc})'.format(loc=kwargs['custom_location'])
lines.append(name)
lines.append(len(name)*'=')
lines.append('')
# Summary
lines.extend(meta['summary'].splitlines())
lines.append('')
# Description
if meta.get('description', ''):
lines.extend(meta['description'].splitlines())
lines.append('')
# Other metadata
data = []
for item in labels:
if meta.get(item, '') != '': # We want to process False and 0
label = (cls._nice_strings[item] + ':').ljust(offset + 2)
data.append(label + cls._format_field(meta[item]))
lines.extend(data)
return lines | 0.004617 |
def remove_node(cls, cluster_id_label, private_dns, parameters=None):
"""
Remove a node from an existing cluster
"""
conn = Qubole.agent(version=Cluster.api_version)
parameters = {} if not parameters else parameters
data = {"private_dns" : private_dns, "parameters" : parameters}
return conn.delete(cls.element_path(cluster_id_label) + "/nodes", data) | 0.00995 |
def generate_log_between_tags(self, older_tag, newer_tag):
"""
Generate log between 2 specified tags.
:param dict older_tag: All issues before this tag's date will be
excluded. May be special value, if new tag is
the first tag. (Means **older_tag** is when
the repo was created.)
:param dict newer_tag: All issues after this tag's date will be
excluded. May be title of unreleased section.
:rtype: str
:return: Generated ready-to-add tag section for newer tag.
"""
filtered_issues, filtered_pull_requests = \
self.filter_issues_for_tags(newer_tag, older_tag)
older_tag_name = older_tag["name"] if older_tag \
else self.detect_since_tag()
if not filtered_issues and not filtered_pull_requests:
# do not generate an unreleased section if it would be empty
return ""
return self.generate_log_for_tag(
filtered_pull_requests, filtered_issues,
newer_tag, older_tag_name) | 0.001733 |
def _print_value(self):
"""Generates the table values."""
for line in range(self.Lines_num):
for col, length in zip(self.Table, self.AttributesLength):
vals = list(col.values())[0]
val = vals[line] if len(vals) != 0 and line < len(vals) else ''
self.StrTable += "| "
self.StrTable += self._pad_string(val, length * 2)
self.StrTable += "|" + '\n'
self._print_divide() | 0.004149 |
def update(self, callback=None, errback=None, **kwargs):
"""
Update scope group configuration. Pass a list of keywords and their values to
update. For the list of keywords available for address configuration, see :attr:`ns1.rest.ipam.Scopegroups.INT_FIELDS` and :attr:`ns1.rest.ipam.Scopegroups.PASSTHRU_FIELDS`
"""
if not self.data:
raise ScopegroupException('Scope Group not loaded')
def success(result, *args):
self.data = result
self.id = result['id']
self.dhcp4 = result['dhcp4']
self.dhcp6 = result['dhcp6']
self.name = result['name']
self.service_group_id = result['service_group_id']
if callback:
return callback(self)
else:
return self
return self._rest.update(self.id, callback=success, errback=errback, **kwargs) | 0.005429 |
def init_search(self):
"""Call the generators to generate the initial architectures for the search."""
if self.verbose:
logger.info("Initializing search.")
for generator in self.generators:
graph = generator(self.n_classes, self.input_shape).generate(
self.default_model_len, self.default_model_width
)
model_id = self.model_count
self.model_count += 1
self.training_queue.append((graph, -1, model_id))
self.descriptors.append(graph.extract_descriptor())
if self.verbose:
logger.info("Initialization finished.") | 0.004573 |
def read_lsm_scaninfo(fh):
"""Read LSM ScanInfo structure from file and return as dict."""
block = {}
blocks = [block]
unpack = struct.unpack
if struct.unpack('<I', fh.read(4))[0] != 0x10000000:
# not a Recording sub block
log.warning('read_lsm_scaninfo: invalid LSM ScanInfo structure')
return block
fh.read(8)
while True:
entry, dtype, size = unpack('<III', fh.read(12))
if dtype == 2:
# ascii
value = bytes2str(stripnull(fh.read(size)))
elif dtype == 4:
# long
value = unpack('<i', fh.read(4))[0]
elif dtype == 5:
# rational
value = unpack('<d', fh.read(8))[0]
else:
value = 0
if entry in TIFF.CZ_LSMINFO_SCANINFO_ARRAYS:
blocks.append(block)
name = TIFF.CZ_LSMINFO_SCANINFO_ARRAYS[entry]
newobj = []
block[name] = newobj
block = newobj
elif entry in TIFF.CZ_LSMINFO_SCANINFO_STRUCTS:
blocks.append(block)
newobj = {}
block.append(newobj)
block = newobj
elif entry in TIFF.CZ_LSMINFO_SCANINFO_ATTRIBUTES:
name = TIFF.CZ_LSMINFO_SCANINFO_ATTRIBUTES[entry]
block[name] = value
elif entry == 0xffffffff:
# end sub block
block = blocks.pop()
else:
# unknown entry
block['Entry0x%x' % entry] = value
if not blocks:
break
return block | 0.000645 |
def update(self, resource, timeout=-1):
"""
Updates a Logical Switch.
Args:
resource (dict): Object to update.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Updated resource.
"""
self.__set_default_values(resource)
uri = self._client.build_uri(resource['logicalSwitch']['uri'])
return self._client.update(resource, uri=uri, timeout=timeout) | 0.005 |
def filter_skiplines(code, errors):
"""Filter lines by `noqa`.
:return list: A filtered errors
"""
if not errors:
return errors
enums = set(er.lnum for er in errors)
removed = set([
num for num, l in enumerate(code.split('\n'), 1)
if num in enums and SKIP_PATTERN(l)
])
if removed:
errors = [er for er in errors if er.lnum not in removed]
return errors | 0.002353 |
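A hedged sketch of the same filtering, assuming `SKIP_PATTERN` is a compiled `# noqa` matcher and each error object carries an `lnum` attribute:

```python
import re
from collections import namedtuple

Error = namedtuple('Error', 'lnum text')
SKIP_PATTERN = re.compile(r'#\s*noqa', re.I).search  # assumed shape of the real pattern

code = "x=1\ny = 2  # noqa\n"
errors = [Error(1, 'E225'), Error(2, 'E225')]

skipped = {num for num, line in enumerate(code.split('\n'), 1) if SKIP_PATTERN(line)}
print([e for e in errors if e.lnum not in skipped])  # only the line-1 error remains
```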
def to_text(self):
"""Render a Text MessageElement as plain text.
:returns: The plain text representation of the Text MessageElement.
:rtype: basestring
"""
if self.items is None:
return
else:
text = ''
for item in self.items:
text += ' - %s\n' % item.to_text()
return text | 0.005168 |
def is_applicable(cls, conf):
"""Return whether this promoter is applicable for given conf"""
return all((
URLPromoter.is_applicable(conf),
not cls.needs_firefox(conf),
)) | 0.009132 |
def _get_baremetal_connections(self, port,
only_active_switch=False,
from_segment=False):
"""Get switch ips and interfaces from baremetal transaction.
This method is used to extract switch/interface
information from transactions where VNIC_TYPE is
baremetal.
:param port: Received port transaction
:param only_active_switch: Indicator for selecting
connections with switches that are active
:param from_segment: only return interfaces from the
segment/transaction as opposed to
say port channels which are learned.
:Returns: list of switch_ip, intf_type, port_id, is_native
"""
connections = []
is_native = False if self.trunk.is_trunk_subport(port) else True
all_link_info = port[bc.portbindings.PROFILE]['local_link_information']
for link_info in all_link_info:
# Extract port info
intf_type, port = nexus_help.split_interface_name(
link_info['port_id'])
# Determine if this switch is to be skipped
switch_info = self._get_baremetal_switch_info(
link_info)
if not switch_info:
continue
switch_ip = switch_info['switch_ip']
# If not for Nexus
if not self._switch_defined(switch_ip):
continue
# Requested connections for only active switches
if (only_active_switch and
not self.is_switch_active(switch_ip)):
continue
ch_grp = 0
if not from_segment:
try:
reserved = nxos_db.get_switch_if_host_mappings(
switch_ip,
nexus_help.format_interface_name(
intf_type, port))
if reserved[0].ch_grp > 0:
ch_grp = reserved[0].ch_grp
intf_type, port = nexus_help.split_interface_name(
'', ch_grp)
except excep.NexusHostMappingNotFound:
pass
connections.append((switch_ip, intf_type, port,
is_native, ch_grp))
return connections | 0.00206 |
def component_type(self):
"""
Returns the classname of the elements.
:return: the class of the elements
:rtype: str
"""
cls = javabridge.call(self.jobject, "getClass", "()Ljava/lang/Class;")
comptype = javabridge.call(cls, "getComponentType", "()Ljava/lang/Class;")
return javabridge.call(comptype, "getName", "()Ljava/lang/String;") | 0.007538 |
def lstrip(self, chars=None):
""" Like str.lstrip, except it returns the Colr instance. """
return self.__class__(
self._str_strip('lstrip', chars),
no_closing=chars and (closing_code in chars),
) | 0.008197 |
def acceptCompletion( self ):
"""
Accepts the current completion and inserts the code into the edit.
:return <bool> accepted
"""
tree = self._completerTree
if not tree:
return False
tree.hide()
item = tree.currentItem()
if not item:
return False
# clear the previously typed code for the block
cursor = self.textCursor()
text = cursor.block().text()
col = cursor.columnNumber()
end = col
while col:
col -= 1
if text[col] in ('.', ' '):
col += 1
break
# insert the current text
cursor.setPosition(cursor.position() - (end-col), cursor.KeepAnchor)
cursor.removeSelectedText()
self.insertPlainText(item.text(0))
return True | 0.014493 |
def _analyze_state(state: GlobalState):
"""
:param state: the current state
:return: returns the issues for that corresponding state
"""
instruction = state.get_current_instruction()
annotations = cast(
List[MultipleSendsAnnotation],
list(state.get_annotations(MultipleSendsAnnotation)),
)
if len(annotations) == 0:
log.debug("Creating annotation for state")
state.annotate(MultipleSendsAnnotation())
annotations = cast(
List[MultipleSendsAnnotation],
list(state.get_annotations(MultipleSendsAnnotation)),
)
calls = annotations[0].calls
if instruction["opcode"] in ["CALL", "DELEGATECALL", "STATICCALL", "CALLCODE"]:
call = get_call_from_state(state)
if call:
calls += [call]
else: # RETURN or STOP
if len(calls) > 1:
description_tail = (
"Consecutive calls are executed at the following bytecode offsets:\n"
)
for call in calls:
description_tail += "Offset: {}\n".format(
call.state.get_current_instruction()["address"]
)
description_tail += (
"Try to isolate each external call into its own transaction,"
" as external calls can fail accidentally or deliberately.\n"
)
issue = Issue(
contract=state.environment.active_account.contract_name,
function_name=state.environment.active_function_name,
address=instruction["address"],
swc_id=MULTIPLE_SENDS,
bytecode=state.environment.code.bytecode,
title="Multiple Calls in a Single Transaction",
severity="Medium",
description_head="Multiple sends are executed in one transaction.",
description_tail=description_tail,
gas_used=(state.mstate.min_gas_used, state.mstate.max_gas_used),
)
return [issue]
return [] | 0.002406 |
def form_valid(self, form):
"""After the form is valid lets let people know"""
ret = super(ProjectCopy, self).form_valid(form)
self.copy_relations()
# Good to make note of that
messages.add_message(self.request, messages.SUCCESS, 'Project %s copied' % self.object.name)
return ret | 0.009063 |
def candidates(self):
"""A list of candidate addresses (as dictionaries) from a geocode
operation"""
# convert x['location'] to a point from a json point struct
def cditer():
for candidate in self._json_struct['candidates']:
newcandidate = candidate.copy()
newcandidate['location'] = \
geometry.fromJson(newcandidate['location'])
yield newcandidate
return list(cditer()) | 0.004057 |
def parse(self, contents):
"""Parse the document.
:param contents: The text contents of the document.
:rtype: a *generator* of tokenized text.
"""
i = 0
for text in contents.split(self.delim):
if not len(text.strip()):
continue
words = text.split()
char_offsets = [0] + [
int(_) for _ in np.cumsum([len(x) + 1 for x in words])[:-1]
]
text = " ".join(words)
yield {
"text": text,
"words": words,
"pos_tags": [""] * len(words),
"ner_tags": [""] * len(words),
"lemmas": [""] * len(words),
"dep_parents": [0] * len(words),
"dep_labels": [""] * len(words),
"char_offsets": char_offsets,
"abs_char_offsets": char_offsets,
}
i += 1 | 0.002116 |
def _on_process_error(self, error):
"""
Display child process error in the text edit.
"""
if self is None:
return
err = PROCESS_ERROR_STRING[error]
self._formatter.append_message(err + '\r\n', output_format=OutputFormat.ErrorMessageFormat) | 0.010033 |
def resolve_pid(fetched_pid):
"""Retrieve the real PID given a fetched PID.
:param pid: fetched PID to resolve.
"""
return PersistentIdentifier.get(
pid_type=fetched_pid.pid_type,
pid_value=fetched_pid.pid_value,
pid_provider=fetched_pid.provider.pid_provider
) | 0.003268 |
def sbot_executable():
"""
Find shoebot executable
"""
gsettings=load_gsettings()
venv = gsettings.get_string('current-virtualenv')
if venv == 'Default':
sbot = which('sbot')
elif venv == 'System':
# find system python
env_venv = os.environ.get('VIRTUAL_ENV')
if not env_venv:
return which('sbot')
# First sbot in path that is not in current venv
for p in os.environ['PATH'].split(os.path.pathsep):
sbot='%s/sbot' % p
if not p.startswith(env_venv) and os.path.isfile(sbot):
return sbot
else:
sbot = os.path.join(venv, 'bin/sbot')
if not os.path.isfile(sbot):
print('Shoebot not found, reverting to System shoebot')
sbot = which('sbot')
return os.path.realpath(sbot) | 0.003563 |
def _countOverlapIndices(self, i, j):
"""
Return the overlap between bucket indices i and j
"""
if i in self.bucketMap and j in self.bucketMap:
iRep = self.bucketMap[i]
jRep = self.bucketMap[j]
return self._countOverlap(iRep, jRep)
else:
raise ValueError("Either i or j don't exist") | 0.017699 |
def _status_code_check(self, response: Dict[str, Any]):
"""检查响应码并进行对不同的响应进行处理.
主要包括:
+ 编码在500~599段为服务异常,直接抛出对应异常
+ 编码在400~499段为调用异常,为对应ID的future设置异常
+ 编码在300~399段为警告,会抛出对应警告
+ 编码在200~399段为执行成功响应,将结果设置给对应ID的future.
+ 编码在100~199段为服务器响应,主要是处理验证响应和心跳响应
Parameters:
response (Dict[str, Any]): - 响应的python字典形式数据
Return:
(bool): - 如果是非服务异常类的响应,那么返回True
"""
code = response.get("CODE")
if self.debug:
print("resv:{}".format(response))
print(code)
if code >= 500:
if self.debug:
print("server error")
return self._server_error_handler(code)
elif 500 > code >= 400:
if self.debug:
print("call method error")
return self._method_error_handler(response)
elif 400 > code >= 200:
if code >= 300:
self._warning_handler(code)
if code in (200, 201, 202, 206, 300, 301):
if self.debug is True:
print("resv resp {}".format(response))
return self._method_response_handler(response)
elif 200 > code >= 100:
return self._server_response_handler(response)
else:
raise MprpcException("unknow status code {}".format(code)) | 0.001444 |
def check_token(token):
''' Verify http header token authentification '''
user = models.User.objects(api_key=token).first()
return user or None | 0.006452 |
def sqlinsert(table, row):
"""Generates SQL insert into table ...
Returns (sql, parameters)
>>> sqlinsert('mytable', {'field1': 2, 'field2': 'toto'})
('insert into mytable (field1, field2) values (%s, %s)', [2, 'toto'])
>>> sqlinsert('t2', {'id': 1, 'name': 'Toto'})
('insert into t2 (id, name) values (%s, %s)', [1, 'Toto'])
"""
validate_name(table)
fields = sorted(row.keys())
validate_names(fields)
values = [row[field] for field in fields]
sql = "insert into {} ({}) values ({})".format(
table, ', '.join(fields), ', '.join(['%s'] * len(fields)))
return sql, values | 0.001587 |
def remap_overlapping_column_names(table_op, root_table, data_columns):
"""Return an ``OrderedDict`` mapping possibly suffixed column names to
column names without suffixes.
Parameters
----------
table_op : TableNode
The ``TableNode`` we're selecting from.
root_table : TableNode
The root table of the expression we're selecting from.
data_columns : set or frozenset
The available columns to select from
Returns
-------
mapping : OrderedDict[str, str]
A map from possibly-suffixed column names to column names without
suffixes.
"""
if not isinstance(table_op, ops.Join):
return None
left_root, right_root = ops.distinct_roots(table_op.left, table_op.right)
suffixes = {
left_root: constants.LEFT_JOIN_SUFFIX,
right_root: constants.RIGHT_JOIN_SUFFIX,
}
column_names = [
({name, name + suffixes[root_table]} & data_columns, name)
for name in root_table.schema.names
]
mapping = OrderedDict(
(first(col_name), final_name)
for col_name, final_name in column_names
if col_name
)
return mapping | 0.000851 |
def to_dict(self):
"""Serialize the model into a Python dict of simple types.
Note that this method requires that the model is bound with
``@bind_schema``.
"""
try:
data, _ = self.schema.dump(self)
except ValidationError as ex:
raise ModelValidationError(
ex.messages, ex.field_names, ex.fields, ex.data, **ex.kwargs) from None
return data | 0.006865 |
def touch(self, edited=False):
"""Mark the node as dirty.
Args:
edited (bool): Whether to set the edited time.
"""
self._dirty = True
dt = datetime.datetime.utcnow()
self.timestamps.updated = dt
if edited:
self.timestamps.edited = dt | 0.006369 |
def signed_distance(mesh, points):
"""
Find the signed distance from a mesh to a list of points.
* Points OUTSIDE the mesh will have NEGATIVE distance
* Points within tol.merge of the surface will have POSITIVE distance
* Points INSIDE the mesh will have POSITIVE distance
Parameters
-----------
mesh : Trimesh object
points : (n,3) float, list of points in space
Returns
----------
signed_distance : (n,3) float, signed distance from point to mesh
"""
# make sure we have a numpy array
points = np.asanyarray(points, dtype=np.float64)
# find the closest point on the mesh to the queried points
closest, distance, triangle_id = closest_point(mesh, points)
# we only care about nonzero distances
nonzero = distance > tol.merge
if not nonzero.any():
return distance
inside = mesh.ray.contains_points(points[nonzero])
sign = (inside.astype(int) * 2) - 1
# apply sign to previously computed distance
distance[nonzero] *= sign
return distance | 0.000947 |
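A small usage sketch through trimesh's public API (the snippet above is essentially `trimesh.proximity.signed_distance`); the sign convention matches the docstring:

```python
import numpy as np
import trimesh

mesh = trimesh.creation.icosphere(radius=1.0)
points = np.array([[0.0, 0.0, 0.0],   # inside the unit sphere -> positive
                   [0.0, 0.0, 2.0]])  # outside                -> negative
print(trimesh.proximity.signed_distance(mesh, points))  # roughly [ 1., -1.]
```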
def visit_Name(self, node):
"""
Return dependencies for given variable.
It have to be register first.
"""
if node.id in self.naming:
return self.naming[node.id]
elif node.id in self.global_declarations:
return [frozenset([self.global_declarations[node.id]])]
elif isinstance(node.ctx, ast.Param):
deps = [frozenset()]
self.naming[node.id] = deps
return deps
else:
raise PythranInternalError("Variable '{}' use before assignment"
"".format(node.id)) | 0.003205 |
def _parse_keys(row, line_num):
""" Perform some sanity checks on they keys
Each key in the row should not be named None cause
(that's an overrun). A key named `type` MUST be
present on the row & have a string value.
:param row: dict
:param line_num: int
"""
link = 'tools.ietf.org/html/rfc4180#section-2'
none_keys = [key for key in row.keys() if key is None]
if none_keys:
fail('You have more fields defined on row number {} '
'than field headers in your CSV data. Please fix '
'your request body.'.format(line_num), link)
elif not row.get('type'):
fail('Row number {} does not have a type value defined. '
'Please fix your request body.'.format(line_num), link) | 0.002401 |
def replace_note(self, player, text):
"""Replace note text with text. (Overwrites previous note!)"""
note = self._find_note(player)
note.text = text | 0.011628 |
def cancel(self):
'''
Cancel a running workflow.
Args:
None
Returns:
None
'''
if not self.id:
raise WorkflowError('Workflow is not running. Cannot cancel.')
if self.batch_values:
self.workflow.batch_workflow_cancel(self.id)
else:
self.workflow.cancel(self.id) | 0.005168 |
def _update_project(self, request, data):
"""Update project info"""
domain_id = identity.get_domain_id_for_operation(request)
try:
project_id = data['project_id']
# add extra information
if keystone.VERSIONS.active >= 3:
EXTRA_INFO = getattr(settings, 'PROJECT_TABLE_EXTRA_INFO', {})
kwargs = dict((key, data.get(key)) for key in EXTRA_INFO)
else:
kwargs = {}
return api.keystone.tenant_update(
request,
project_id,
name=data['name'],
description=data['description'],
enabled=data['enabled'],
domain=domain_id,
**kwargs)
except exceptions.Conflict:
msg = _('Project name "%s" is already used.') % data['name']
self.failure_message = msg
return
except Exception as e:
LOG.debug('Project update failed: %s', e)
exceptions.handle(request, ignore=True)
return | 0.001837 |
def _get_option(config, supplement, section, option, fallback=None):
"""
Reads an option for a configuration file.
:param configparser.ConfigParser config: The main config file.
:param configparser.ConfigParser supplement: The supplement config file.
:param str section: The name of the section op the option.
:param str option: The name of the option.
:param str|None fallback: The fallback value of the option if it is not set in either configuration files.
:rtype: str
:raise KeyError:
"""
if supplement:
return_value = supplement.get(section, option, fallback=config.get(section, option, fallback=fallback))
else:
return_value = config.get(section, option, fallback=fallback)
if fallback is None and return_value is None:
raise KeyError("Option '{0!s}' is not found in section '{1!s}'.".format(option, section))
return return_value | 0.006073 |
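A runnable sketch of the same two-level fallback with `configparser`: the supplement config wins, the main config is its fallback, and the literal default comes last:

```python
import configparser

main = configparser.ConfigParser()
main.read_string("[db]\nhost = main-db\n")
supplement = configparser.ConfigParser()
supplement.read_string("[db]\nport = 5433\n")

# supplement value first, main config as its fallback, literal default last
host = supplement.get('db', 'host', fallback=main.get('db', 'host', fallback=None))
port = supplement.get('db', 'port', fallback=main.get('db', 'port', fallback='5432'))
print(host, port)  # main-db 5433
```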
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
try:
re_pat = _cache[pat]
except KeyError:
res = translate(pat)
if len(_cache) >= _MAXCACHE:
# _cache.clear()
globals()['_cache'] = {}
_cache[pat] = re_pat = re.compile(res)
return re_pat.match(name) is not None | 0.002165 |
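The stdlib `fnmatch` module exposes the same behaviour; the version above just adds its own regex cache with an eviction cap:

```python
from fnmatch import fnmatchcase

print(fnmatchcase('report.txt', '*.txt'))  # True
print(fnmatchcase('report.TXT', '*.txt'))  # False: no case normalisation is applied
```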
def set_exception(self, exception):
"""Set the Future's exception."""
self._exception = exception
self._result_set = True
self._invoke_callbacks(self) | 0.010989 |
def save_object(filename, obj):
"""Compresses and pickles given object to the given filename."""
logging.info('saving {}...'.format(filename))
try:
with gzip.GzipFile(filename, 'wb') as f:
f.write(pickle.dumps(obj, 1))
except Exception as e:
logging.error('save failure: {}'.format(e))
raise | 0.002915 |
def index_modules(idx=None, path=None):
"""
Indexes objs from all modules
"""
suppress_output()
modules = defaultdict(list)
pkglist = pkgutil.walk_packages(onerror=lambda x: True)
print(pkglist)
if path:
pkglist = pkgutil.walk_packages(path, onerror=lambda x: True)
for modl, name, ispkg in pkglist:
try:
path = os.path.join(modl.path, name.split('.')[-1])
except AttributeError:
# Triggered on zipimport.zipimporter
continue
if os.path.isdir(path):
path = os.path.join(path, '__init__')
path += '.py'
objs = []
if os.path.exists(path):
try:
objs = read_objs_from_path(path)
except:
continue
elif not re.search(MODULE_BLACKLIST, name):
try:
mod = __import__(name)
objs = [k for k in dir(mod) if not k.startswith('__')]
except:
continue
else:
continue
for obj in objs:
if name not in modules[obj]:
modules[obj].append(name)
suppress_output(True)
return merge_dicts(idx, dict(modules)) | 0.002445 |
def ldap_plugin(self):
""" Configures the LDAP plugin """
name = self._ask_with_default("Authentication method name (will be displayed on the login page)", "LDAP")
prefix = self._ask_with_default("Prefix to append to the username before db storage. Usefull when you have more than one auth method with "
"common usernames.", "")
ldap_host = self._ask_with_default("LDAP Host", "ldap.your.domain.com")
encryption = 'none'
while True:
encryption = self._ask_with_default("Encryption (either 'ssl', 'tls', or 'none')", 'none')
if encryption not in ['none', 'ssl', 'tls']:
self._display_error("Invalid value")
else:
break
base_dn = self._ask_with_default("Base DN", "ou=people,c=com")
request = self._ask_with_default("Request to find a user. '{}' will be replaced by the username", "uid={}")
require_cert = self._ask_boolean("Require certificate validation?", encryption is not None)
return {
"plugin_module": "inginious.frontend.plugins.auth.ldap_auth",
"host": ldap_host,
"encryption": encryption,
"base_dn": base_dn,
"request": request,
"prefix": prefix,
"name": name,
"require_cert": require_cert
} | 0.005022 |
def relaxNGValidateFullElement(self, ctxt, elem):
"""Validate a full subtree when
xmlRelaxNGValidatePushElement() returned 0 and the content
of the node has been expanded. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlRelaxNGValidateFullElement(ctxt__o, self._o, elem__o)
return ret | 0.01559 |
def _bresenham(self, faces, dx):
r'''
A Bresenham line function to generate points to fill in for the fibers
'''
line_points = []
for face in faces:
# Get in hull order
fx = face[:, 0]
fy = face[:, 1]
fz = face[:, 2]
# Find the axis with the smallest spread and remove it to make 2D
if (np.std(fx) < np.std(fy)) and (np.std(fx) < np.std(fz)):
f2d = np.vstack((fy, fz)).T
elif (np.std(fy) < np.std(fx)) and (np.std(fy) < np.std(fz)):
f2d = np.vstack((fx, fz)).T
else:
f2d = np.vstack((fx, fy)).T
hull = sptl.ConvexHull(f2d, qhull_options='QJ Pp')
face = np.around(face[hull.vertices].astype(float), 6)
for i in range(len(face)):
vec = face[i]-face[i-1]
vec_length = np.linalg.norm(vec)
increments = np.ceil(vec_length/dx)
check_p_old = np.array([-1, -1, -1])
for x in np.linspace(0, 1, increments):
check_p_new = face[i-1]+(vec*x)
if np.sum(check_p_new - check_p_old) != 0:
line_points.append(check_p_new)
check_p_old = check_p_new
return np.asarray(line_points) | 0.001474 |
def train(
self,
true_sampler,
generative_model,
discriminative_model,
iter_n=100,
k_step=10
):
'''
Train.
Args:
true_sampler: Sampler which draws samples from the `true` distribution.
generative_model: Generator which draws samples from the `fake` distribution.
discriminative_model: Discriminator which discriminates `true` from `fake`.
iter_n: The number of training iterations.
k_step: The number of learning of the discriminative_model.
Returns:
Tuple data.
- trained Generator which is-a `GenerativeModel`.
- trained Discriminator which is-a `DiscriminativeModel`.
'''
if isinstance(true_sampler, TrueSampler) is False:
raise TypeError("The type of `true_sampler` must be `TrueSampler`.")
if isinstance(generative_model, AutoEncoderModel) is False:
raise TypeError("The type of `generative_model` must be `AutoEncoderModel`.")
if isinstance(discriminative_model, DiscriminativeModel) is False:
raise TypeError("The type of `discriminative_model` must be `DiscriminativeModel`.")
a_logs_list = []
d_logs_list = []
g_logs_list = []
try:
for n in range(iter_n):
self.__logger.debug("-" * 100)
self.__logger.debug("Iterations: (" + str(n+1) + "/" + str(iter_n) + ")")
self.__logger.debug("-" * 100)
self.__logger.debug(
"The `auto_encoder`'s turn."
)
self.__logger.debug("-" * 100)
generative_model, a_logs_list = self.train_auto_encoder(
generative_model,
a_logs_list
)
self.__logger.debug("-" * 100)
self.__logger.debug(
"The `discriminator`'s turn."
)
self.__logger.debug("-" * 100)
discriminative_model, d_logs_list = self.train_discriminator(
k_step,
true_sampler,
generative_model,
discriminative_model,
d_logs_list
)
self.__logger.debug("-" * 100)
self.__logger.debug(
"The `generator`'s turn."
)
self.__logger.debug("-" * 100)
generative_model, g_logs_list = self.train_generator(
generative_model,
discriminative_model,
g_logs_list
)
except KeyboardInterrupt:
print("Keyboard Interrupt.")
self.__logs_tuple = (a_logs_list, d_logs_list, g_logs_list)
return generative_model, discriminative_model | 0.003923 |
def get_digest_keys(self):
"""Returns a list of the type choices"""
digest_keys = []
for col in xrange(self.GetNumberCols()):
digest_key = self.GetCellValue(self.has_header, col)
if digest_key == "":
digest_key = self.digest_types.keys()[0]
digest_keys.append(digest_key)
return digest_keys | 0.005319 |
def aesthetics(cls):
"""
Return a set of all non-computed aesthetics for this stat.
stats should not override this method.
"""
aesthetics = cls.REQUIRED_AES.copy()
calculated = get_calculated_aes(cls.DEFAULT_AES)
for ae in set(cls.DEFAULT_AES) - set(calculated):
aesthetics.add(ae)
return aesthetics | 0.005319 |
def usage_example(phrase, format='json'):
"""Takes the source phrase and queries it to the urbandictionary API
:params phrase: word for which usage_example is to be found
:param format: response structure type. Defaults to: "json"
:returns: returns a json object as str, False if invalid phrase
"""
base_url = Vocabulary.__get_api_link("urbandict")
url = base_url.format(action="define", word=phrase)
word_examples = {}
json_obj = Vocabulary.__return_json(url)
if json_obj:
examples_list = json_obj["list"]
for i, example in enumerate(examples_list):
if example["thumbs_up"] > example["thumbs_down"]:
word_examples[i] = example["example"].replace("\r", "").replace("\n", "")
if word_examples:
# reforamatting "word_examples" using "__clean_dict()"
# return json.dumps(Vocabulary.__clean_dict(word_examples))
# return Vocabulary.__clean_dict(word_examples)
return Response().respond(Vocabulary.__clean_dict(word_examples), format)
else:
return False
else:
return False | 0.003247 |
def daily3D(inst, bin1, label1, bin2, label2, bin3, label3,
data_label, gate, returnBins=False):
"""3D Daily Occurrence Probability of data_label > gate over a season.
If data_label is greater than gate at least once per day,
then a 100% occurrence probability results. Season delineated by
the bounds attached to Instrument object.
Prob = (# of times with at least one hit)/(# of times in bin)
Parameters
----------
inst: pysat.Instrument()
Instrument to use for calculating occurrence probability
binx: list
[min, max, number of bins]
labelx: string
name for data product for binx
data_label: list of strings
identifies data product(s) to calculate occurrence probability
gate: list of values
values that data_label must achieve to be counted as an occurrence
returnBins: Boolean
if True, return arrays with values of bin edges, useful for pcolor
Returns
-------
occur_prob : dictionary
A dict of dicts indexed by data_label. Each entry is dict with entries
'prob' for the probability and 'count' for the number of days with any
data; 'bin_x', 'bin_y', and 'bin_z' are also returned if requested. Note
that arrays are organized for direct plotting, z,y,x.
Note
----
Season delineated by the bounds attached to Instrument object.
"""
return _occurrence3D(inst, bin1, label1, bin2, label2, bin3, label3,
data_label, gate, returnBins=returnBins, by_orbit=False) | 0.009375 |
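A one-bin illustration of the occurrence probability defined above: a day counts as a hit if `data_label` exceeds the gate at least once that day:

```python
import numpy as np

gate = 5.0
days = [np.array([1.0, 2.0, 7.0]),  # exceeds the gate -> hit
        np.array([0.5, 1.5]),       # never exceeds    -> no hit
        np.array([6.0, 6.5])]       # exceeds          -> hit
hits = sum((day > gate).any() for day in days)
print(hits / len(days))  # 0.666... occurrence probability for this bin
```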
def comment_import(r, unused_dot_list):
"""Comment out import for {dot_str}."""
unused_dot_str = ".".join(unused_dot_list)
for n in r("ImportNode"):
if n.names()[0] == unused_dot_str:
# The "!" is inserted so that this line doesn't show up when searching for
# the comment pattern in code.
n.replace("#{}# {}".format("!", str(n)))
break | 0.00495 |
async def get_schemes(self) -> List[Scheme]:
"""Return supported uri schemes."""
return [
Scheme.make(**x)
for x in await self.services["avContent"]["getSchemeList"]()
] | 0.009217 |
def create_db(self, instance_name, instance_type,
admin_username, admin_password, security_groups=None, db_name=None,
storage_size_gb=DEFAULT_STORAGE_SIZE_GB,
timeout_s=DEFAULT_TIMEOUT_S):
"""
Creates a database instance.
This method blocks until the db instance is active, or until
:attr:`timeout_s` has elapsed.
By default, hpcloud *assigns* an automatically-generated set of
credentials for an admin user. In addition to launching the db
instance, this method uses the autogenerated credentials to login to
the server and create the intended admin user based on the credentials
supplied as method arguments.
:param str instance_name: A name to assign to the db instance.
:param str instance_type: The server instance type (e.g. ``medium``).
:param str admin_username: The admin username.
:param str admin_password: The admin password.
:param security_groups: *Not used in hpcloud*.
:param str db_name: The database name. If this is not specified, the
database will be named the same as the :attr:`instance_name`.
:param int storage_size_gb: The size of the storage volume in GB.
:param float timeout_s: The number of seconds to poll for an active
database server before failing. This value is also used when
attempting to connect to the running mysql server.
:rtype: :class:`dict`
"""
db = self._create_db(instance_name, instance_type,
storage_size_gb)
# hang on to these... hpcloud only provides a way to generate a new
# set of username/password - there is no way to retrieve the originals.
default_creds = db.credential
log.debug('Credentials for %s: %s' % (instance_name, default_creds))
instance = self._poll_instance_status(db, timeout_s)
# we're taking advantage of a security bug in hpcloud's dbaas security
# group rules. the default *security* is to allow connections from
# everywhere in the world.
def connect():
try:
return pymysql.connect(
host=instance.hostname,
port=instance.port,
# db=self.database,
user=default_creds['username'],
passwd=default_creds['password'],
connect_timeout=timeout_s,
)
except:
log.warn("Could not connect to db, %s" % instance_name)
# log.debug("Connection exception", exc_info=True)
log.info("Connecting to %s..." % instance_name)
db = poll_with_timeout(timeout_s, connect, 10)
cur = db.cursor()
cur.execute(
"grant all privileges on *.* "
"to '%s'@'%%' identified by '%s' "
"with grant option"
% (admin_username, admin_password)
)
cur.execute("flush privileges")
return db_to_dict(instance) | 0.002218 |
def print_logins(logins):
"""Prints out the login history for a user"""
table = formatting.Table(['Date', 'IP Address', 'Successful Login?'])
for login in logins:
table.add_row([login.get('createDate'), login.get('ipAddress'), login.get('successFlag')])
return table | 0.006897 |
def start_optimisation(self, rounds: int, max_angle: float,
max_distance: float, temp: float=298.15,
stop_when=None, verbose=None):
"""Starts the loop fitting protocol.
Parameters
----------
rounds : int
The number of Monte Carlo moves to be evaluated.
max_angle : float
The maximum variation in rotation that can moved per
step.
max_distance : float
The maximum distance the can be moved per step.
temp : float, optional
Temperature used during fitting process.
stop_when : float, optional
Stops fitting when energy is less than or equal to this value.
"""
self._generate_initial_score()
self._mmc_loop(rounds, max_angle, max_distance, temp=temp,
stop_when=stop_when, verbose=verbose)
return | 0.006397 |
def write(self, fptr):
"""Write a Data Reference box to file.
"""
self._write_validate()
# Very similar to the way a superbox is written.
orig_pos = fptr.tell()
fptr.write(struct.pack('>I4s', 0, b'dtbl'))
# Write the number of data entry url boxes.
write_buffer = struct.pack('>H', len(self.DR))
fptr.write(write_buffer)
for box in self.DR:
box.write(fptr)
end_pos = fptr.tell()
fptr.seek(orig_pos)
fptr.write(struct.pack('>I', end_pos - orig_pos))
fptr.seek(end_pos) | 0.003361 |