def _set_igmps_prefix_list(self, v, load=False):
"""
Setter method for igmps_prefix_list, mapped from YANG variable /igmp_snooping/ip/igmp/ssm_map/igmps_prefix_list (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_igmps_prefix_list is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igmps_prefix_list() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("igmps_prefix_list_name igmps_prefix_src_addr",igmps_prefix_list.igmps_prefix_list, yang_name="igmps-prefix-list", rest_name="igmps-prefix-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='igmps-prefix-list-name igmps-prefix-src-addr', extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpsPrefixList', u'cli-suppress-mode': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="igmps-prefix-list", rest_name="igmps-prefix-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpsPrefixList', u'cli-suppress-mode': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """igmps_prefix_list must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("igmps_prefix_list_name igmps_prefix_src_addr",igmps_prefix_list.igmps_prefix_list, yang_name="igmps-prefix-list", rest_name="igmps-prefix-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='igmps-prefix-list-name igmps-prefix-src-addr', extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpsPrefixList', u'cli-suppress-mode': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="igmps-prefix-list", rest_name="igmps-prefix-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpsPrefixList', u'cli-suppress-mode': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='list', is_config=True)""",
})
self.__igmps_prefix_list = t
if hasattr(self, '_set'):
self._set()
def validate(self):
"""
Asserts that every declared :term:`specification` can actually be
realized, meaning that all of its :term:`dependencies <dependency>` are
present and there are no self-dependencies or :term:`dependency cycles
<dependency cycle>`. If such a problem is found, a proper exception
(deriving from :py:class:`GraphValidationError`) is raised.
:raises:
:py:exc:`MissingDependencyError`,
:py:exc:`SelfDependencyError`,
:py:exc:`DependencyCycleError`
"""
# This method uses Tarjan's strongly connected components algorithm
# with an added self-dependency check to find dependency cycles.
# Index is just an integer, it's wrapped in a list as a workaround for
# Python 2's lack of `nonlocal` keyword, so the nested
# `strongconnect()` may modify it.
index = [0]
indices = {}
lowlinks = {}
stack = []
def strongconnect(specification):
# Set the depth index for the node to the smallest unused index.
indices[specification] = index[0]
lowlinks[specification] = index[0]
index[0] += 1
stack.append(specification)
provider = self.providers[specification]
dependencies = six.itervalues(provider.dependencies)
for dependency in dependencies:
if isinstance(dependency, Factory):
dependency = dependency.specification
if dependency not in self.providers:
raise MissingDependencyError(specification, dependency)
if dependency == specification:
raise SelfDependencyError(specification)
if dependency not in indices:
# Dependency has not yet been visited; recurse on it.
strongconnect(dependency)
lowlinks[specification] = min(
lowlinks[specification],
lowlinks[dependency]
)
elif dependency in stack:
# Dependency is in stack and hence in the current strongly
# connected component.
lowlinks[specification] = min(
lowlinks[specification],
indices[dependency]
)
if lowlinks[specification] == indices[specification]:
component = []
while True:
component.append(stack.pop())
if component[-1] == specification:
break
if len(component) > 1:
raise DependencyCycleError(reversed(component))
for specification, provider in six.iteritems(self.providers):
if specification not in indices:
strongconnect(specification)
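# A minimal, self-contained sketch of the cycle check performed above, using a
# plain depth-first back-edge search (simpler than Tarjan's SCC algorithm) over
# a hypothetical {node: [dependencies]} dict. The names below are illustrative
# only and are not part of the class above.
def find_dependency_cycle(graph):
    """Return a list of nodes forming a cycle, or None if the graph is acyclic."""
    visiting, visited = set(), set()
    def visit(node, path):
        visiting.add(node)
        path.append(node)
        for dep in graph.get(node, ()):
            if dep in visiting:                      # back-edge -> cycle
                return path[path.index(dep):]
            if dep not in visited:
                cycle = visit(dep, path)
                if cycle:
                    return cycle
        visiting.discard(node)
        visited.add(node)
        path.pop()
        return None
    for start in graph:
        if start not in visited:
            cycle = visit(start, [])
            if cycle:
                return cycle
    return None
print(find_dependency_cycle({'a': ['b'], 'b': ['c'], 'c': ['a']}))   # -> ['a', 'b', 'c']
print(find_dependency_cycle({'a': ['b'], 'b': []}))                  # -> None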
def run(self):
""" Runs the printer loop in a subprocess. This is called by
multiprocessing.
"""
try:
self._loop()
except Exception:
# Send the exception through the exc_queue, so the parent
# process can check it.
typ, val, tb = sys.exc_info()
tb_lines = traceback.format_exception(typ, val, tb)
self.exc_queue.put((val, tb_lines))
def iter_zeros(self):
"""Iterate through the indices of all zero items."""
num = quotient = 0
while num < self._len:
chunk = self.data[quotient]
if chunk & self.zero_mask:
remainder = 0
while remainder < self.width and num < self._len:
item = (chunk >> remainder) & 3
if item == PC_ZERO:
yield num
remainder += 2
num += 1
else:
num += (self.width >> 1)
quotient += 1
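# Standalone illustration of the two-bit packing that iter_zeros() walks. The
# values here (PC_ZERO == 0 and a hand-built chunk) are assumptions for the
# example only; the real class supplies its own data, width and zero_mask.
PC_ZERO = 0
items = [0, 3, 2, 0, 1, 0]                 # logical values, two bits each
chunk = 0
for i, item in enumerate(items):
    chunk |= (item & 3) << (2 * i)
# Recover the indices of zero items the same way the inner loop above does.
zeros = [i for i in range(len(items)) if (chunk >> (2 * i)) & 3 == PC_ZERO]
print(zeros)                               # -> [0, 3, 5]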
def selection(x_bounds,
x_types,
clusteringmodel_gmm_good,
clusteringmodel_gmm_bad,
minimize_starting_points,
minimize_constraints_fun=None):
'''
Select the lowest mu value
'''
results = lib_acquisition_function.next_hyperparameter_lowest_mu(\
_ratio_scores, [clusteringmodel_gmm_good, clusteringmodel_gmm_bad],\
x_bounds, x_types, minimize_starting_points, \
minimize_constraints_fun=minimize_constraints_fun)
return results
def __from_format(jd: float, fmt: str) -> (int, float):
"""
Converts a Julian Day format into the "standard" Julian
day format.
Parameters
----------
jd
fmt
Returns
-------
(jd, fractional): (int, float)
A tuple representing a Julian day. The first number is the
Julian Day Number, and the second is the fractional component of the
day. A fractional component of 0.5 represents noon. Therefore
the standard Julian day would be (jd + fractional - 0.5)
"""
if fmt.lower() == 'jd':
# If jd has a fractional component of 0, then we are 12 hours into
# the day
return math.floor(jd + 0.5), jd + 0.5 - math.floor(jd + 0.5)
elif fmt.lower() == 'mjd':
return __from_format(jd + 2400000.5, 'jd')
elif fmt.lower() == 'rjd':
return __from_format(jd + 2400000, 'jd')
else:
raise ValueError('Invalid Format')
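# A quick numeric check of the 'jd' branch above: Julian Day 2457082.0 falls at
# noon, so it should split into day number 2457082 and a fractional part of 0.5.
import math
jd = 2457082.0
jdn = math.floor(jd + 0.5)                 # integer Julian Day Number
frac = jd + 0.5 - jdn                      # fraction of the day
print(jdn, frac)                           # -> 2457082 0.5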
def getLogger(name):
"""This is used by gcdt plugins to get a logger with the right level."""
logger = logging.getLogger(name)
# note: the level might be adjusted via '-v' option
logger.setLevel(logging_config['loggers']['gcdt']['level'])
return logger
def isFloat(nstr, schema):
"""
!~~isFloat
"""
if isinstance(nstr, (float, int, long)):
return True
elif not isinstance(nstr, basestring):
return False
try:
float(nstr)
except ValueError:
return False
return True
def GetCpuReservationMHz(self):
'''Retrieves the minimum processing power in MHz reserved for the virtual
machine. For information about setting a CPU reservation, see "Limits and
Reservations" on page 14.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetCpuReservationMHz(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
def request(self, shards, full_response, return_status_tuple=False):
"""Request the API
This method is wrapped by similar functions
"""
try:
resp = self._request(shards)
if return_status_tuple:
return (self._parser(resp, full_response), True)
else:
return self._parser(resp, full_response)
except (ConflictError, CloudflareServerError, InternalServerError) as exc:
# The Retry system
if return_status_tuple:
return (None, False)
elif self.api_mother.do_retry:
# TODO
# request_limit = 0
sleep(self.api_mother.retry_sleep)
resp = self.request(shards, full_response, True)
while not resp[1]:
sleep(self.api_mother.retry_sleep)
resp = self.request(shards, full_response, True)
return resp[0]
else:
raise exc
def converter(val, current_unit, destination_unit):
"""Convert from one unit to another.
Parameters
----------
val : number
Number to convert.
current_unit : string
Current unit.
destination_unit : string
Destination unit.
Returns
-------
number
Converted value.
"""
x = val
for dic in dicts.values():
if current_unit in dic.keys() and destination_unit in dic.keys():
try:
native = eval(dic[current_unit][0])
except ZeroDivisionError:
native = np.inf
x = native # noqa: F841
try:
out = eval(dic[destination_unit][1])
except ZeroDivisionError:
out = np.inf
return out
# if all dictionaries fail
if current_unit is None and destination_unit is None:
pass
else:
warnings.warn(
"conversion {0} to {1} not valid: returning input".format(
current_unit, destination_unit
)
)
return val
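# A sketch of the unit-dictionary shape that converter() appears to expect:
# each unit maps to a pair of expressions, the first turning a value `x` in
# that unit into the native unit, the second turning the native value back
# out. The wavelength/wavenumber entries below are assumptions made for
# illustration only, not data taken from the real `dicts` module.
example_dic = {
    "nm": ("x", "native"),                 # nanometres as the native unit
    "wn": ("1e7 / x", "1e7 / native"),     # wavenumber in cm^-1
}
x = 500.0                                  # value in 'nm'
native = eval(example_dic["nm"][0])        # 'nm' -> native
out = eval(example_dic["wn"][1])           # native -> 'wn'
print(out)                                 # -> 20000.0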
def set_object_acl(self, obj):
""" Set object ACL on creation if not already present. """
if not obj._acl:
from nefertari_guards import engine as guards_engine
acl = self._factory(self.request).generate_item_acl(obj)
obj._acl = guards_engine.ACLField.stringify_acl(acl)
def get_requests(self, params={}):
"""
List requests
http://dev.wheniwork.com/#listing-requests
"""
if "status" in params:
params['status'] = ','.join(map(str, params['status']))
requests = []
users = {}
messages = {}
params['page'] = 0
while True:
param_list = [(k, params[k]) for k in sorted(params)]
url = "/2/requests/?%s" % urlencode(param_list)
data = self._get_resource(url)
for entry in data["users"]:
user = Users.user_from_json(entry)
users[user.user_id] = user
for entry in data["requests"]:
request = self.request_from_json(entry)
requests.append(request)
for entry in data["messages"]:
message = Messages.message_from_json(entry)
if message.request_id not in messages:
messages[message.request_id] = []
messages[message.request_id].append(message)
if not data['more']:
break
params['page'] += 1
for request in requests:
request.user = users.get(request.user_id, None)
request.messages = messages.get(request.request_id, [])
return requests
def metastable_sets(self):
"""
Crisp clustering using PCCA. This is only recommended for visualization purposes. You *cannot* compute any
actual quantity of the coarse-grained kinetics without employing the fuzzy memberships!
Returns
-------
A list of length equal to metastable states. Each element is an array with microstate indexes contained in it
"""
res = []
assignment = self.metastable_assignment
for i in range(self.m):
res.append(np.where(assignment == i)[0])
return res
def build_post_policy(self, expiration_time, conditions):
"""
Taken from the AWS book Python examples and modified for use with boto
"""
assert type(expiration_time) == time.struct_time, \
'Policy document must include a valid expiration Time object'
# Convert conditions object mappings to condition statements
return '{"expiration": "%s",\n"conditions": [%s]}' % \
(time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
def set(self, locs, values):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
values = conversion.ensure_datetime64ns(values, copy=False)
self.values[locs] = values
def get_fld2val(self, name, vals):
"""Describe summary statistics for a list of numbers."""
if vals:
return self._init_fld2val_stats(name, vals)
return self._init_fld2val_null(name)
def do_graphviz(self, args, arguments):
"""
::
Usage:
graphviz FILENAME
Export the data in csv format to a file. Former csv command
Arguments:
FILENAME The filename
"""
filename = arguments['FILENAME']
if platform.system() == 'Darwin':
if os.path.isfile(filename):
os.system("open -a '\''/Applications/Graphviz.app'\'' " + filename)
def is_arabicword(word):
""" Checks for an valid Arabic word.
An Arabic word not contains spaces, digits and pounctuation
avoid some spelling error, TEH_MARBUTA must be at the end.
@param word: input word
@type word: unicode
@return: True if all charaters are in Arabic block
@rtype: Boolean
"""
if len(word) == 0:
return False
elif re.search(u"([^\u0600-\u0652%s%s%s])" \
% (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_MADDA_ABOVE), word):
return False
elif is_haraka(word[0]) or word[0] in (WAW_HAMZA, YEH_HAMZA):
return False
# if Teh Marbuta or Alef_Maksura not in the end
elif re.match(u"^(.)*[%s](.)+$" % ALEF_MAKSURA, word):
return False
elif re.match(u"^(.)*[%s]([^%s%s%s])(.)+$" % \
(TEH_MARBUTA, DAMMA, KASRA, FATHA), word):
return False
else:
return True
async def auto_add(self, device, recursive=None, automount=True):
"""
Automatically attempt to mount or unlock a device, but be quiet if the
device is not supported.
:param device: device object, block device path or mount path
:param bool recursive: recursively mount and unlock child devices
:returns: whether all attempted operations succeeded
"""
device, created = await self._find_device_losetup(device)
if created and recursive is False:
return device
if device.is_luks_cleartext and self.udisks.version_info >= (2, 7, 0):
await sleep(1.5) # temporary workaround for #153, unreliable
success = True
if not self.is_automount(device, automount):
pass
elif device.is_filesystem:
if not device.is_mounted:
success = await self.mount(device)
elif device.is_crypto:
if self._prompt and not device.is_unlocked:
success = await self.unlock(device)
if success and recursive:
await self.udisks._sync()
device = self.udisks[device.object_path]
success = await self.auto_add(
device.luks_cleartext_holder,
recursive=True)
elif recursive and device.is_partition_table:
tasks = [
self.auto_add(dev, recursive=True)
for dev in self.get_all_handleable()
if dev.is_partition and dev.partition_slave == device
]
results = await gather(*tasks)
success = all(results)
else:
self._log.debug(_('not adding {0}: unhandled device', device))
return success
def success(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``SUCCESS`` level."""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def plot_xtime(self, y, x='time', label='default', labelx=None,
labely=None ,title=None, shape='.', logx=False,
logy=True, base=10):
'''
make a simple plot of two columns against each other.
An example would be instance.plot_xtime('PB206', label='PB206 vs t_y').
Recommend using the plot function DataPlot.plot(); it has more
functionality.
Parameters
----------
y : string
Column on Y-axis.
x : string, optional
Column on X-axis. The default is "time".
label : string, optional
Legend label. The default is "default".
labelx : string, optional
The label on the X axis. The default is None.
labely : string, optional
The label on the Y axis. The default is None.
title : string, optional
The title of the graph. The default is None.
shape : string, optional
The shape and colour the user would like their plot in.
The default is '.'.
logx : boolean, optional
Whether the user wants the X axis plotted logarithmically.
The default is False.
logy : boolean, optional
Whether the user wants the Y axis plotted logarithmically.
The default is True.
base : integer, optional
The base of the logarithm. The default is 10.
Notes
-----
For all possible choices visit,
<http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot>
'''
if label == 'default':
lab_str=y
else:
lab_str=label
try:
self.get(x)
except KeyError:
x='age'
DataPlot.plot(self,x,y,legend=lab_str,labelx=labelx, labely=labely,
title=title, shape=shape,logx=logx, logy=logy, base=base)
'''
print X,Y
xdat=self.get(X)
ydat=self.get(Y)
self.xdat = xdat
self.ydat = ydat
plot(xdat,log10(ydat),label=lab_str)
legend()
'''
def ner_net(source, destinations, width, height, wrap_around=False, radius=10):
"""Produce a shortest path tree for a given net using NER.
This is the kernel of the NER algorithm.
Parameters
----------
source : (x, y)
The coordinate of the source vertex.
destinations : iterable([(x, y), ...])
The coordinates of destination vertices.
width : int
Width of the system (nodes)
height : int
Height of the system (nodes)
wrap_around : bool
True if wrap-around links should be used, false if they should be
avoided.
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice.
Returns
-------
(:py:class:`~.rig.place_and_route.routing_tree.RoutingTree`,
{(x,y): :py:class:`~.rig.place_and_route.routing_tree.RoutingTree`, ...})
A RoutingTree is produced rooted at the source and visiting all
destinations but which does not contain any vertices etc. For
convenience, a dictionary mapping from destination (x, y) coordinates
to the associated RoutingTree is provided to allow the caller to insert
these items.
"""
# Map from (x, y) to RoutingTree objects
route = {source: RoutingTree(source)}
# Handle each destination, sorted by distance from the source, closest
# first.
for destination in sorted(destinations,
key=(lambda destination:
shortest_mesh_path_length(
to_xyz(source), to_xyz(destination))
if not wrap_around else
shortest_torus_path_length(
to_xyz(source), to_xyz(destination),
width, height))):
# We shall attempt to find our nearest neighbouring placed node.
neighbour = None
# Try to find a nearby (within radius hops) node in the routing tree
# that we can route to (falling back on just routing to the source).
#
# An implementation according to the algorithm's original
# specification looks for nodes at each point in a growing set of rings
# of concentric hexagons. If it doesn't find any destinations this
# means an awful lot of checks: 1261 for the default radius of 20.
#
# An alternative (but behaviourally identical) implementation scans the
# list of all route nodes created so far and finds the closest node
# which is < radius hops (falling back on the origin if no node is
# closer than radius hops). This implementation requires one check per
# existing route node. In most routes this is probably a lot less than
# 1261 since most routes will probably have at most a few hundred route
# nodes by the time the last destination is being routed.
#
# Which implementation is best is a difficult question to answer:
# * In principle nets with quite localised connections (e.g.
# nearest-neighbour or centroids traffic) may route slightly more
# quickly with the original algorithm since it may very quickly find
# a neighbour.
# * In nets which connect very spaced-out destinations the second
# implementation may be quicker since in such a scenario it is
# unlikely that a neighbour will be found.
# * In extremely high-fan-out nets (e.g. broadcasts), the original
# method is very likely to perform *far* better than the alternative
# method since most iterations will complete immediately while the
# alternative method must scan *all* the route vertices.
# As such, it should be clear that neither method alone is 'best' and
# both have degenerate performance in certain completely reasonable
# styles of net. As a result, a simple heuristic is used to decide
# which technique to use.
#
# The following micro-benchmarks are crude estimate of the
# runtime-per-iteration of each approach (at least in the case of a
# torus topology)::
#
# $ # Original approach
# $ python -m timeit --setup 'x, y, w, h, r = 1, 2, 5, 10, \
# {x:None for x in range(10)}' \
# 'x += 1; y += 1; x %= w; y %= h; (x, y) in r'
# 1000000 loops, best of 3: 0.207 usec per loop
# $ # Alternative approach
# $ python -m timeit --setup 'from rig.geometry import \
# shortest_torus_path_length' \
# 'shortest_torus_path_length( \
# (0, 1, 2), (3, 2, 1), 10, 10)'
# 1000000 loops, best of 3: 0.666 usec per loop
#
# From this we can approximately suggest that the alternative approach
# is 3x more expensive per iteration. A very crude heuristic is to use
# the original approach when the number of route nodes is more than
# 1/3rd of the number of routes checked by the original method.
concentric_hexagons = memoized_concentric_hexagons(radius)
if len(concentric_hexagons) < len(route) / 3:
# Original approach: Start looking for route nodes in a concentric
# spiral pattern out from the destination node.
for x, y in concentric_hexagons:
x += destination[0]
y += destination[1]
if wrap_around:
x %= width
y %= height
if (x, y) in route:
neighbour = (x, y)
break
else:
# Alternative approach: Scan over every route node and check to see
# if any are < radius, picking the closest one if so.
neighbour = None
neighbour_distance = None
for candidate_neighbour in route:
if wrap_around:
distance = shortest_torus_path_length(
to_xyz(candidate_neighbour), to_xyz(destination),
width, height)
else:
distance = shortest_mesh_path_length(
to_xyz(candidate_neighbour), to_xyz(destination))
if distance <= radius and (neighbour is None or
distance < neighbour_distance):
neighbour = candidate_neighbour
neighbour_distance = distance
# Fall back on routing directly to the source if no nodes within radius
# hops of the destination was found.
if neighbour is None:
neighbour = source
# Find the shortest vector from the neighbour to this destination
if wrap_around:
vector = shortest_torus_path(to_xyz(neighbour),
to_xyz(destination),
width, height)
else:
vector = shortest_mesh_path(to_xyz(neighbour), to_xyz(destination))
# The longest-dimension-first route may inadvertently pass through an
# already connected node. If the route is allowed to pass through that
# node it would create a cycle in the route which would be VeryBad(TM).
# As a result, we work backward through the route and truncate it at
# the first point where the route intersects with a connected node.
ldf = longest_dimension_first(vector, neighbour, width, height)
i = len(ldf)
for direction, (x, y) in reversed(ldf):
i -= 1
if (x, y) in route:
# We've just bumped into a node which is already part of the
# route, this becomes our new neighbour and we truncate the LDF
# route. (Note ldf list is truncated just after the current
# position since it gives (direction, destination) pairs).
neighbour = (x, y)
ldf = ldf[i + 1:]
break
# Take the longest dimension first route.
last_node = route[neighbour]
for direction, (x, y) in ldf:
this_node = RoutingTree((x, y))
route[(x, y)] = this_node
last_node.children.append((Routes(direction), this_node))
last_node = this_node
return (route[source], route)
def load_patterns(filename):
"""Loads the patters contained in the filename and puts them into a list
of patterns, each pattern being a list of occurrence, and each
occurrence being a list of (onset, midi) pairs.
The input file must be formatted as described in MIREX 2013:
http://www.music-ir.org/mirex/wiki/2013:Discovery_of_Repeated_Themes_%26_Sections
Parameters
----------
filename : str
The input file path containing the patterns of a given piece using the
MIREX 2013 format.
Returns
-------
pattern_list : list
The list of patterns, containing all their occurrences,
using the following format::
onset_midi = (onset_time, midi_number)
occurrence = [onset_midi1, ..., onset_midiO]
pattern = [occurrence1, ..., occurrenceM]
pattern_list = [pattern1, ..., patternN]
where ``N`` is the number of patterns, ``M[i]`` is the number of
occurrences of the ``i`` th pattern, and ``O[j]`` is the number of
onsets in the ``j``'th occurrence. E.g.::
occ1 = [(0.5, 67.0), (1.0, 67.0), (1.5, 67.0), (2.0, 64.0)]
occ2 = [(4.5, 65.0), (5.0, 65.0), (5.5, 65.0), (6.0, 62.0)]
pattern1 = [occ1, occ2]
occ1 = [(10.5, 67.0), (11.0, 67.0), (11.5, 67.0), (12.0, 64.0),
(12.5, 69.0), (13.0, 69.0), (13.5, 69.0), (14.0, 67.0),
(14.5, 76.0), (15.0, 76.0), (15.5, 76.0), (16.0, 72.0)]
occ2 = [(18.5, 67.0), (19.0, 67.0), (19.5, 67.0), (20.0, 62.0),
(20.5, 69.0), (21.0, 69.0), (21.5, 69.0), (22.0, 67.0),
(22.5, 77.0), (23.0, 77.0), (23.5, 77.0), (24.0, 74.0)]
pattern2 = [occ1, occ2]
pattern_list = [pattern1, pattern2]
"""
# List with all the patterns
pattern_list = []
# Current pattern, which will contain all occs
pattern = []
# Current occurrence, containing (onset, midi)
occurrence = []
with _open(filename, mode='r') as input_file:
for line in input_file.readlines():
if "pattern" in line:
if occurrence != []:
pattern.append(occurrence)
if pattern != []:
pattern_list.append(pattern)
occurrence = []
pattern = []
continue
if "occurrence" in line:
if occurrence != []:
pattern.append(occurrence)
occurrence = []
continue
string_values = line.split(",")
onset_midi = (float(string_values[0]), float(string_values[1]))
occurrence.append(onset_midi)
# Add last occurrence and pattern to pattern_list
if occurrence != []:
pattern.append(occurrence)
if pattern != []:
pattern_list.append(pattern)
return pattern_list
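# A small end-to-end check of the parser above, using an inline file in the
# MIREX 2013 layout described in the docstring. The temporary file name is
# arbitrary, and the example assumes `_open` falls back to the builtin open
# for plain local paths.
import os
import tempfile
mirex_text = (
    "pattern1\n"
    "occurrence1\n"
    "0.5, 67.0\n"
    "1.0, 67.0\n"
    "occurrence2\n"
    "4.5, 65.0\n"
    "5.0, 65.0\n"
    "pattern2\n"
    "occurrence1\n"
    "10.5, 72.0\n"
)
path = os.path.join(tempfile.gettempdir(), "example_patterns.txt")
with open(path, "w") as fh:
    fh.write(mirex_text)
patterns = load_patterns(path)
print(len(patterns))                       # -> 2
print(patterns[0][1])                      # -> [(4.5, 65.0), (5.0, 65.0)]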
def dominator_tree_to_dot(self, filename):
"""
Export the dominator tree of the function to a dot file
Args:
filename (str)
"""
def description(node):
desc ='{}\n'.format(node)
desc += 'id: {}'.format(node.node_id)
if node.dominance_frontier:
desc += '\ndominance frontier: {}'.format([n.node_id for n in node.dominance_frontier])
return desc
with open(filename, 'w', encoding='utf8') as f:
f.write('digraph{\n')
for node in self.nodes:
f.write('{}[label="{}"];\n'.format(node.node_id, description(node)))
if node.immediate_dominator:
f.write('{}->{};\n'.format(node.immediate_dominator.node_id, node.node_id))
f.write("}\n") | 0.007126 |
def find(self, filter=None, page=1, per_page=10, fields=None, context=None):
"""
Find records that match the filter.
Pro Tip: The fields could have nested fields names if the field is
a relationship type. For example if you were looking up an order
and also want to get the shipping address country then fields would be:
`['shipment_address', 'shipment_address.country']`
but country in this case is the ID of the country which is not very
useful if you don't already have a map. You can fetch the country code
by adding `'shipment_address.country.code'` to the fields.
:param filter: A domain expression (Refer docs for domain syntax)
:param page: The page to fetch to get paginated results
:param per_page: The number of records to fetch per page
:param fields: A list of field names to fetch.
:param context: Any overrides to the context.
"""
if filter is None:
filter = []
rv = self.client.session.get(
self.path,
params={
'filter': dumps(filter or []),
'page': page,
'per_page': per_page,
'field': fields,
'context': dumps(context or self.client.context),
}
)
response_received.send(rv)
return rv
def defaultSessionFactory(env={}, usePTY=False, *args, **kwargs):
"""Create a SSHChannel of the given :channelType: type
"""
return SSHSession(env, usePTY, *args, **kwargs)
def delete(self, config_file=None):
"""Deletes the credentials file specified in `config_file`. If no
file is specified, it deletes the default user credential file.
Args:
config_file (str): Path to configuration file. Defaults to delete
the user default location if `None`.
.. Tip::
To see if there is a default user credential file stored, do the
following::
>>> creds = Credentials()
>>> print(creds)
Credentials(username=eschbacher, key=abcdefg,
base_url=https://eschbacher.carto.com/)
"""
path_to_remove = config_file or _DEFAULT_PATH
try:
os.remove(path_to_remove)
print('Credentials at {} successfully removed.'.format(
path_to_remove))
except OSError as err:
warnings.warn('No credential file found at {}.'.format(
path_to_remove))
def directory_create_temp(self, template_name, mode, path, secure):
"""Creates a temporary directory in the guest.
in template_name of type str
Template for the name of the directory to create. This must
contain at least one 'X' character. The first group of consecutive
'X' characters in the template will be replaced by a random
alphanumeric string to produce a unique name.
in mode of type int
The UNIX-style access mode mask to create the directory with.
Whether/how all three access groups and associated access rights are
realized is guest OS dependent. The API does the best it can on each
OS.
This parameter is ignored if the @a secure parameter is set to @c true.
It is strongly recommended to use 0700.
in path of type str
The path to the directory in which the temporary directory should
be created. Guest path style.
in secure of type bool
Whether to fail if the directory can not be securely created.
Currently this means that another unprivileged user cannot
manipulate the path specified or remove the temporary directory
after it has been created. Also causes the mode specified to be
ignored. May not be supported on all guest types.
return directory of type str
On success this will contain the full path to the created
directory. Guest path style.
raises :class:`VBoxErrorNotSupported`
The operation is not possible as requested on this particular
guest type.
raises :class:`OleErrorInvalidarg`
Invalid argument. This includes an incorrectly formatted template,
or a non-absolute path.
raises :class:`VBoxErrorIprtError`
The temporary directory could not be created. Possible reasons
include a non-existing path or an insecure path when the secure
option was requested.
"""
if not isinstance(template_name, basestring):
raise TypeError("template_name can only be an instance of type basestring")
if not isinstance(mode, baseinteger):
raise TypeError("mode can only be an instance of type baseinteger")
if not isinstance(path, basestring):
raise TypeError("path can only be an instance of type basestring")
if not isinstance(secure, bool):
raise TypeError("secure can only be an instance of type bool")
directory = self._call("directoryCreateTemp",
in_p=[template_name, mode, path, secure])
return directory
def load(fp, **kwargs) -> BioCCollection:
"""
Deserialize fp (a .read()-supporting text file or binary file containing a JSON document) to
a BioCCollection object
Args:
fp: a file containing a JSON document
**kwargs:
Returns:
BioCCollection: a collection
"""
obj = json.load(fp, **kwargs)
return parse_collection(obj)
def get_instructor_term_list_name(instructor_netid, year, quarter):
"""
Return the list address of UW instructor email list for
the given year and quarter
"""
return "{uwnetid}_{quarter}{year}".format(
uwnetid=instructor_netid,
quarter=quarter.lower()[:2],
year=str(year)[-2:])
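# Example: for netid "jdoe" in autumn quarter of 2024 this builds "jdoe_au24"
# (the quarter is truncated to its first two letters, the year to its last two).
print(get_instructor_term_list_name("jdoe", 2024, "autumn"))   # -> jdoe_au24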
def rename_db_ref(stmts_in, ns_from, ns_to, **kwargs):
"""Rename an entry in the db_refs of each Agent.
This is particularly useful when old Statements in pickle files
need to be updated after a namespace was changed such as
'BE' to 'FPLX'.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements whose Agents' db_refs need to be changed
ns_from : str
The namespace identifier to replace
ns_to : str
The namespace identifier to replace to
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of Statements with Agents' db_refs changed.
"""
logger.info('Remapping "%s" to "%s" in db_refs on %d statements...' %
(ns_from, ns_to, len(stmts_in)))
stmts_out = [deepcopy(st) for st in stmts_in]
for stmt in stmts_out:
for agent in stmt.agent_list():
if agent is not None and ns_from in agent.db_refs:
agent.db_refs[ns_to] = agent.db_refs.pop(ns_from)
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out
def dict_gather(comm, d, op='mean', assert_all_have_data=True):
"""
Perform a reduction operation over dicts
"""
if comm is None: return d
alldicts = comm.allgather(d)
size = comm.size
k2li = defaultdict(list)
for d in alldicts:
for (k,v) in d.items():
k2li[k].append(v)
result = {}
for (k,li) in k2li.items():
if assert_all_have_data:
assert len(li)==size, "only %i out of %i MPI workers have sent '%s'" % (len(li), size, k)
if op=='mean':
result[k] = np.mean(li, axis=0)
elif op=='sum':
result[k] = np.sum(li, axis=0)
else:
assert 0, op
return result
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def totalNumberOfTiles(self, minZoom=None, maxZoom=None):
"Return the total number of tiles for this instance extent"
nbTiles = 0
minZoom = minZoom or 0
if maxZoom:
maxZoom = maxZoom + 1
else:
maxZoom = len(self.RESOLUTIONS)
for zoom in xrange(minZoom, maxZoom):
nbTiles += self.numberOfTilesAtZoom(zoom)
return nbTiles
def setPrefix(self, p, u=None):
"""
Set the element namespace prefix.
@param p: A new prefix for the element.
@type p: basestring
@param u: A namespace URI to be mapped to the prefix.
@type u: basestring
@return: self
@rtype: L{Element}
"""
self.prefix = p
if p is not None and u is not None:
self.addPrefix(p, u)
return self
def format_table(table,
align='<',
format='{:.3g}',
colwidth=None,
maxwidth=None,
spacing=2,
truncate=0,
suffix="..."
):
"""
Formats a table represented as an iterable of iterables into a nice big string
suitable for printing.
Parameters:
-----------
align : string or list of strings
Alignment of cell contents. Each character in a string specifies
the alignment of one column.
* ``<`` - Left aligned (default)
* ``^`` - Centered
* ``>`` - Right aligned
The last alignment is repeated for unspecified columns.
If it's a list of strings, each string specifies the alignment of
one row. The last string is used repeatedly for unspecified rows.
format : string/function, or (nested) list of string/function
Formats the contents of the cells using the specified function(s)
or format string(s).
If it's a list of strings/functions each entry specifies formatting
for one column, the last entry being used repeatedly for
unspecified columns.
If it's a list of lists, each sub-list specifies one row, the last
sub-list being used repeatedly for unspecified rows.
colwidth : int, list of ints or None
The width of each column. The last width is used repeatedly for
unspecified columns. If ``None`` the width is fitted to the
contents.
maxwidth : int or None
The maximum width of the table. Defaults to terminal width minus
1 if ``None``. If the table would be wider than ``maxwidth`` one
of the columns is truncated.
spacing : int
The spacing between columns
truncate : int
Which column to truncate if table width would exceed ``maxwidth``.
Beware that no columns can have zero or negative width. If for instance
'maxwidth' is 80 and 'colwidth' is [10, 30, 30, 30] with spacing 2 the total
width will initially be 10+2+30+2+30+2+30=106. That's 26 characters too
much, so a width of 26 will be removed from the truncated column. If
'truncate' is 0, column 0 will have a width of -16 which is not permitted.
"""
table = list(deepcopy(table))
if not isinstance(align, list):
align = [align]
if not isinstance(format, list):
format = [format]
if not isinstance(format[0], list):
format = [format]
num_cols = len(table[0])
if len(set([len(row) for row in table]))>1:
raise ValueError("All rows must have the same number of columns")
for i in range(len(table)):
table[i] = list(table[i])
colformat = format[min(i,len(format)-1)]
for j, cell in enumerate(table[i]):
f = colformat[min(j,len(colformat)-1)]
if isinstance(f, str):
fun = lambda x: f.format(x)
else:
fun = f
try:
table[i][j] = fun(cell)
except:
table[i][j] = str(cell)
if colwidth==None:
cellwidth = [[len(cell) for cell in row] for row in table]
colwidth = list(map(max, zip(*cellwidth)))
elif not isinstance(colwidth, list):
colwidth = [colwidth]
colwidth.extend([colwidth[-1]]*(num_cols-len(colwidth)))
if maxwidth==None:
maxwidth = get_terminal_size().columns-1
width = sum(colwidth)+spacing*(num_cols-1)
if width>maxwidth:
colwidth[truncate] -= (width-maxwidth)
for j, cw in enumerate(colwidth):
if cw<1:
raise RuntimeError("Column {} in format_table() has width {}. "
"Make sure all columns have width >0. "
"Read docstring for further details."
.format(j,cw)
)
s = ''
for i, row in enumerate(table):
if i != 0: s += "\n"
colalign = align[min(i,len(align)-1)]
colformat = format[min(i,len(format)-1)]
for j, col in enumerate(row):
a = colalign[min(j,len(colalign)-1)]
f = colformat[min(j,len(colformat)-1)]
w = colwidth[j]
if j!=0: s+= ' '*spacing
s += format_fit(format_time(col), w, a, suffix)
return s
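# A minimal usage sketch, assuming the module's format_time/format_fit helpers
# behave as plain string fitting for these cells: a header row plus two numeric
# rows, right-aligned, numbers rendered with the default '{:.3g}' format and
# header strings falling back to str() via the except branch above.
rows = [
    ["name", "value"],
    ["pi", 3.14159],
    ["e", 2.71828],
]
print(format_table(rows, align=">", maxwidth=40))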
def allow_analyst_reassignment(self):
"""Allow the Analyst reassignment
"""
reassign_analyst_transition = {
"id": "reassign",
"title": _("Reassign")}
for rs in self.review_states:
if rs["id"] not in ["default", "mine", "open", "all"]:
continue
rs["custom_transitions"].append(reassing_analyst_transition)
self.show_select_column = True
self.show_workflow_action_buttons = True
def get_pluggable_module_information(self, id_or_uri):
"""
Gets all the pluggable module information.
Args:
id_or_uri: Can be either the interconnect id or uri.
Returns:
array: dicts of the pluggable module information.
"""
uri = self._client.build_uri(id_or_uri) + "/pluggableModuleInformation"
return self._client.get(uri)
def logout(config):
"""Remove and forget your Bugzilla credentials"""
state = read(config.configfile)
if state.get("BUGZILLA"):
remove(config.configfile, "BUGZILLA")
success_out("Forgotten")
else:
error_out("No stored Bugzilla credentials") | 0.003571 |
def write_mRNA_children(self, db, mRNA_id):
"""
Write out the children records of the mRNA given by the ID
(not including the mRNA record itself) in a canonical
order, where exons are sorted by start position and given
first.
"""
mRNA_children = db.children(mRNA_id, order_by='start')
nonexonic_children = []
for child_rec in mRNA_children:
if child_rec.featuretype == "exon":
self.write_rec(child_rec)
self.write_exon_children(db, child_rec)
else:
nonexonic_children.append(child_rec)
self.write_recs(nonexonic_children)
def getResourceFile(self, pid, filename, destination=None):
""" Get a file within a resource.
:param pid: The HydroShare ID of the resource
:param filename: String representing the name of the resource file to get.
:param destination: String representing the directory to save the resource file to. If None, a stream
to the resource file will be returned instead.
:return: The path of the downloaded file (if destination was specified), or a stream to the resource
file.
:raises: HydroShareArgumentException if any parameters are invalid.
:raises: HydroShareNotAuthorized if user is not authorized to perform action.
:raises: HydroShareNotFound if the resource was not found.
:raises: HydroShareHTTPException if an unexpected HTTP response code is encountered.
"""
url = "{url_base}/resource/{pid}/files/{filename}".format(url_base=self.url_base,
pid=pid,
filename=filename)
if destination:
if not os.path.isdir(destination):
raise HydroShareArgumentException("{0} is not a directory.".format(destination))
if not os.access(destination, os.W_OK):
raise HydroShareArgumentException("You do not have write permissions to directory '{0}'.".format(destination))
r = self._request('GET', url, stream=True)
if r.status_code != 200:
if r.status_code == 403:
raise HydroShareNotAuthorized(('GET', url))
elif r.status_code == 404:
raise HydroShareNotFound((pid, filename))
else:
raise HydroShareHTTPException((url, 'GET', r.status_code))
if destination is None:
return r.iter_content(STREAM_CHUNK_SIZE)
else:
filepath = os.path.join(destination, filename)
with open(filepath, 'wb') as fd:
for chunk in r.iter_content(STREAM_CHUNK_SIZE):
fd.write(chunk)
return filepath
def untrace_function(module, function):
"""
Untraces given module function.
:param module: Module of the function.
:type module: object
:param function: Function to untrace.
:type function: object
:return: Definition success.
:rtype: bool
"""
if not is_traced(function):
return False
name = get_object_name(function)
setattr(module, name, untracer(function))
return True
def select(self, selection_specs=None, **selection):
"""Applies selection by dimension name
Applies a selection along the dimensions of the object using
keyword arguments. The selection may be narrowed to certain
objects using selection_specs. For container objects the
selection will be applied to all children as well.
Selections may select a specific value, slice or set of values:
* value: Scalar values will select rows along with an exact
match, e.g.:
ds.select(x=3)
* slice: Slices may be declared as tuples of the upper and
lower bound, e.g.:
ds.select(x=(0, 3))
* values: A list of values may be selected using a list or
set, e.g.:
ds.select(x=[0, 1, 2])
Args:
selection_specs: List of specs to match on
A list of types, functions, or type[.group][.label]
strings specifying which objects to apply the
selection on.
**selection: Dictionary declaring selections by dimension
Selections can be scalar values, tuple ranges, lists
of discrete values and boolean arrays
Returns:
Returns an Dimensioned object containing the selected data
or a scalar if a single value was selected
"""
if selection_specs is not None and not isinstance(selection_specs, (list, tuple)):
selection_specs = [selection_specs]
selection = {dim: sel for dim, sel in selection.items()
if dim in self.dimensions()+['selection_mask']}
if (selection_specs and not any(self.matches(sp) for sp in selection_specs)
or not selection):
return self
data = self.interface.select(self, **selection)
if np.isscalar(data):
return data
else:
return self.clone(data)
def remove_blocked_work_units(self, work_spec_name, work_unit_names):
'''Remove some work units in the blocked list.
If `work_unit_names` is :const:`None` (which must be passed
explicitly), all pending work units in `work_spec_name` are
removed; otherwise only the specific named work units will be.
Note that none of the "remove" functions will restart blocked
work units, so if you have called
e.g. :meth:`remove_available_work_units` for a predecessor
job, you may need to also call this method for its successor.
:param str work_spec_name: name of the work spec
:param list work_unit_names: names of the work units, or
:const:`None` for all in `work_spec_name`
:return: number of work units removed
'''
return self._remove_some_work_units(
work_spec_name, work_unit_names, suffix=_BLOCKED)
def set_scenario_role_names(self):
"""Populates the list of scenario role names in this deployment and
populates the scenario_master with the master role
Gets a list of deployment properties containing "isMaster" because
there is exactly one per scenario host, containing the role name
:return:
"""
log = logging.getLogger(self.cls_logger + '.set_scenario_role_names')
is_master_props = self.get_matching_property_names('isMaster')
for is_master_prop in is_master_props:
role_name = is_master_prop.split('.')[-1]
log.info('Adding scenario host: {n}'.format(n=role_name))
self.scenario_role_names.append(role_name)
# Determine if this is the scenario master
is_master = self.get_value(is_master_prop).lower().strip()
if is_master == 'true':
log.info('Found master scenario host: {r}'.format(r=role_name))
self.scenario_master = role_name
def checkIfIsTooSimple(proc):
"""check if process is just unconditional assignments
and it is useless to merge them"""
try:
a, = proc.statements
if isinstance(a, Assignment):
return True
except ValueError:
pass
return False
def get_upcoming_event_lists_for_the_remainder_of_the_month(self, year = None, month = None):
'''Return the set of events as triple of (today's events, events for the remainder of the week, events for the remainder of the month).'''
events = []
if year == None and month == None:
now = datetime.now(tz=self.timezone) # timezone?
else:
now = datetime(year=year, month=month, day=1, hour=0, minute=0, second=0, tzinfo=self.timezone)
# Get today's events, including past events
start_time = datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0, tzinfo=self.timezone)
end_time = datetime(year = start_time.year, month = start_time.month, day = start_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone)
events.append(self.get_events(start_time.isoformat(), end_time.isoformat()))
# Get this week's events
if now.weekday() < 6:
start_time = datetime(year=now.year, month=now.month, day=now.day + 1, hour=0, minute=0, second=0, tzinfo=self.timezone)
end_time = start_time + timedelta(days = 6 - now.weekday())
# We do still want to return events in the next month if they fall within this week. Otherwise
#if end_time.month != now.month:
# end_time = end_time - timedelta(days = end_time.day)
# end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone)
#else:
end_time = end_time + timedelta(seconds = -1)
#end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day - 1, hour=23, minute=59, second=59, tzinfo=self.timezone)
events.append(self.get_events(start_time.isoformat(), end_time.isoformat()))
else:
events.append([])
# Get this remaining events in the month
start_time = end_time + timedelta(seconds = 1)
if start_time.month == now.month:
if now.month == 12:
end_time = datetime(year = start_time.year, month = 12, day = 31, hour=23, minute=59, second=59, tzinfo=self.timezone)
else:
end_time = datetime(year = start_time.year, month = start_time.month + 1, day = 1, hour=0, minute=0, second=0, tzinfo=self.timezone)
end_time = end_time - timedelta(seconds = 1)
events.append(self.get_events(start_time.isoformat(), end_time.isoformat()))
else:
events.append([])
return events
async def clear(self, namespace=None, _conn=None):
"""
Clears the cache in the cache namespace. If an alternative namespace is given, it will
clear that namespace instead.
:param namespace: str alternative namespace to use
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: True
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
"""
start = time.monotonic()
ret = await self._clear(namespace, _conn=_conn)
logger.debug("CLEAR %s %d (%.4f)s", namespace, ret, time.monotonic() - start)
return ret
def runHotgym(numRecords):
"""Run the Hot Gym example."""
# Create a data source for the network.
dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
numRecords = min(numRecords, dataSource.getDataRowCount())
network = createNetwork(dataSource)
# Set predicted field
network.regions["sensor"].setParameter("predictedField", "consumption")
# Enable learning for all regions.
network.regions["SP"].setParameter("learningMode", 1)
network.regions["TM"].setParameter("learningMode", 1)
network.regions["classifier"].setParameter("learningMode", 1)
# Enable inference for all regions.
network.regions["SP"].setParameter("inferenceMode", 1)
network.regions["TM"].setParameter("inferenceMode", 1)
network.regions["classifier"].setParameter("inferenceMode", 1)
results = []
N = 1 # Run the network, N iterations at a time.
for iteration in range(0, numRecords, N):
network.run(N)
predictionResults = getPredictionResults(network, "classifier")
oneStep = predictionResults[1]["predictedValue"]
oneStepConfidence = predictionResults[1]["predictionConfidence"]
fiveStep = predictionResults[5]["predictedValue"]
fiveStepConfidence = predictionResults[5]["predictionConfidence"]
result = (oneStep, oneStepConfidence * 100,
fiveStep, fiveStepConfidence * 100)
print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
results.append(result)
return results
def write_report(summary_dict, seqid, genus, key):
"""
Parse the PointFinder outputs, and write the summary report for the current analysis type
:param summary_dict: nested dictionary containing data such as header strings, and paths to reports
:param seqid: name of the strain,
:param genus: MASH-calculated genus of current isolate
:param key: current result type. Options are 'prediction', and 'results'
"""
# Set the header string if the summary report doesn't already exist
if not os.path.isfile(summary_dict[genus][key]['summary']):
header_string = summary_dict[genus][key]['header']
else:
header_string = str()
summary_string = str()
try:
# Read in the predictions
with open(summary_dict[genus][key]['output'], 'r') as outputs:
# Skip the header
next(outputs)
for line in outputs:
# Skip empty lines
if line != '\n':
# When processing the results outputs, add the seqid to the summary string
if key == 'results':
summary_string += '{seq},{genus},'.format(seq=seqid,
genus=genus)
# Clean up the string before adding it to the summary string - replace commas
# with semi-colons, and replace tabs with commas
summary_string += line.replace(',', ';').replace('\t', ',')
# Ensure that there were results to report
if summary_string:
if not summary_string.endswith('\n'):
summary_string += '\n'
else:
if key == 'results':
summary_string += '{seq},{genus}\n'.format(seq=seqid,
genus=genus)
else:
summary_string += '{seq}\n'.format(seq=seqid)
# Write the summaries to the summary file
with open(summary_dict[genus][key]['summary'], 'a+') as summary:
# Write the header if necessary
if header_string:
summary.write(header_string)
summary.write(summary_string)
# Add the strain information If no FASTA file could be created by reference mapping
except FileNotFoundError:
# Extract the length of the header from the dictionary. Subtract two (don't need the strain, or the
# empty column created by a trailing comma
header_len = len(summary_dict[genus][key]['header'].split(',')) - 2
# When processing the results outputs, add the seqid to the summary string
if key == 'results':
summary_string += '{seq},{genus}\n'.format(seq=seqid,
genus=genus)
# For the prediction summary, populate the summary string with the appropriate number of comma-separated
# '0' entries
elif key == 'prediction':
summary_string += '{seq}{empty}\n'.format(seq=seqid,
empty=',0' * header_len)
# Write the summaries to the summary file
with open(summary_dict[genus][key]['summary'], 'a+') as summary:
# Write the header if necessary
if header_string:
summary.write(header_string)
summary.write(summary_string)
def _galaxy_library_upload(finfo, sample_info, config):
"""Upload results to galaxy library.
"""
folder_name = "%s_%s" % (config["fc_date"], config["fc_name"])
storage_dir = utils.safe_makedir(os.path.join(config["dir"], folder_name))
if finfo.get("type") == "directory":
storage_file = None
if finfo.get("ext") == "qc":
pdf_file = qcsummary.prep_pdf(finfo["path"], config)
if pdf_file:
finfo["path"] = pdf_file
finfo["type"] = "pdf"
storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)
else:
storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)
if "galaxy_url" in config and "galaxy_api_key" in config:
galaxy_url = config["galaxy_url"]
if not galaxy_url.endswith("/"):
galaxy_url += "/"
gi = GalaxyInstance(galaxy_url, config["galaxy_api_key"])
else:
raise ValueError("Galaxy upload requires `galaxy_url` and `galaxy_api_key` in config")
if storage_file and sample_info and not finfo.get("index", False) and not finfo.get("plus", False):
_to_datalibrary_safe(storage_file, gi, folder_name, sample_info, config)
def decode(self, envelope, session, **kwargs):
""" :meth:`.WMessengerOnionCoderLayerProto.decode` method implementation.
:param envelope: original envelope
:param session: original session
:param kwargs: additional arguments
:return: WMessengerBytesEnvelope
"""
return WMessengerBytesEnvelope(bytes(WUnHex(envelope.message())), meta=envelope)
def to_native(self, obj):
"""Remove password field when serializing an object"""
ret = super(UserSerializer, self).to_native(obj)
del ret['password']
return ret
def AddProperty(self, interface, name, value):
'''Add property to this object
interface: D-Bus interface to add this to. For convenience you can
specify '' here to add the property to the object's main
interface (as specified on construction).
name: Property name.
value: Property value.
'''
if not interface:
interface = self.interface
try:
self.props[interface][name]
raise dbus.exceptions.DBusException(
'property %s already exists' % name,
name=self.interface + '.PropertyExists')
except KeyError:
# this is what we expect
pass
# copy.copy removes one level of variant-ness, which means that the
# types get exported in introspection data correctly, but we can't do
# this for container types.
if not (isinstance(value, dbus.Dictionary) or isinstance(value, dbus.Array)):
value = copy.copy(value)
self.props.setdefault(interface, {})[name] = value
def action(self, *args, **kwargs):
"""the default action for Support Classifiers invokes any derivied
_action function, trapping any exceptions raised in the process. We
are obligated to catch these exceptions to give subsequent rules the
opportunity to act and perhaps mitigate the error. An error during the
action application is a failure of the rule, not a failure of the
classification system itself."""
try:
return self._action(*args, **kwargs)
except KeyError, x:
self.config.logger.debug(
'Rule %s action failed because of missing key "%s"',
to_str(self.__class__),
x,
)
except Exception, x:
self.config.logger.debug(
'Rule %s action failed because of "%s"',
to_str(self.__class__),
x,
exc_info=True
)
return False
def community_topic_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/topics#create-topic"
api_path = "/api/v2/community/topics.json"
return self.call(api_path, method="POST", data=data, **kwargs)
def namespace_to_regex(namespace):
"""Create a RegexObject from a wildcard namespace."""
db_name, coll_name = namespace.split(".", 1)
# A database name cannot contain a '.' character
db_regex = re.escape(db_name).replace(r"\*", "([^.]*)")
# But a collection name can.
coll_regex = re.escape(coll_name).replace(r"\*", "(.*)")
return re.compile(r"\A" + db_regex + r"\." + coll_regex + r"\Z")
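# Example: a wildcard namespace compiles to a regex whose groups capture the
# matched wildcard portions of the database and collection names.
pattern = namespace_to_regex("analytics_*.events_*")
m = pattern.match("analytics_prod.events_2024.archive")
print(m.groups())                          # -> ('prod', '2024.archive')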
def _t_normals(self):
r"""
Update the throat normals from the voronoi vertices
"""
verts = self['throat.vertices']
value = sp.zeros([len(verts), 3])
for i in range(len(verts)):
if len(sp.unique(verts[i][:, 0])) == 1:
verts_2d = sp.vstack((verts[i][:, 1], verts[i][:, 2])).T
elif len(sp.unique(verts[i][:, 1])) == 1:
verts_2d = sp.vstack((verts[i][:, 0], verts[i][:, 2])).T
else:
verts_2d = sp.vstack((verts[i][:, 0], verts[i][:, 1])).T
hull = sptl.ConvexHull(verts_2d, qhull_options='QJ Pp')
sorted_verts = verts[i][hull.vertices].astype(float)
v1 = sorted_verts[-1]-sorted_verts[0]
v2 = sorted_verts[1]-sorted_verts[0]
value[i] = tr.unit_vector(sp.cross(v1, v2))
return value
def money(s, thousand_sep=".", decimal_sep=","):
"""Converts money amount in string to a Decimal object.
With the default arguments, the format is expected to be
``-38.500,00``, where dots separate thousands and comma the decimals.
Args:
thousand_sep: Separator for thousands.
decimal_sep: Separator for decimals.
Returns:
A ``Decimal`` object of the string encoded money amount.
"""
s = s.replace(thousand_sep, "")
s = s.replace(decimal_sep, ".")
return Decimal(s)
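# Example: the default separators parse European-style amounts; swapping them
# handles the US convention.
print(money("-38.500,00"))                                    # -> Decimal('-38500.00')
print(money("1,234.56", thousand_sep=",", decimal_sep="."))   # -> Decimal('1234.56')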
def complete_sum(self):
"""
Return an equivalent DNF expression that includes all prime
implicants.
"""
node = self.node.complete_sum()
if node is self.node:
return self
else:
return _expr(node)
def findLibrary(name):
"""
Look for a library in the system.
Emulate the algorithm used by dlopen.
`name` must include the prefix, e.g. ``libpython2.4.so``
"""
assert is_unix, "Current implementation for Unix only (Linux, Solaris, AIX)"
lib = None
# Look in the LD_LIBRARY_PATH
lp = compat.getenv('LD_LIBRARY_PATH', '')
for path in lp.split(os.pathsep):
libs = glob(os.path.join(path, name + '*'))
if libs:
lib = libs[0]
break
# Look in /etc/ld.so.cache
if lib is None:
expr = r'/[^\(\)\s]*%s\.[^\(\)\s]*' % re.escape(name)
m = re.search(expr, compat.exec_command('/sbin/ldconfig', '-p'))
if m:
lib = m.group(0)
# Look in the known safe paths
if lib is None:
paths = ['/lib', '/usr/lib']
if is_aix:
paths.append('/opt/freeware/lib')
for path in paths:
libs = glob(os.path.join(path, name + '*'))
if libs:
lib = libs[0]
break
# give up :(
if lib is None:
return None
# Resolve the file name into the soname
dir, file = os.path.split(lib)
return os.path.join(dir, getSoname(lib)) | 0.001619 |
def is_unary_operator(oper):
"""returns True, if operator is unary operator, otherwise False"""
# definition:
# member in class
# ret-type operator symbol()
# ret-type operator [++ --](int)
# globally
# ret-type operator symbol( arg )
# ret-type operator [++ --](X&, int)
symbols = ['!', '&', '~', '*', '+', '++', '-', '--']
if not isinstance(oper, calldef_members.operator_t):
return False
if oper.symbol not in symbols:
return False
if isinstance(oper, calldef_members.member_operator_t):
if len(oper.arguments) == 0:
return True
elif oper.symbol in ['++', '--'] and \
isinstance(oper.arguments[0].decl_type, cpptypes.int_t):
return True
return False
if len(oper.arguments) == 1:
return True
elif oper.symbol in ['++', '--'] \
and len(oper.arguments) == 2 \
and isinstance(oper.arguments[1].decl_type, cpptypes.int_t):
# may be I need to add additional check whether first argument is
# reference or not?
return True
return False | 0.000887 |
def v_full_copy(self, val):
""" Sets full copy mode of trajectory and (!) ALL explored parameters!"""
self._full_copy = bool(val)
for param in self._explored_parameters.values():
if param is not None:
param.v_full_copy = bool(val) | 0.010638 |
def update_extension_statistics(self, extension_statistics_update, publisher_name, extension_name):
"""UpdateExtensionStatistics.
[Preview API]
:param :class:`<ExtensionStatisticUpdate> <azure.devops.v5_0.gallery.models.ExtensionStatisticUpdate>` extension_statistics_update:
:param str publisher_name:
:param str extension_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
content = self._serialize.body(extension_statistics_update, 'ExtensionStatisticUpdate')
self._send(http_method='PATCH',
location_id='a0ea3204-11e9-422d-a9ca-45851cc41400',
version='5.0-preview.1',
route_values=route_values,
content=content) | 0.006809 |
def _tar_and_copy(src_dir, target_dir):
"""Tar and gzip src_dir and copy to GCS target_dir."""
src_dir = src_dir.rstrip("/")
target_dir = target_dir.rstrip("/")
tmp_dir = tempfile.gettempdir().rstrip("/")
src_base = os.path.basename(src_dir)
shell_run(
"tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar.gz -C {src_dir} .",
src_dir=src_dir,
src_base=src_base,
tmp_dir=tmp_dir)
final_destination = "%s/%s.tar.gz" % (target_dir, src_base)
shell_run(
("gsutil cp {tmp_dir}/{src_base}.tar.gz "
"{final_destination}"),
tmp_dir=tmp_dir,
src_base=src_base,
final_destination=final_destination)
return final_destination | 0.014641 |
def Uniform(low, high, tag=None):
"""
A Uniform random variate
Parameters
----------
low : scalar
Lower bound of the distribution support.
high : scalar
Upper bound of the distribution support.
"""
assert low < high, 'Uniform "low" must be less than "high"'
return uv(ss.uniform(loc=low, scale=high - low), tag=tag) | 0.005376 |
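# Sketch of the underlying construction, added for illustration (assumes
# scipy.stats is imported as `ss`, as Uniform() itself requires): a
# Uniform(2, 5) variate wraps a frozen scipy distribution on [2, 5].
_frozen = ss.uniform(loc=2, scale=5 - 2)
assert abs(_frozen.mean() - 3.5) < 1e-12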
def _progress_ret(self, progress, out):
'''
Print progress events
'''
import salt.output
# Get the progress bar
if not hasattr(self, 'progress_bar'):
try:
self.progress_bar = salt.output.get_progress(self.config, out, progress)
except Exception:
raise LoaderError('\nWARNING: Install the `progressbar` python package. '
'Requested job was still run but output cannot be displayed.\n')
salt.output.update_progress(self.config, progress, self.progress_bar, out) | 0.009917 |
def user(self, extra_params=None):
"""
The User currently assigned to the Ticket
"""
if self.get('assigned_to_id', None):
users = self.space.users(
id=self['assigned_to_id'],
extra_params=extra_params
)
if users:
return users[0] | 0.005814 |
def delete(self, id, project_id=None):
"""delete."""
result = db.session.query(Result).filter_by(id=id).first()
if result is None:
response = jsonify({
'result': None, 'message': 'No interface defined for URL.'
})
return response, 404
db.session.delete(result)
db.session.commit()
return jsonify({'result': result.serialize}) | 0.004684 |
def load_ini(filename):
"""
Read a CLASS ``.ini`` file, returning a dictionary of parameters
Parameters
----------
filename : str
the name of an existing parameter file to load, or one included as
part of the CLASS source
Returns
-------
dict :
the input parameters loaded from file
"""
# also look in data dir
path = _find_file(filename)
pars = {}
with open(path, 'r') as ff:
# loop over lines
for lineno, line in enumerate(ff):
if not line: continue
# skip any commented lines with #
if '#' in line: line = line[line.index('#')+1:]
# must have an equals sign to be valid
if "=" not in line: continue
# extract key and value pairs
fields = line.split("=")
if len(fields) != 2:
import warnings
warnings.warn("skipping line number %d: '%s'" %(lineno,line))
continue
pars[fields[0].strip()] = fields[1].strip()
return pars | 0.005556 |
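# Hedged usage sketch (the file contents are invented, and this assumes
# _find_file() returns the literal path when the file already exists):
import os
import tempfile
_tmp = tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False)
_tmp.write("output = tCl,pCl\nl_max_scalars = 2500\n")
_tmp.close()
_pars = load_ini(_tmp.name)
assert _pars == {"output": "tCl,pCl", "l_max_scalars": "2500"}
os.remove(_tmp.name)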
def loader(schema, validator=CerberusValidator, update=None):
"""Create a load function based on schema dict and Validator class.
:param schema: a Cerberus schema dict.
:param validator: the validator class which must be a subclass of
more.cerberus.CerberusValidator which is the default.
:param update: will pass the update flag to the validator, when ``True``
the ``required`` rules will not be checked.
By default it will be set for PUT and PATCH requests to ``True``
and for other requests to ``False``.
You can plug this ``load`` function into a json view.
Returns a ``load`` function that takes a request JSON body
and uses the schema to validate it. This function raises
:class:`more.cerberus.ValidationError` if validation is not successful.
"""
if not issubclass(validator, CerberusValidator):
raise TypeError(
"Validator must be a subclass of more.cerberus.CerberusValidator"
)
return partial(load, schema, validator, update) | 0.00096 |
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via distance
after superposition
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, shape=(n_frames, n_ref_frames)
The RMSD value of each frame of the input trajectory to be
featurized versus each frame in the reference trajectory. The
number of features is the number of reference frames.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
if self.atom_indices is not None:
sliced_traj = traj.atom_slice(self.atom_indices)
else:
sliced_traj = traj
result = libdistance.cdist(
sliced_traj, self.sliced_reference_traj, 'rmsd'
)
return self._transform(result) | 0.002028 |
def get_hidden_signups(self):
""" Return a list of Users who are *not* in the All Students list but have signed up for an activity.
This is usually a list of signups for z-Withdrawn from TJ """
return EighthSignup.objects.filter(scheduled_activity__block=self).exclude(user__in=User.objects.get_students()) | 0.011976 |
def calculateSignature(privateSigningKey, message):
"""
:type privateSigningKey: ECPrivateKey
:type message: bytearray
"""
if privateSigningKey.getType() == Curve.DJB_TYPE:
rand = os.urandom(64)
res = _curve.calculateSignature(rand, privateSigningKey.getPrivateKey(), message)
return res
else:
raise InvalidKeyException("Unknown type: %s" % privateSigningKey.getType()) | 0.008584 |
def convert_mc_times_to_jte_times(times_mc, evt_timestamp_in_ns, evt_mc_time):
"""
Function that converts MC times to JTE times.
Parameters
----------
times_mc : np.ndarray
Time array with MC times.
evt_timestamp_in_ns : int
Total timestamp of the event in nanoseconds.
evt_mc_time : int
        MC time of the event in nanoseconds.
Returns
-------
ndarray
Converted time array with JTE times.
"""
# needs to be cast to normal ndarray (not recarray), or else we
# would get invalid type promotion
times_mc = np.array(times_mc).astype(float)
times_jte = times_mc - evt_timestamp_in_ns + evt_mc_time
return times_jte | 0.00142 |
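# Small worked example (the numbers are invented): an MC hit time of 100 ns in
# an event time-stamped at 1000000 ns with an event MC time of 500 ns maps to
# 100 - 1000000 + 500 = -999400 ns on the JTE time axis.
_jte = convert_mc_times_to_jte_times(np.array([100.0]), 1000000, 500)
assert _jte[0] == -999400.0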
def to_json(self):
"""
Returns the JSON Representation of the content type field validation.
"""
result = {}
for k, v in self._data.items():
result[camel_case(k)] = v
return result | 0.008299 |
def rebuild_token_map(self, partitioner, token_map):
"""
Rebuild our view of the topology from fresh rows from the
system topology tables.
For internal use only.
"""
self.partitioner = partitioner
if partitioner.endswith('RandomPartitioner'):
token_class = MD5Token
elif partitioner.endswith('Murmur3Partitioner'):
token_class = Murmur3Token
elif partitioner.endswith('ByteOrderedPartitioner'):
token_class = BytesToken
else:
self.token_map = None
return
token_to_host_owner = {}
ring = []
for host, token_strings in six.iteritems(token_map):
for token_string in token_strings:
token = token_class.from_string(token_string)
ring.append(token)
token_to_host_owner[token] = host
all_tokens = sorted(ring)
self.token_map = TokenMap(
token_class, token_to_host_owner, all_tokens, self) | 0.001932 |
def p_statement_draw3(p):
""" statement : DRAW expr COMMA expr COMMA expr
"""
p[0] = make_sentence('DRAW3',
make_typecast(TYPE.integer, p[2], p.lineno(3)),
make_typecast(TYPE.integer, p[4], p.lineno(5)),
make_typecast(TYPE.float_, p[6], p.lineno(5))) | 0.002967 |
def currentPlugin( self ):
"""
Returns the currently selected plugin.
:return <XWizardPlugin> || None
"""
col = self.uiWizardTABLE.currentColumn()
row = self.uiWizardTABLE.currentRow()
item = self.uiWizardTABLE.currentItem()
widget = self.uiWizardTABLE.cellWidget(row, col)
if ( not (widget and item and item.isSelected()) ):
return None
return widget.plugin() | 0.024 |
def handle_control_frame(self, frame):
"""
Handle a control frame as defined by RFC 6455.
"""
if frame.opcode == OPCODE_CLOSE:
self.close_frame_received = True
code, reason = frame.unpack_close()
if self.close_frame_sent:
self.onclose(code, reason)
self.sock.close()
raise SocketClosed(True)
else:
self.close_params = (code, reason)
self.send_close_frame(code, reason)
elif frame.opcode == OPCODE_PING:
# Respond with a pong message with identical payload
self.send_frame(ControlFrame(OPCODE_PONG, frame.payload))
elif frame.opcode == OPCODE_PONG:
# Assert that the PONG payload is identical to that of the PING
if not self.ping_sent:
raise PingError('received PONG while no PING was sent')
self.ping_sent = False
if frame.payload != self.ping_payload:
raise PingError('received PONG with invalid payload')
self.ping_payload = None
self.onpong(frame.payload) | 0.001712 |
def validate_accept(form, field):
"""Validate that accept have not been set."""
if field.data and form.reject.data:
raise validators.ValidationError(
_("Both reject and accept cannot be set at the same time.")
) | 0.007491 |
def make_query(catalog):
"""A function to prepare a query
"""
query = {}
request = api.get_request()
index = get_search_index_for(catalog)
limit = request.form.get("limit")
q = request.form.get("q")
    if q and len(q) > 0:
query[index] = q + "*"
else:
return None
portal_type = request.form.get("portal_type")
if portal_type:
if not isinstance(portal_type, list):
portal_type = [portal_type]
query["portal_type"] = portal_type
if limit and limit.isdigit():
query["sort_limit"] = int(limit)
return query | 0.001664 |
def hstack(gctoos, remove_all_metadata_fields=False, error_report_file=None, fields_to_remove=[], reset_ids=False):
""" Horizontally concatenate gctoos.
Args:
gctoos (list of gctoo objects)
remove_all_metadata_fields (bool): ignore/strip all common metadata when combining gctoos
error_report_file (string): path to write file containing error report indicating
problems that occurred during hstack, mainly for inconsistencies in common metadata
fields_to_remove (list of strings): fields to be removed from the
common metadata because they don't agree across files
reset_ids (bool): set to True if sample ids are not unique
Return:
concated (gctoo object)
"""
# Separate each gctoo into its component dfs
row_meta_dfs = []
col_meta_dfs = []
data_dfs = []
srcs = []
for g in gctoos:
row_meta_dfs.append(g.row_metadata_df)
col_meta_dfs.append(g.col_metadata_df)
data_dfs.append(g.data_df)
srcs.append(g.src)
logger.debug("shapes of row_meta_dfs: {}".format([x.shape for x in row_meta_dfs]))
# Concatenate row metadata
all_row_metadata_df = assemble_common_meta(row_meta_dfs, fields_to_remove, srcs, remove_all_metadata_fields, error_report_file)
# Concatenate col metadata
all_col_metadata_df = assemble_concatenated_meta(col_meta_dfs, remove_all_metadata_fields)
# Concatenate the data_dfs
all_data_df = assemble_data(data_dfs, "horiz")
# Make sure df shapes are correct
assert all_data_df.shape[0] == all_row_metadata_df.shape[0], "Number of rows in metadata does not match number of rows in data - all_data_df.shape[0]: {} all_row_metadata_df.shape[0]: {}".format(all_data_df.shape[0], all_row_metadata_df.shape[0])
assert all_data_df.shape[1] == all_col_metadata_df.shape[0], "Number of columns in data does not match number of columns metadata - all_data_df.shape[1]: {} all_col_metadata_df.shape[0]: {}".format(all_data_df.shape[1], all_col_metadata_df.shape[0])
# If requested, reset sample ids to be unique integers and move old sample
# ids into column metadata
if reset_ids:
do_reset_ids(all_col_metadata_df, all_data_df, "horiz")
logger.info("Build GCToo of all...")
concated = GCToo.GCToo(row_metadata_df=all_row_metadata_df,
col_metadata_df=all_col_metadata_df,
data_df=all_data_df)
return concated | 0.004792 |
def html_page_context(app, pagename, templatename, context, doctree):
"""Event handler for the html-page-context signal.
Modifies the context directly.
- Replaces the 'toc' value created by the HTML builder with one
that shows all document titles and the local table of contents.
- Sets display_toc to True so the table of contents is always
displayed, even on empty pages.
- Replaces the 'toctree' function with one that uses the entire
document structure, ignores the maxdepth argument, and uses
only prune and collapse.
"""
rendered_toc = get_rendered_toctree(app.builder, pagename)
context['toc'] = rendered_toc
context['display_toc'] = True # force toctree to display
if "toctree" not in context:
# json builder doesn't use toctree func, so nothing to replace
return
def make_toctree(collapse=True):
return get_rendered_toctree(app.builder,
pagename,
prune=False,
collapse=collapse,
)
context['toctree'] = make_toctree | 0.000852 |
def _find_by_sha1(self, sha1):
"""
Return an |ImagePart| object belonging to this package or |None| if
no matching image part is found. The image part is identified by the
SHA1 hash digest of the image binary it contains.
"""
for image_part in self:
# ---skip unknown/unsupported image types, like SVG---
if not hasattr(image_part, 'sha1'):
continue
if image_part.sha1 == sha1:
return image_part
return None | 0.003766 |
def getQCAnalyses(self):
"""
Return the Quality Control analyses.
:returns: a list of QC analyses
:rtype: List of ReferenceAnalysis/DuplicateAnalysis
"""
qc_types = ['ReferenceAnalysis', 'DuplicateAnalysis']
analyses = self.getAnalyses()
return [a for a in analyses if a.portal_type in qc_types] | 0.005571 |
def value(self, t):
"""See Schedule.value"""
for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
if l_t <= t and t < r_t:
alpha = float(t - l_t) / (r_t - l_t)
return self._interpolation(l, r, alpha)
# t does not belong to any of the pieces, so doom.
assert self._outside_value is not None
return self._outside_value | 0.007109 |
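# Standalone sketch of the same piecewise lookup, added for illustration (the
# endpoint list, linear interpolation and outside value stand in for the
# instance attributes used above).
def _piecewise_value(t, endpoints, outside_value=None):
    # endpoints is a sorted list of (time, value) pairs
    for (l_t, l), (r_t, r) in zip(endpoints[:-1], endpoints[1:]):
        if l_t <= t < r_t:
            alpha = float(t - l_t) / (r_t - l_t)
            return l + alpha * (r - l)  # linear interpolation between endpoints
    assert outside_value is not None
    return outside_value

assert _piecewise_value(5, [(0, 0.0), (10, 1.0)]) == 0.5
assert _piecewise_value(20, [(0, 0.0), (10, 1.0)], outside_value=1.0) == 1.0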
def outputs(ctx):
"""Download outputs for job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon job -j 1 outputs
```
"""
user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job'))
try:
PolyaxonClient().job.download_outputs(user, project_name, _job)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not download outputs for job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success('Files downloaded.') | 0.006051 |
def gopro_set_request_send(self, target_system, target_component, cmd_id, value, force_mavlink1=False):
'''
Request to set a GOPRO_COMMAND with a desired
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
cmd_id : Command ID (uint8_t)
value : Value (uint8_t)
'''
return self.send(self.gopro_set_request_encode(target_system, target_component, cmd_id, value), force_mavlink1=force_mavlink1) | 0.006601 |
def _normalize_query(self, query):
"""
        Converts arrays in the query to comma-separated
        lists for proper API handling.
"""
for k, v in query.items():
if isinstance(v, list):
query[k] = ','.join([str(e) for e in v]) | 0.007067 |
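# Standalone illustration of the same normalization (the query contents are
# invented): list values are flattened in place into comma-separated strings,
# while scalar values are left untouched.
_query = {"ids": [1, 2, 3], "status": "open"}
for _k, _v in _query.items():
    if isinstance(_v, list):
        _query[_k] = ','.join([str(e) for e in _v])
assert _query == {"ids": "1,2,3", "status": "open"}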
def fem(ab, off, angle, zsrc, zrec, lsrc, lrec, depth, freq, etaH, etaV, zetaH,
zetaV, xdirect, isfullspace, ht, htarg, use_ne_eval, msrc, mrec,
loop_freq, loop_off, conv=True):
r"""Return the electromagnetic frequency-domain response.
This function is called from one of the above modelling routines. No
input-check is carried out here. See the main description of :mod:`model`
for information regarding input and output parameters.
This function can be directly used if you are sure the provided input is in
the correct format. This is useful for inversion routines and similar, as
it can speed-up the calculation by omitting input-checks.
"""
# Preallocate array
fEM = np.zeros((freq.size, off.size), dtype=complex)
# Initialize kernel count
# (how many times the wavenumber-domain kernel was calld)
kcount = 0
# If <ab> = 36 (or 63), fEM-field is zero
if ab in [36, ]:
return fEM, kcount, conv
# Get full-space-solution if xdirect=True and model is a full-space or
# if src and rec are in the same layer.
if xdirect and (isfullspace or lsrc == lrec):
fEM += kernel.fullspace(off, angle, zsrc, zrec, etaH[:, lrec],
etaV[:, lrec], zetaH[:, lrec], zetaV[:, lrec],
ab, msrc, mrec)
# If `xdirect = None` we set it here to True, so it is NOT calculated in
# the wavenumber domain. (Only reflected fields are returned.)
if xdirect is None:
xdir = True
else:
xdir = xdirect
# Get angle dependent factors
factAng = kernel.angle_factor(angle, ab, msrc, mrec)
# Compute required lambdas for given hankel-filter-base
# This should be in utils, but this is a backwards-incompatible change.
# Move this to utils for version 2.0.
if ht == 'fht':
# htarg[0] = filter; htarg[1] = pts_per_dec
lambd, int_pts = transform.get_spline_values(htarg[0], off, htarg[1])
if not loop_off:
htarg = (htarg[0], htarg[1], lambd, int_pts)
# If not full-space with xdirect calculate fEM-field
if not isfullspace*xdir:
calc = getattr(transform, ht)
if loop_freq:
for i in range(freq.size):
out = calc(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab,
etaH[None, i, :], etaV[None, i, :],
zetaH[None, i, :], zetaV[None, i, :], xdir,
htarg, use_ne_eval, msrc, mrec)
fEM[None, i, :] += out[0]
kcount += out[1]
conv *= out[2]
elif loop_off:
for i in range(off.size):
# See comments above where it says "ht == 'fht'".
# Get pre-calculated lambd, int_pts for this offset
if ht == 'fht':
htarg = (htarg[0], htarg[1], lambd[None, i, :], int_pts[i])
out = calc(zsrc, zrec, lsrc, lrec, off[None, i],
factAng[None, i], depth, ab, etaH, etaV, zetaH,
zetaV, xdir, htarg, use_ne_eval, msrc, mrec)
fEM[:, None, i] += out[0]
kcount += out[1]
conv *= out[2]
else:
out = calc(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH,
etaV, zetaH, zetaV, xdir, htarg, use_ne_eval, msrc,
mrec)
fEM += out[0]
kcount += out[1]
conv *= out[2]
return fEM, kcount, conv | 0.00028 |
def _invalid_triple_quote(self, quote, row, col=None):
"""Add a message for an invalid triple quote.
Args:
quote: The quote characters that were found.
row: The row number the quote characters were found on.
col: The column the quote characters were found on.
"""
self.add_message(
'invalid-triple-quote',
line=row,
args=(quote, TRIPLE_QUOTE_OPTS.get(self.config.triple_quote)),
**self.get_offset(col)
) | 0.003788 |
def _expanded_sql(self):
"""Get the expanded BigQuery SQL string of this UDF
Returns
The expanded SQL string of this UDF
"""
if not self._sql:
self._sql = UDF._build_udf(self._name, self._code, self._return_type, self._params,
self._language, self._imports)
return self._sql | 0.008824 |
def get_body_from_file(kwds):
"""Reads message body if specified via filepath."""
if kwds["file"] and os.path.isfile(kwds["file"]):
kwds["body"] = open(kwds["file"], "r").read()
kwds["file"] = None | 0.004525 |
def call_function(self, c, i):
"""
Implement the CALL_FUNCTION_ operation.
.. _CALL_FUNCTION: https://docs.python.org/3/library/dis.html#opcode-CALL_FUNCTION
"""
callable_ = self.__stack[-1-i.arg]
args = tuple(self.__stack[len(self.__stack) - i.arg:])
self._print('call function')
self._print('\tfunction ', callable_)
self._print('\ti.arg ', i.arg)
self._print('\targs ', args)
self.call_callbacks('CALL_FUNCTION', callable_, *args)
if isinstance(callable_, FunctionType):
ret = callable_(*args)
elif callable_ is builtins.__build_class__:
ret = self.build_class(callable_, args)
elif callable_ is builtins.globals:
ret = self.builtins_globals()
else:
ret = callable_(*args)
self.pop(1 + i.arg)
self.__stack.append(ret) | 0.007439 |
def init_logging(settings):
'''Set up logger'''
lg_format = '%(asctime)s : - %(message)s'
lg_dateformat = '%Y.%m.%d %H:%M:%S'
logging.basicConfig(format=lg_format, datefmt=lg_dateformat)
log = get_logger()
handler = logging.handlers.WatchedFileHandler(
filename=settings['log_file'] \
if 'log_file' in settings.keys() else None,
encoding='utf-8')
formatter = logging.Formatter(fmt=lg_format, datefmt=lg_dateformat)
handler.setFormatter(formatter)
log.addHandler(handler)
return log | 0.003295 |
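# Hedged usage sketch (the settings dict and path are invented; assumes
# get_logger() above returns a standard logging.Logger):
_log = init_logging({"log_file": "/tmp/example.log"})
_log.warning("pipeline started")  # the record is also written to /tmp/example.log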
def divide(x: LispNumber, y: LispNumber) -> LispNumber:
"""Division reducer. If both arguments are integers, return a Fraction.
Otherwise, return the true division of x and y."""
if isinstance(x, int) and isinstance(y, int):
return Fraction(x, y)
return x / y | 0.003534 |
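# Worked examples added for illustration (assumes `from fractions import
# Fraction` is in scope, as divide() itself requires): two ints yield an exact
# Fraction, while a float argument falls back to true division.
assert divide(1, 3) == Fraction(1, 3)
assert divide(1.0, 4) == 0.25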
def monkey_patch_override_instance_method(instance):
"""
Override an instance method with a new version of the same name. The
original method implementation is made available within the override method
as `_original_<METHOD_NAME>`.
"""
def perform_override(override_fn):
fn_name = override_fn.__name__
original_fn_name = '_original_' + fn_name
# Override instance method, if it hasn't already been done
if not hasattr(instance, original_fn_name):
original_fn = getattr(instance, fn_name)
setattr(instance, original_fn_name, original_fn)
bound_override_fn = override_fn.__get__(instance)
setattr(instance, fn_name, bound_override_fn)
return perform_override | 0.001309 |
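# Hedged usage sketch (the class and method names are invented): the returned
# decorator replaces greet() on one instance only and exposes the original
# implementation as _original_greet.
class _Greeter:
    def greet(self):
        return "hello"

_g = _Greeter()

@monkey_patch_override_instance_method(_g)
def greet(self):
    return self._original_greet().upper()

assert _g.greet() == "HELLO"
assert _Greeter().greet() == "hello"  # other instances are unaffected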
def notify_slack(title, content, attachment_color="#4bb543", short_threshold=40, token=None,
channel=None, mention_user=None, **kwargs):
"""
Sends a slack notification and returns *True* on success. The communication with the slack API
might have some delays and is therefore handled by a thread. The format of the notification
depends on *content*. If it is a string, a simple text notification is sent. Otherwise, it
should be a dictionary whose fields are used to build a message attachment with two-column
formatting.
"""
# test import
import slackclient # noqa: F401
cfg = Config.instance()
# get default token and channel
if not token:
token = cfg.get_expanded("notifications", "slack_token")
if not channel:
channel = cfg.get_expanded("notifications", "slack_channel")
if not token or not channel:
logger.warning("cannot send Slack notification, token ({}) or channel ({}) empty".format(
token, channel))
return False
# append the user to mention to the title
# unless explicitly set to empty string
mention_text = ""
if mention_user is None:
mention_user = cfg.get_expanded("notifications", "slack_mention_user")
if mention_user:
mention_text = " (@{})".format(mention_user)
# request data for the API call
request = {
"channel": channel,
"as_user": True,
"parse": "full",
}
# standard or attachment content?
if isinstance(content, six.string_types):
request["text"] = "{}{}\n\n{}".format(title, mention_text, content)
else:
# content is a dict, send its data as an attachment
request["text"] = "{} {}".format(title, mention_text)
request["attachments"] = at = {
"color": attachment_color,
"fields": [],
"fallback": "{}{}\n\n".format(title, mention_text),
}
# fill the attachment fields and extend the fallback
for key, value in content.items():
at["fields"].append({
"title": key,
"value": value,
"short": len(value) <= short_threshold,
})
at["fallback"] += "_{}_: {}\n".format(key, value)
# extend by arbitrary kwargs
request.update(kwargs)
# threaded, non-blocking API communication
thread = threading.Thread(target=_notify_slack, args=(token, request))
thread.start()
return True | 0.003208 |