text (string, lengths 78–104k) | score (float64, 0–0.18) |
---|---|
def _ValidateTimeRange(timerange):
"""Parses a timerange argument and always returns non-None timerange."""
if len(timerange) != 2:
raise ValueError("Timerange should be a sequence with 2 items.")
(start, end) = timerange
precondition.AssertOptionalType(start, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(end, rdfvalue.RDFDatetime) | 0.016854 |
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
return {
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and (change or context['show_delete'])),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True,
} | 0.007937 |
def call_interval(freq, **kwargs):
"""Decorator for the CallInterval wrapper"""
def wrapper(f):
return CallInterval(f, freq, **kwargs)
return wrapper | 0.005882 |
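The CallInterval wrapper itself is not shown above; the sketch below pairs the decorator with a minimal, hypothetical CallInterval (not the library's own class) that only forwards a call once `freq` seconds have elapsed since the last forwarded call.

import time

class CallInterval:
    """Hypothetical stand-in: forwards the call only if `freq` seconds have elapsed."""
    def __init__(self, func, freq):
        self.func = func
        self.freq = freq
        self._last = float("-inf")

    def __call__(self, *args, **kwargs):
        now = time.monotonic()
        if now - self._last >= self.freq:
            self._last = now
            return self.func(*args, **kwargs)

@call_interval(0.5)
def heartbeat():
    print("tick")

heartbeat()  # executes
heartbeat()  # silently skipped: called again within 0.5 s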
def find_exts(top, exts, exclude_dirs=None, include_dirs=None,
match_mode="basename"):
"""
Find all files with the extension listed in `exts` that are located within
the directory tree rooted at `top` (including top itself, but excluding
'.' and '..')
Args:
top (str): Root directory
exts (str or list of strings): List of extensions.
exclude_dirs (str): Wildcards used to exclude particular directories.
Can be concatenated via `|`
include_dirs (str): Wildcards used to select particular directories.
`include_dirs` and `exclude_dirs` are mutually exclusive
match_mode (str): "basename" if match should be done on the basename.
"abspath" for absolute path.
Returns:
(list of str): Absolute paths of the files.
Examples::
# Find all pdf and ps files starting from the current directory.
find_exts(".", ("pdf", "ps"))
# Find all pdf files, exclude hidden directories and dirs whose name
# starts with `_`
find_exts(".", "pdf", exclude_dirs="_*|.*")
# Find all ps files, in the directories whose basename starts with
# output.
find_exts(".", "ps", include_dirs="output*"))
"""
from monty.string import list_strings
exts = list_strings(exts)
# Handle file!
if os.path.isfile(top):
return [os.path.abspath(top)] if any(top.endswith(ext)
for ext in exts) else []
# Build shell-style wildcards.
from monty.fnmatch import WildCard
if exclude_dirs is not None:
exclude_dirs = WildCard(exclude_dirs)
if include_dirs is not None:
include_dirs = WildCard(include_dirs)
mangle = dict(
basename=os.path.basename,
abspath=os.path.abspath)[match_mode]
# Assume directory
paths = []
for dirpath, dirnames, filenames in os.walk(top):
dirpath = os.path.abspath(dirpath)
if exclude_dirs and exclude_dirs.match(mangle(dirpath)):
continue
if include_dirs and not include_dirs.match(mangle(dirpath)):
continue
for filename in filenames:
if any(filename.endswith(ext) for ext in exts):
paths.append(os.path.join(dirpath, filename))
return paths | 0.000425 |
def find_lt(a, x):
"""Find rightmost value less than x"""
i = bisect.bisect_left(a, x)
if i:
return a[i-1]
raise ValueError | 0.006803 |
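A quick usage sketch, assuming `find_lt` and its `bisect` import are in scope:

grades = [60, 70, 80, 90]
assert find_lt(grades, 85) == 80   # rightmost value strictly less than 85
assert find_lt(grades, 65) == 60
# find_lt(grades, 50) raises ValueError: no element is less than 50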
def get_area_def(self, key, info=None):
"""Create AreaDefinition for specified product.
        Projection information is hard-coded for the 0-degree geos projection.
        The test dataset doesn't provide the values in the file container;
        only fill values are inserted.
"""
# TODO Get projection information from input file
a = 6378169.
h = 35785831.
b = 6356583.8
lon_0 = 0.
# area_extent = (-5432229.9317116784, -5429229.5285458621,
# 5429229.5285458621, 5432229.9317116784)
area_extent = (-5570248.4773392612, -5567248.074173444,
5567248.074173444, 5570248.4773392612)
proj_dict = {'a': float(a),
'b': float(b),
'lon_0': float(lon_0),
'h': float(h),
'proj': 'geos',
'units': 'm'}
area = geometry.AreaDefinition(
'LI_area_name',
"LI area",
'geosli',
proj_dict,
self.ncols,
self.nlines,
area_extent)
self.area = area
logger.debug("Dataset area definition: \n {}".format(area))
return area | 0.00161 |
def add_key(self, key_id, key):
"""
::
POST /:login/keys
:param key_id: label for the new key
:type key_id: :py:class:`basestring`
:param key: the full SSH RSA public key
:type key: :py:class:`str`
Uploads a public key to be added to the account's credentials.
"""
data = {'name': str(key_id), 'key': str(key)}
j, _ = self.request('POST', '/keys', data=data)
return j | 0.011928 |
def on_write_timeout(self, query, consistency, write_type,
required_responses, received_responses, retry_num):
"""
This is called when a write operation times out from the coordinator's
perspective (i.e. a replica did not respond to the coordinator in time).
`query` is the :class:`.Statement` that timed out.
`consistency` is the :class:`.ConsistencyLevel` that the operation was
attempted at.
`write_type` is one of the :class:`.WriteType` enums describing the
type of write operation.
The `required_responses` and `received_responses` parameters describe
how many replicas needed to acknowledge the write to meet the requested
consistency level and how many replicas actually did acknowledge the
write before the coordinator timed out the request.
`retry_num` counts how many times the operation has been retried, so
the first time this method is called, `retry_num` will be 0.
        By default, failed write operations will be retried at most once, and
they will only be retried if the `write_type` was
:attr:`~.WriteType.BATCH_LOG`.
"""
if retry_num != 0:
return self.RETHROW, None
elif write_type == WriteType.BATCH_LOG:
return self.RETRY, consistency
else:
return self.RETHROW, None | 0.002827 |
def erase(self):
"""White out the progress bar."""
with self._at_last_line():
self.stream.write(self._term.clear_eol)
self.stream.flush() | 0.011561 |
def a2b_base58(s):
"""Convert base58 to binary using BASE58_ALPHABET."""
v, prefix = to_long(BASE58_BASE, lambda c: BASE58_LOOKUP[c], s.encode("utf8"))
return from_long(v, prefix, 256, lambda x: x) | 0.009569 |
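The `to_long`/`from_long` helpers and the BASE58 tables are not shown here; as an illustration of the same idea, the standalone sketch below decodes Base58 with plain integer arithmetic. This is not the implementation used above, and the alphabet and test vector are the commonly used Bitcoin ones.

_ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"

def base58_to_bytes(s):
    # Accumulate the base-58 digits into one big integer.
    n = 0
    for ch in s:
        n = n * 58 + _ALPHABET.index(ch)
    # Each leading '1' encodes one leading zero byte (the "prefix" above).
    prefix = len(s) - len(s.lstrip("1"))
    body = n.to_bytes((n.bit_length() + 7) // 8, "big") if n else b""
    return b"\x00" * prefix + body

assert base58_to_bytes("11") == b"\x00\x00"
assert base58_to_bytes("StV1DL6CwTryKyV") == b"hello world"  # widely used test vector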
async def contains_albums(self, *albums: Sequence[Union[str, Album]]) -> List[bool]:
"""Check if one or more albums is already saved in the current Spotify user’s ‘Your Music’ library.
Parameters
----------
albums : Union[Album, str]
            A sequence of album objects or Spotify IDs
"""
_albums = [(obj if isinstance(obj, str) else obj.id) for obj in albums]
return await self.user.http.is_saved_album(_albums) | 0.008439 |
def _definition_from_example(example):
"""Generates a swagger definition json from a given example
Works only for simple types in the dict
Args:
example: The example for which we want a definition
Type is DICT
Returns:
A dict that is the swagger definition json
"""
assert isinstance(example, dict)
def _has_simple_type(value):
accepted = (str, int, float, bool)
return isinstance(value, accepted)
definition = {
'type': 'object',
'properties': {},
}
for key, value in example.items():
if not _has_simple_type(value):
raise Exception("Not implemented yet")
ret_value = None
        if isinstance(value, bool):
            # bool is checked before int because bool is a subclass of int
            ret_value = {'type': 'boolean'}
        elif isinstance(value, str):
            ret_value = {'type': 'string'}
        elif isinstance(value, int):
            ret_value = {'type': 'integer', 'format': 'int64'}
        elif isinstance(value, float):
            ret_value = {'type': 'number', 'format': 'double'}
else:
raise Exception("Not implemented yet")
definition['properties'][key] = ret_value
return definition | 0.001491 |
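For example, with the function above in scope, a flat example dict produces a definition like this:

example = {"name": "widget", "count": 3, "price": 9.99, "active": True}
definition = _definition_from_example(example)
assert definition["type"] == "object"
assert definition["properties"]["name"] == {"type": "string"}
assert definition["properties"]["count"] == {"type": "integer", "format": "int64"}
assert definition["properties"]["price"] == {"type": "number", "format": "double"}
assert definition["properties"]["active"] == {"type": "boolean"}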
def iterate_symbols():
"""
Return an iterator yielding registered netcodes.
"""
for prefix in search_prefixes():
package = importlib.import_module(prefix)
for importer, modname, ispkg in pkgutil.walk_packages(path=package.__path__, onerror=lambda x: None):
network = network_for_netcode(modname)
if network:
yield network.symbol.upper() | 0.004902 |
def genl_send_simple(sk, family, cmd, version, flags):
"""Send a Generic Netlink message consisting only of a header.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/genl.c#L84
This function is a shortcut for sending a Generic Netlink message without any message payload. The message will only
consist of the Netlink and Generic Netlink headers. The header is constructed based on the specified parameters and
passed on to nl_send_simple() to send it on the specified socket.
Positional arguments:
sk -- Generic Netlink socket (nl_sock class instance).
family -- numeric family identifier (integer).
cmd -- numeric command identifier (integer).
version -- interface version (integer).
flags -- additional Netlink message flags (integer).
Returns:
0 on success or a negative error code.
"""
hdr = genlmsghdr(cmd=cmd, version=version)
return int(nl_send_simple(sk, family, flags, hdr, hdr.SIZEOF)) | 0.00308 |
def getShare(store, role, shareID):
"""
Retrieve the accessible facet of an Item previously shared with
L{shareItem}.
This method is pending deprecation, and L{Role.getShare} should be
preferred in new code.
@param store: an axiom store (XXX must be the same as role.store)
@param role: a L{Role}, the primary role for a user attempting to retrieve
the given item.
@return: a L{SharedProxy}. This is a wrapper around the shared item which
only exposes those interfaces explicitly allowed for the given role.
@raise: L{NoSuchShare} if there is no item shared to the given role for the
given shareID.
"""
warnings.warn("Use Role.getShare() instead of sharing.getShare().",
PendingDeprecationWarning,
stacklevel=2)
return role.getShare(shareID) | 0.001186 |
def category(self, title, pageid=None, cparams=None, namespace=None):
"""
Returns category query string
"""
query = self.LIST.substitute(
WIKI=self.uri,
ENDPOINT=self.endpoint,
LIST='categorymembers')
status = pageid or title
query += "&cmlimit=500"
if namespace is not None:
query += "&cmnamespace=%d" % namespace
if title and pageid:
title = None
if title:
query += "&cmtitle=" + safequote(title)
if pageid:
query += "&cmpageid=%d" % pageid
if cparams:
query += cparams
status += ' (%s)' % cparams
self.set_status('categorymembers', status)
return query | 0.002581 |
def results(self, query_name):
"""
Gets a single saved query with a 'result' object for a project from the
Keen IO API given a query name.
Read or Master key must be set.
"""
url = "{0}/{1}/result".format(self.saved_query_url, query_name)
response = self._get_json(HTTPMethods.GET, url, self._get_read_key())
return response | 0.005128 |
def dumps(asts):
"""
    Create a compressed string from a Trace.
"""
d = asts.values.tostring()
t = asts.index.values.astype(float).tostring()
lt = struct.pack('<L', len(t))
i = asts.name.encode('utf-8')
li = struct.pack('<L', len(i))
try: # python 2
return buffer(zlib.compress(li + lt + i + t + d))
except NameError: # python 3
return zlib.compress(li + lt + i + t + d) | 0.002342 |
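The compressed layout is: a 4-byte length for the encoded name, a 4-byte length for the timestamp buffer, then the name, timestamps, and values. A hypothetical reader for that layout (not part of the original module) could look like:

import struct
import zlib

def loads_raw(blob):
    """Hypothetical inverse of dumps(): split the compressed blob back into its parts."""
    raw = zlib.decompress(blob)
    (li,) = struct.unpack('<L', raw[:4])       # length of the encoded name
    (lt,) = struct.unpack('<L', raw[4:8])      # length of the timestamp buffer
    name = raw[8:8 + li].decode('utf-8')
    t_bytes = raw[8 + li:8 + li + lt]          # e.g. np.frombuffer(t_bytes, dtype=float)
    d_bytes = raw[8 + li + lt:]
    return name, t_bytes, d_bytes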
def _detect_branching(self, Dseg: np.ndarray, tips: np.ndarray, seg_reference=None):
"""Detect branching on given segment.
Call function __detect_branching three times for all three orderings of
tips. Points that do not belong to the same segment in all three
        orderings are assigned to a fourth segment, which Haghverdi
        et al. (2016) refer to as 'undecided cells'.
Parameters
----------
Dseg
Dchosen distance matrix restricted to segment.
tips
The three tip points. They form a 'triangle' that contains the data.
Returns
-------
ssegs : list of np.ndarray
List of segments obtained from splitting the single segment defined
via the first two tip cells.
ssegs_tips : list of np.ndarray
List of tips of segments in ssegs.
"""
if self.flavor == 'haghverdi16':
ssegs = self._detect_branching_single_haghverdi16(Dseg, tips)
elif self.flavor == 'wolf17_tri':
ssegs = self._detect_branching_single_wolf17_tri(Dseg, tips)
elif self.flavor == 'wolf17_bi' or self.flavor == 'wolf17_bi_un':
ssegs = self._detect_branching_single_wolf17_bi(Dseg, tips)
else:
raise ValueError('`flavor` needs to be in {"haghverdi16", "wolf17_tri", "wolf17_bi"}.')
# make sure that each data point has a unique association with a segment
masks = np.zeros((len(ssegs), Dseg.shape[0]), dtype=bool)
for iseg, seg in enumerate(ssegs):
masks[iseg][seg] = True
nonunique = np.sum(masks, axis=0) > 1
ssegs = []
for iseg, mask in enumerate(masks):
mask[nonunique] = False
ssegs.append(np.arange(Dseg.shape[0], dtype=int)[mask])
# compute new tips within new segments
ssegs_tips = []
for inewseg, newseg in enumerate(ssegs):
if len(np.flatnonzero(newseg)) <= 1:
                logg.warn('detected group with only {} cells'.format(len(np.flatnonzero(newseg))))
secondtip = newseg[np.argmax(Dseg[tips[inewseg]][newseg])]
ssegs_tips.append([tips[inewseg], secondtip])
undecided_cells = np.arange(Dseg.shape[0], dtype=int)[nonunique]
if len(undecided_cells) > 0:
ssegs.append(undecided_cells)
# establish the connecting points with the other segments
ssegs_connects = [[], [], [], []]
for inewseg, newseg_tips in enumerate(ssegs_tips):
reference_point = newseg_tips[0]
# closest cell to the new segment within undecided cells
closest_cell = undecided_cells[np.argmin(Dseg[reference_point][undecided_cells])]
ssegs_connects[inewseg].append(closest_cell)
# closest cell to the undecided cells within new segment
closest_cell = ssegs[inewseg][np.argmin(Dseg[closest_cell][ssegs[inewseg]])]
ssegs_connects[-1].append(closest_cell)
# also compute tips for the undecided cells
tip_0 = undecided_cells[np.argmax(Dseg[undecided_cells[0]][undecided_cells])]
tip_1 = undecided_cells[np.argmax(Dseg[tip_0][undecided_cells])]
ssegs_tips.append([tip_0, tip_1])
ssegs_adjacency = [[3], [3], [3], [0, 1, 2]]
trunk = 3
elif len(ssegs) == 3:
reference_point = np.zeros(3, dtype=int)
reference_point[0] = ssegs_tips[0][0]
reference_point[1] = ssegs_tips[1][0]
reference_point[2] = ssegs_tips[2][0]
closest_points = np.zeros((3, 3), dtype=int)
# this is another strategy than for the undecided_cells
# here it's possible to use the more symmetric procedure
# shouldn't make much of a difference
closest_points[0, 1] = ssegs[1][np.argmin(Dseg[reference_point[0]][ssegs[1]])]
closest_points[1, 0] = ssegs[0][np.argmin(Dseg[reference_point[1]][ssegs[0]])]
closest_points[0, 2] = ssegs[2][np.argmin(Dseg[reference_point[0]][ssegs[2]])]
closest_points[2, 0] = ssegs[0][np.argmin(Dseg[reference_point[2]][ssegs[0]])]
closest_points[1, 2] = ssegs[2][np.argmin(Dseg[reference_point[1]][ssegs[2]])]
closest_points[2, 1] = ssegs[1][np.argmin(Dseg[reference_point[2]][ssegs[1]])]
added_dist = np.zeros(3)
added_dist[0] = Dseg[closest_points[1, 0], closest_points[0, 1]] + Dseg[closest_points[2, 0], closest_points[0, 2]]
added_dist[1] = Dseg[closest_points[0, 1], closest_points[1, 0]] + Dseg[closest_points[2, 1], closest_points[1, 2]]
added_dist[2] = Dseg[closest_points[1, 2], closest_points[2, 1]] + Dseg[closest_points[0, 2], closest_points[2, 0]]
trunk = np.argmin(added_dist)
ssegs_adjacency = [[trunk] if i != trunk else
[j for j in range(3) if j != trunk]
for i in range(3)]
ssegs_connects = [[closest_points[i, trunk]] if i != trunk else
[closest_points[trunk, j] for j in range(3) if j != trunk]
for i in range(3)]
else:
trunk = 0
ssegs_adjacency = [[1], [0]]
reference_point_in_0 = ssegs_tips[0][0]
closest_point_in_1 = ssegs[1][np.argmin(Dseg[reference_point_in_0][ssegs[1]])]
reference_point_in_1 = closest_point_in_1 # ssegs_tips[1][0]
closest_point_in_0 = ssegs[0][np.argmin(Dseg[reference_point_in_1][ssegs[0]])]
ssegs_connects = [[closest_point_in_1], [closest_point_in_0]]
return ssegs, ssegs_tips, ssegs_adjacency, ssegs_connects, trunk | 0.003778 |
def get_size(path):
'''Return the size of path in bytes if it exists and can be determined.'''
size = os.path.getsize(path)
for item in os.walk(path):
for file in item[2]:
size += os.path.getsize(os.path.join(item[0], file))
return size | 0.003676 |
def ahrs3_encode(self, roll, pitch, yaw, altitude, lat, lng, v1, v2, v3, v4):
'''
Status of third AHRS filter if available. This is for ANU research
group (Ali and Sean)
roll : Roll angle (rad) (float)
pitch : Pitch angle (rad) (float)
yaw : Yaw angle (rad) (float)
altitude : Altitude (MSL) (float)
lat : Latitude in degrees * 1E7 (int32_t)
lng : Longitude in degrees * 1E7 (int32_t)
v1 : test variable1 (float)
v2 : test variable2 (float)
v3 : test variable3 (float)
v4 : test variable4 (float)
'''
return MAVLink_ahrs3_message(roll, pitch, yaw, altitude, lat, lng, v1, v2, v3, v4) | 0.004803 |
def application(environ, start_response):
"""WSGI interface.
"""
def send_response(status, body):
if not isinstance(body, bytes):
body = body.encode('utf-8')
start_response(status, [('Content-Type', 'text/plain'),
('Content-Length', '%d' % len(body))])
return [body]
if environ['REQUEST_METHOD'] != 'POST':
return send_response('403 Forbidden', "invalid request")
# Gets the posted input
try:
request_body_size = int(environ['CONTENT_LENGTH'])
except (KeyError, ValueError):
return send_response('400 Bad Request', "invalid content length")
if request_body_size > MAX_SIZE:
return send_response('403 Forbidden', "report too big")
request_body = environ['wsgi.input'].read(request_body_size)
# Tries to store
response_body = store(request_body, environ.get('REMOTE_ADDR'))
if not response_body:
status = '200 OK'
response_body = "stored"
else:
status = '501 Server Error'
# Sends the response
return send_response(status, response_body) | 0.000889 |
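A minimal way to exercise this handler locally, assuming the module-level `MAX_SIZE` and `store()` used above are defined, is to serve it with the standard library:

from wsgiref.simple_server import make_server

if __name__ == '__main__':
    # Serve the WSGI callable above on localhost:8000 for manual testing.
    httpd = make_server('127.0.0.1', 8000, application)
    httpd.serve_forever()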
def size_container_folding(value):
"""
Convert value to ast expression if size is not too big.
Converter for sized container.
"""
if len(value) < MAX_LEN:
if isinstance(value, list):
return ast.List([to_ast(elt) for elt in value], ast.Load())
elif isinstance(value, tuple):
return ast.Tuple([to_ast(elt) for elt in value], ast.Load())
elif isinstance(value, set):
return ast.Set([to_ast(elt) for elt in value])
elif isinstance(value, dict):
keys = [to_ast(elt) for elt in value.keys()]
values = [to_ast(elt) for elt in value.values()]
return ast.Dict(keys, values)
elif isinstance(value, np.ndarray):
return ast.Call(func=ast.Attribute(
ast.Name(mangle('numpy'), ast.Load(), None),
'array',
ast.Load()),
args=[to_ast(totuple(value.tolist())),
ast.Attribute(
ast.Name(mangle('numpy'), ast.Load(), None),
value.dtype.name,
ast.Load())],
keywords=[])
else:
raise ConversionError()
else:
raise ToNotEval() | 0.000795 |
def slamdunkFilterStatsTable(self):
""" Take the parsed filter stats from Slamdunk and add it to a separate table """
headers = OrderedDict()
headers['mapped'] = {
'namespace': 'Slamdunk',
'title': '{} Mapped'.format(config.read_count_prefix),
'description': '# mapped reads ({})'.format(config.read_count_desc),
'shared_key': 'read_count',
'min': 0,
'format': '{:,.2f}',
'suffix': config.read_count_prefix,
'scale': 'YlGn',
'modify': lambda x: float(x) * config.read_count_multiplier,
}
headers['multimapper'] = {
'namespace': 'Slamdunk',
'title': '{} Multimap-Filtered'.format(config.read_count_prefix),
'description': '# multimap-filtered reads ({})'.format(config.read_count_desc),
'shared_key': 'read_count',
'min': 0,
'format': '{:,.2f}',
'suffix': config.read_count_prefix,
'scale': 'OrRd',
'modify': lambda x: float(x) * config.read_count_multiplier,
}
headers['nmfiltered'] = {
'namespace': 'Slamdunk',
'title': '{} NM-Filtered'.format(config.read_count_prefix),
'description': '# NM-filtered reads ({})'.format(config.read_count_desc),
'shared_key': 'read_count',
'min': 0,
'format': '{:,.2f}',
'suffix': config.read_count_prefix,
'scale': 'OrRd',
'modify': lambda x: float(x) * config.read_count_multiplier,
}
headers['idfiltered'] = {
'namespace': 'Slamdunk',
'title': '{} Identity-Filtered'.format(config.read_count_prefix),
'description': '# identity-filtered reads ({})'.format(config.read_count_desc),
'shared_key': 'read_count',
'min': 0,
'format': '{:,.2f}',
'suffix': config.read_count_prefix,
'scale': 'OrRd',
'modify': lambda x: float(x) * config.read_count_multiplier,
}
headers['mqfiltered'] = {
'namespace': 'Slamdunk',
'title': '{} MQ-Filtered'.format(config.read_count_prefix),
'description': '# MQ-filtered reads ({})'.format(config.read_count_desc),
'shared_key': 'read_count',
'min': 0,
'format': '{:,.2f}',
'suffix': config.read_count_prefix,
'scale': 'OrRd',
'modify': lambda x: float(x) * config.read_count_multiplier,
}
pconfig = {
'id': 'slamdunk_filtering_table',
'min': 0,
}
self.add_section (
name = 'Filter statistics',
anchor = 'slamdunk_filtering',
description = 'This table shows the number of reads filtered with each filter criterion during filtering phase of slamdunk.',
plot = table.plot(self.slamdunk_data, headers, pconfig)
) | 0.005972 |
def freshenFocus(self):
""" Did something which requires a new look. Move scrollbar up.
This often needs to be delayed a bit however, to let other
events in the queue through first. """
self.top.update_idletasks()
self.top.after(10, self.setViewAtTop) | 0.006667 |
def versions():
"""Report versions"""
stream = sys.stdout if _PY3K else sys.stderr
print('PyQ', __version__, file=stream)
if _np is not None:
print('NumPy', _np.__version__, file=stream)
print('KDB+ %s (%s) %s' % tuple(q('.z.K,.z.k,.z.o')), file=stream)
print('Python', sys.version, file=stream) | 0.003058 |
def _await_descriptor_upload(tor_protocol, onion, progress, await_all_uploads):
"""
Internal helper.
:param tor_protocol: ITorControlProtocol instance
:param onion: IOnionService instance
    :param progress: a progress callback, or None
:returns: a Deferred that fires once we've detected at least one
descriptor upload for the service (as detected by listening for
HS_DESC events)
"""
# For v3 services, Tor attempts to upload to 16 services; we'll
# assume that for now but also cap it (we want to show some
# progress for "attempting uploads" but we need to decide how
# much) .. so we leave 50% of the "progress" for attempts, and the
# other 50% for "are we done" (which is either "one thing
# uploaded" or "all the things uploaded")
attempted_uploads = set()
confirmed_uploads = set()
failed_uploads = set()
uploaded = defer.Deferred()
await_all = False if await_all_uploads is None else await_all_uploads
def translate_progress(tag, description):
if progress:
done = len(confirmed_uploads) + len(failed_uploads)
done_endpoint = float(len(attempted_uploads)) if await_all else 1.0
done_pct = 0 if not attempted_uploads else float(done) / done_endpoint
started_pct = float(min(16, len(attempted_uploads))) / 16.0
try:
progress(
(done_pct * 50.0) + (started_pct * 50.0),
tag,
description,
)
except Exception:
log.err()
def hostname_matches(hostname):
if IAuthenticatedOnionClients.providedBy(onion):
return hostname[:-6] == onion.get_permanent_id()
else:
# provides IOnionService
return onion.hostname == hostname
def hs_desc(evt):
"""
From control-spec:
"650" SP "HS_DESC" SP Action SP HSAddress SP AuthType SP HsDir
[SP DescriptorID] [SP "REASON=" Reason] [SP "REPLICA=" Replica]
"""
args = evt.split()
subtype = args[0]
if subtype == 'UPLOAD':
if hostname_matches('{}.onion'.format(args[1])):
attempted_uploads.add(args[3])
translate_progress(
"wait_descriptor",
"Upload to {} started".format(args[3])
)
elif subtype == 'UPLOADED':
# we only need ONE successful upload to happen for the
# HS to be reachable.
# unused? addr = args[1]
# XXX FIXME I think tor is sending the onion-address
# properly with these now, so we can use those
# (i.e. instead of matching to "attempted_uploads")
if args[3] in attempted_uploads:
confirmed_uploads.add(args[3])
log.msg("Uploaded '{}' to '{}'".format(args[1], args[3]))
translate_progress(
"wait_descriptor",
"Successful upload to {}".format(args[3])
)
if not uploaded.called:
if await_all:
if (len(failed_uploads) + len(confirmed_uploads)) == len(attempted_uploads):
uploaded.callback(onion)
else:
uploaded.callback(onion)
elif subtype == 'FAILED':
if hostname_matches('{}.onion'.format(args[1])):
failed_uploads.add(args[3])
translate_progress(
"wait_descriptor",
"Failed upload to {}".format(args[3])
)
if failed_uploads == attempted_uploads:
msg = "Failed to upload '{}' to: {}".format(
args[1],
', '.join(failed_uploads),
)
uploaded.errback(RuntimeError(msg))
# the first 'yield' should be the add_event_listener so that a
# caller can do "d = _await_descriptor_upload()", then add the
# service.
yield tor_protocol.add_event_listener('HS_DESC', hs_desc)
yield uploaded
yield tor_protocol.remove_event_listener('HS_DESC', hs_desc)
# ensure we show "100%" at the end
if progress:
if await_all_uploads:
msg = "Completed descriptor uploads"
else:
msg = "At least one descriptor uploaded"
try:
progress(100.0, "wait_descriptor", msg)
except Exception:
log.err() | 0.000654 |
def lyap_r(data, emb_dim=10, lag=None, min_tsep=None, tau=1, min_neighbors=20,
trajectory_len=20, fit="RANSAC", debug_plot=False, debug_data=False,
plot_file=None, fit_offset=0):
"""
Estimates the largest Lyapunov exponent using the algorithm of Rosenstein
et al. [lr_1]_.
Explanation of Lyapunov exponents:
See lyap_e.
Explanation of the algorithm:
The algorithm of Rosenstein et al. is only able to recover the largest
Lyapunov exponent, but behaves rather robust to parameter choices.
The idea for the algorithm relates closely to the definition of Lyapunov
exponents. First, the dynamics of the data are reconstructed using a delay
embedding method with a lag, such that each value x_i of the data is mapped
to the vector
X_i = [x_i, x_(i+lag), x_(i+2*lag), ..., x_(i+(emb_dim-1) * lag)]
For each such vector X_i, we find the closest neighbor X_j using the
euclidean distance. We know that as we follow the trajectories from X_i and
X_j in time in a chaotic system the distances between X_(i+k) and X_(j+k)
  denoted as d_i(k) will grow exponentially as
  d_i(k) = c * e^(lambda * k), where lambda is a good approximation of the
highest Lyapunov exponent, because the exponential expansion along the axis
associated with this exponent will quickly dominate the expansion or
contraction along other axes.
To calculate lambda, we look at the logarithm of the distance trajectory,
because log(d_i(k)) = log(c) + lambda * k. This gives a set of lines
(one for each index i) whose slope is an approximation of lambda. We
therefore extract the mean log trajectory d'(k) by taking the mean of
log(d_i(k)) over all orbit vectors X_i. We then fit a straight line to
the plot of d'(k) versus k. The slope of the line gives the desired
parameter lambda.
Method for choosing min_tsep:
Usually we want to find neighbors between points that are close in phase
space but not too close in time, because we want to avoid spurious
correlations between the obtained trajectories that originate from temporal
dependencies rather than the dynamic properties of the system. Therefore it
is critical to find a good value for min_tsep. One rather plausible
estimate for this value is to set min_tsep to the mean period of the
signal, which can be obtained by calculating the mean frequency using the
fast fourier transform. This procedure is used by default if the user sets
min_tsep = None.
Method for choosing lag:
Another parameter that can be hard to choose by instinct alone is the lag
between individual values in a vector of the embedded orbit. Here,
Rosenstein et al. suggest to set the lag to the distance where the
autocorrelation function drops below 1 - 1/e times its original (maximal)
value. This procedure is used by default if the user sets lag = None.
References:
.. [lr_1] M. T. Rosenstein, J. J. Collins, and C. J. De Luca,
“A practical method for calculating largest Lyapunov exponents from
small data sets,” Physica D: Nonlinear Phenomena, vol. 65, no. 1,
pp. 117–134, 1993.
Reference Code:
.. [lr_a] mirwais, "Largest Lyapunov Exponent with Rosenstein's Algorithm",
url: http://www.mathworks.com/matlabcentral/fileexchange/38424-largest-lyapunov-exponent-with-rosenstein-s-algorithm
.. [lr_b] Shapour Mohammadi, "LYAPROSEN: MATLAB function to calculate
Lyapunov exponent",
url: https://ideas.repec.org/c/boc/bocode/t741502.html
Args:
data (iterable of float):
(one-dimensional) time series
Kwargs:
emb_dim (int):
embedding dimension for delay embedding
lag (float):
lag for delay embedding
min_tsep (float):
minimal temporal separation between two "neighbors" (default:
find a suitable value by calculating the mean period of the data)
tau (float):
step size between data points in the time series in seconds
(normalization scaling factor for exponents)
min_neighbors (int):
if lag=None, the search for a suitable lag will be stopped when the
number of potential neighbors for a vector drops below min_neighbors
trajectory_len (int):
the time (in number of data points) to follow the distance
trajectories between two neighboring points
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
is more robust to outliers
debug_plot (boolean):
if True, a simple plot of the final line-fitting step will
be shown
debug_data (boolean):
if True, debugging data will be returned alongside the result
plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
under the given file name instead of directly showing it through
``plt.show()``
fit_offset (int):
neglect the first fit_offset steps when fitting
Returns:
float:
an estimate of the largest Lyapunov exponent (a positive exponent is
a strong indicator for chaos)
(1d-vector, 1d-vector, list):
only present if debug_data is True: debug data of the form
``(ks, div_traj, poly)`` where ``ks`` are the x-values of the line fit,
``div_traj`` are the y-values and ``poly`` are the line coefficients
(``[slope, intercept]``).
"""
# convert data to float to avoid overflow errors in rowwise_euclidean
data = np.asarray(data, dtype="float32")
n = len(data)
max_tsep_factor = 0.25
if lag is None or min_tsep is None:
# both the algorithm for lag and min_tsep need the fft
f = np.fft.rfft(data, n * 2 - 1)
if min_tsep is None:
# calculate min_tsep as mean period (= 1 / mean frequency)
mf = np.fft.rfftfreq(n * 2 - 1) * np.abs(f)
mf = np.mean(mf[1:]) / np.sum(np.abs(f[1:]))
min_tsep = int(np.ceil(1.0 / mf))
if min_tsep > max_tsep_factor * n:
min_tsep = int(max_tsep_factor * n)
msg = "signal has very low mean frequency, setting min_tsep = {:d}"
warnings.warn(msg.format(min_tsep), RuntimeWarning)
if lag is None:
# calculate the lag as point where the autocorrelation drops to (1 - 1/e)
# times its maximum value
# note: the Wiener–Khinchin theorem states that the spectral
# decomposition of the autocorrelation function of a process is the power
# spectrum of that process
# => we can use fft to calculate the autocorrelation
acorr = np.fft.irfft(f * np.conj(f))
acorr = np.roll(acorr, n - 1)
eps = acorr[n - 1] * (1 - 1.0 / np.e)
lag = 1
# small helper function to calculate resulting number of vectors for a
# given lag value
def nb_neighbors(lag_value):
min_len = lyap_r_len(
emb_dim=emb_dim, lag=i, trajectory_len=trajectory_len,
min_tsep=min_tsep
)
return max(0, n - min_len)
# find lag
for i in range(1,n):
lag = i
if acorr[n - 1 + i] < eps or acorr[n - 1 - i] < eps:
break
if nb_neighbors(i) < min_neighbors:
msg = "autocorrelation declined too slowly to find suitable lag" \
+ ", setting lag to {}"
warnings.warn(msg.format(lag), RuntimeWarning)
break
min_len = lyap_r_len(
emb_dim=emb_dim, lag=lag, trajectory_len=trajectory_len,
min_tsep=min_tsep
)
if len(data) < min_len:
msg = "for emb_dim = {}, lag = {}, min_tsep = {} and trajectory_len = {}" \
+ " you need at least {} datapoints in your time series"
warnings.warn(
msg.format(emb_dim, lag, min_tsep, trajectory_len, min_len),
RuntimeWarning
)
# delay embedding
orbit = delay_embedding(data, emb_dim, lag)
m = len(orbit)
# construct matrix with pairwise distances between vectors in orbit
dists = np.array([rowwise_euclidean(orbit, orbit[i]) for i in range(m)])
# we do not want to consider vectors as neighbor that are less than min_tsep
# time steps together => mask the distances min_tsep to the right and left of
# each index by setting them to infinity (will never be considered as nearest
# neighbors)
for i in range(m):
dists[i, max(0, i - min_tsep):i + min_tsep + 1] = float("inf")
# check that we have enough data points to continue
ntraj = m - trajectory_len + 1
  min_traj = min_tsep * 2 + 2  # in each row, 2 * min_tsep + 1 distances are inf
if ntraj <= 0:
msg = "Not enough data points. Need {} additional data points to follow " \
+ "a complete trajectory."
raise ValueError(msg.format(-ntraj+1))
if ntraj < min_traj:
# not enough data points => there are rows where all values are inf
assert np.any(np.all(np.isinf(dists[:ntraj, :ntraj]), axis=1))
msg = "Not enough data points. At least {} trajectories are required " \
+ "to find a valid neighbor for each orbit vector with min_tsep={} " \
+ "but only {} could be created."
raise ValueError(msg.format(min_traj, min_tsep, ntraj))
assert np.all(np.any(np.isfinite(dists[:ntraj, :ntraj]), axis=1))
# find nearest neighbors (exclude last columns, because these vectors cannot
# be followed in time for trajectory_len steps)
nb_idx = np.argmin(dists[:ntraj, :ntraj], axis=1)
# build divergence trajectory by averaging distances along the trajectory
# over all neighbor pairs
div_traj = np.zeros(trajectory_len, dtype=float)
for k in range(trajectory_len):
# calculate mean trajectory distance at step k
indices = (np.arange(ntraj) + k, nb_idx + k)
div_traj_k = dists[indices]
# filter entries where distance is zero (would lead to -inf after log)
nonzero = np.where(div_traj_k != 0)
if len(nonzero[0]) == 0:
# if all entries where zero, we have to use -inf
div_traj[k] = -np.inf
else:
div_traj[k] = np.mean(np.log(div_traj_k[nonzero]))
# filter -inf entries from mean trajectory
ks = np.arange(trajectory_len)
finite = np.where(np.isfinite(div_traj))
ks = ks[finite]
div_traj = div_traj[finite]
if len(ks) < 1:
# if all points or all but one point in the trajectory is -inf, we cannot
# fit a line through the remaining points => return -inf as exponent
poly = [-np.inf, 0]
else:
# normal line fitting
poly = poly_fit(ks[fit_offset:], div_traj[fit_offset:], 1, fit=fit)
if debug_plot:
plot_reg(ks[fit_offset:], div_traj[fit_offset:], poly, "k", "log(d(k))", fname=plot_file)
le = poly[0] / tau
if debug_data:
return (le, (ks, div_traj, poly))
else:
return le | 0.005946 |
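A usage sketch on a known chaotic signal, assuming this `lyap_r` and numpy are importable; the parameter values are illustrative only. The logistic map at r = 4 has a largest Lyapunov exponent near ln(2) ≈ 0.69.

import numpy as np

x = np.zeros(1000)
x[0] = 0.1
for i in range(1, len(x)):
    x[i] = 4.0 * x[i - 1] * (1.0 - x[i - 1])   # logistic map at r = 4 (chaotic)

le = lyap_r(x, emb_dim=4, lag=1, min_tsep=10)
print("estimated largest Lyapunov exponent:", le)  # expected: clearly positive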
def move_to(self, thing, destination):
"Move a thing to a new location."
thing.bump = self.some_things_at(destination, Obstacle)
if not thing.bump:
thing.location = destination
for o in self.observers:
o.thing_moved(thing) | 0.006993 |
def _iter_coords(nsls):
"""Iterate through all matching coordinates in a sequence of slices."""
# First convert all slices to ranges
ranges = list()
for nsl in nsls:
if isinstance(nsl, int):
ranges.append(range(nsl, nsl+1))
else:
ranges.append(range(nsl.start, nsl.stop))
# Iterate through all matching coordinates
yield from itertools.product(*ranges) | 0.002404 |
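For example, with the generator above (and its `itertools` import) in scope, an integer index mixed with slices expands to every matching coordinate:

coords = list(_iter_coords((2, slice(0, 2), slice(1, 3))))
assert coords == [(2, 0, 1), (2, 0, 2), (2, 1, 1), (2, 1, 2)]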
def poll(in_sockets, out_sockets, timeout=-1):
"""
Poll a list of sockets
:param in_sockets: sockets for reading
:param out_sockets: sockets for writing
:param timeout: poll timeout in seconds, -1 is infinite wait
:return: tuple (read socket list, write socket list)
"""
sockets = {}
# reverse map fd => socket
fd_sockets = {}
for s in in_sockets:
sockets[s.fd] = POLLIN
fd_sockets[s.fd] = s
for s in out_sockets:
modes = sockets.get(s.fd, 0)
sockets[s.fd] = modes | POLLOUT
fd_sockets[s.fd] = s
# convert to milliseconds or -1
if timeout >= 0:
timeout_ms = int(timeout*1000)
else:
timeout_ms = -1
res, sockets = wrapper.nn_poll(sockets, timeout_ms)
_nn_check_positive_rtn(res)
read_list, write_list = [], []
for fd, result in sockets.items():
if (result & POLLIN) != 0:
read_list.append(fd_sockets[fd])
if (result & POLLOUT) != 0:
write_list.append(fd_sockets[fd])
return read_list, write_list | 0.000933 |
def encode(char_data, encoding='utf-8'):
"""
Encode the parameter as a byte string.
:param char_data: the data to encode
:rtype: bytes
"""
if type(char_data) is str:
return char_data.encode(encoding, errors='replace')
elif type(char_data) is bytes:
return char_data
else:
raise TypeError('message should be a string or bytes, found %s' % type(char_data)) | 0.004854 |
def _forward_kernel(self, F, inputs, states, **kwargs):
""" forward using CUDNN or CPU kenrel"""
if self._layout == 'NTC':
inputs = F.swapaxes(inputs, dim1=0, dim2=1)
if self._projection_size is None:
params = (kwargs['{}{}_{}_{}'.format(d, l, g, t)].reshape(-1)
for t in ['weight', 'bias']
for l in range(self._num_layers)
for d in ['l', 'r'][:self._dir]
for g in ['i2h', 'h2h'])
else:
params = (kwargs['{}{}_{}_{}'.format(d, l, g, t)].reshape(-1)
for t in ['weight', 'bias']
for l in range(self._num_layers)
for d in ['l', 'r'][:self._dir]
for g in ['i2h', 'h2h', 'h2r']
if g != 'h2r' or t != 'bias')
params = F._internal._rnn_param_concat(*params, dim=0)
rnn = F.RNN(inputs, params, *states, state_size=self._hidden_size,
projection_size=self._projection_size,
num_layers=self._num_layers, bidirectional=self._dir == 2,
p=self._dropout, state_outputs=True, mode=self._mode,
lstm_state_clip_min=self._lstm_state_clip_min,
lstm_state_clip_max=self._lstm_state_clip_max,
lstm_state_clip_nan=self._lstm_state_clip_nan)
if self._mode == 'lstm':
outputs, states = rnn[0], [rnn[1], rnn[2]]
else:
outputs, states = rnn[0], [rnn[1]]
if self._layout == 'NTC':
outputs = F.swapaxes(outputs, dim1=0, dim2=1)
return outputs, states | 0.002358 |
def matchesWithMatchers(self, event):
'''
Return all matches for this event. The first matcher is also returned for each matched object.
:param event: an input event
'''
ret = []
self._matches(event, set(), ret)
return tuple(ret) | 0.013605 |
def upload(self, source,
destination,
bucket,
chunk_size = 2 * 1024 * 1024,
metadata=None,
keep_private=True):
'''upload a file from a source to a destination. The client is expected
to have a bucket (self._bucket) that is created when instantiated.
This would be the method to do the same using the storage client,
but not easily done for resumable
blob = self._bucket.blob(destination)
blob.upload_from_filename(filename=source,
content_type="application/zip",
client=self._service)
url = blob.public_url
if isinstance(url, six.binary_type):
url = url.decode('utf-8')
return url
'''
env = 'SREGISTRY_GOOGLE_STORAGE_PRIVATE'
keep_private = self._get_and_update_setting(env) or keep_private
media = MediaFileUpload(source, chunksize=chunk_size, resumable=True)
request = self._storage_service.objects().insert(bucket=bucket.name,
name=destination,
media_body=media)
response = None
total = request.resumable._size / (1024*1024.0)
bar = ProgressBar(expected_size=total, filled_char='=', hide=self.quiet)
while response is None:
error = None
try:
progress, response = request.next_chunk()
if progress:
bar.show(progress.resumable_progress / (1024*1024.0))
except:
raise
# When we finish upload, get as blob
blob = bucket.blob(destination)
if blob.exists():
if not keep_private:
blob.make_public()
# If the user has a dictionary of metadata to update
if metadata is not None:
body = prepare_metadata(metadata)
blob.metadata = metadata
blob._properties['metadata'] = metadata
blob.patch()
return response | 0.008213 |
def difference(self, *iterables):
"""
Return a new set with elements in the set that are not in the
*iterables*.
"""
diff = self._set.difference(*iterables)
return self._fromset(diff, key=self._key) | 0.00813 |
def from_unit_to_satoshi(self, value, unit='satoshi'):
"""
Convert a value to satoshis. units can be any fiat currency.
By default the unit is satoshi.
"""
if not unit or unit == 'satoshi':
return value
if unit == 'bitcoin' or unit == 'btc':
return value * 1e8
# assume fiat currency that we can convert
convert = get_current_price(self.crypto, unit)
return int(value / convert * 1e8) | 0.004158 |
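A small arithmetic illustration, where `wallet` stands for a hypothetical instance of the surrounding class:

# Satoshi values pass through unchanged; BTC values are scaled by 1e8.
assert wallet.from_unit_to_satoshi(1234) == 1234
assert wallet.from_unit_to_satoshi(0.5, unit='btc') == 0.5 * 1e8   # 50,000,000 satoshi
# unit='usd' (or any fiat code) triggers a get_current_price() lookup first.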
def choice_complete(self, ctx, incomplete):
"""Returns the completion results for click.core.Choice
Parameters
----------
ctx : click.core.Context
The current context
incomplete :
The string to complete
Returns
-------
[(str, str)]
A list of completion results
"""
return [
(c, None) for c in self.choices
if completion_configuration.match_incomplete(c, incomplete)
] | 0.002208 |
def generate_format(self):
"""
        Means that the value has to be in the specified format, for example a date, an email, or another format.
.. code-block:: python
{'format': 'email'}
Valid value for this definition is [email protected] but not @username
"""
with self.l('if isinstance({variable}, str):'):
format_ = self._definition['format']
if format_ in self.FORMAT_REGEXS:
format_regex = self.FORMAT_REGEXS[format_]
self._generate_format(format_, format_ + '_re_pattern', format_regex)
# format regex is used only in meta schemas
elif format_ == 'regex':
with self.l('try:'):
self.l('re.compile({variable})')
with self.l('except Exception:'):
self.l('raise JsonSchemaException("{name} must be a valid regex")')
else:
self.l('pass') | 0.005258 |
def get_path(root, path, default=_UNSET):
"""Retrieve a value from a nested object via a tuple representing the
lookup path.
>>> root = {'a': {'b': {'c': [[1], [2], [3]]}}}
>>> get_path(root, ('a', 'b', 'c', 2, 0))
3
The path format is intentionally consistent with that of
:func:`remap`.
One of get_path's chief aims is improved error messaging. EAFP is
great, but the error messages are not.
For instance, ``root['a']['b']['c'][2][1]`` gives back
``IndexError: list index out of range``
What went out of range where? get_path currently raises
``PathAccessError: could not access 2 from path ('a', 'b', 'c', 2,
1), got error: IndexError('list index out of range',)``, a
subclass of IndexError and KeyError.
You can also pass a default that covers the entire operation,
should the lookup fail at any level.
Args:
root: The target nesting of dictionaries, lists, or other
objects supporting ``__getitem__``.
path (tuple): A list of strings and integers to be successively
looked up within *root*.
default: The value to be returned should any
``PathAccessError`` exceptions be raised.
"""
if isinstance(path, six.string_types):
path = path.split(".")
cur = root
try:
for seg in path:
try:
cur = cur[seg]
except (KeyError, IndexError) as exc:
raise PathAccessError(exc, seg, path)
except TypeError as exc:
# either string index in a list, or a parent that
# doesn't support indexing
try:
seg = int(seg)
cur = cur[seg]
except (ValueError, KeyError, IndexError, TypeError):
if not getattr(cur, "__iter__", None):
exc = TypeError("%r object is not indexable" % type(cur).__name__)
raise PathAccessError(exc, seg, path)
except PathAccessError:
if default is _UNSET:
raise
return default
return cur | 0.000945 |
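A short usage sketch, assuming this `get_path` (with its `six` and `PathAccessError` dependencies) is importable:

root = {'a': {'b': {'c': [[1], [2], [3]]}}}
assert get_path(root, ('a', 'b', 'c', 2, 0)) == 3
assert get_path(root, 'a.b.c') == [[1], [2], [3]]          # string paths are split on '.'
assert get_path(root, ('a', 'x'), default=None) is None    # default swallows PathAccessError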
def serve(application, host='127.0.0.1', port=8080):
"""Gevent-based WSGI-HTTP server."""
# Instantiate the server with a host/port configuration and our application.
WSGIServer((host, int(port)), application).serve_forever() | 0.026087 |
def merge_pdb_range_pairs(prs):
'''Takes in a list of PDB residue IDs (including insertion codes) specifying ranges and returns a sorted list of merged, sorted ranges.
This works as above but we have to split the residues into pairs as "1A" > "19".
'''
new_prs = []
sprs = [sorted((split_pdb_residue(p[0]), split_pdb_residue(p[1]))) for p in prs]
sprs = sorted(sprs)
merged = False
x = 0
from klab import colortext
while x < len(sprs):
newx = x + 1
new_pair = list(sprs[x])
for y in range(x + 1, len(sprs)):
if new_pair[0] <= (sprs[y][0][0] - 1, sprs[y][0][1]) <= new_pair[1]:
new_pair[0] = min(new_pair[0], sprs[y][0])
new_pair[1] = max(new_pair[1], sprs[y][1])
newx = y + 1
if new_pair not in new_prs:
new_prs.append(new_pair)
x = newx
return new_prs | 0.005482 |
def _index_fs(self):
"""Returns a deque object full of local file system items.
:returns: ``deque``
"""
indexed_objects = self._return_deque()
directory = self.job_args.get('directory')
if directory:
indexed_objects = self._return_deque(
deque=indexed_objects,
item=self._drectory_local_files(
directory=directory
)
)
object_names = self.job_args.get('object')
if object_names:
indexed_objects = self._return_deque(
deque=indexed_objects,
item=self._named_local_files(
object_names=object_names
)
)
return indexed_objects | 0.002558 |
def _convert_metrics_to_xml(metrics, root):
'''
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
'''
# Version
ETree.SubElement(root, 'Version').text = metrics.version
# Enabled
ETree.SubElement(root, 'Enabled').text = str(metrics.enabled)
# IncludeAPIs
if metrics.enabled and metrics.include_apis is not None:
ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis)
# RetentionPolicy
retention_element = ETree.SubElement(root, 'RetentionPolicy')
_convert_retention_policy_to_xml(metrics.retention_policy, retention_element) | 0.002571 |
def unpublish(self, registry=None):
''' Try to un-publish the current version. Return a description of any
        errors that occurred, or None if successful.
'''
return registry_access.unpublish(
self.getRegistryNamespace(),
self.getName(),
self.getVersion(),
registry=registry
) | 0.005495 |
def xyz(self):
"""Return all particle coordinates in this compound.
Returns
-------
pos : np.ndarray, shape=(n, 3), dtype=float
Array with the positions of all particles.
"""
if not self.children:
pos = np.expand_dims(self._pos, axis=0)
else:
arr = np.fromiter(itertools.chain.from_iterable(
particle.pos for particle in self.particles()), dtype=float)
pos = arr.reshape((-1, 3))
return pos | 0.003854 |
def authenticate_or_redirect(self):
"""
Helper function suitable for @app.before_request and @check.
Sets g.oidc_id_token to the ID token if the user has successfully
authenticated, else returns a redirect object so they can go try
to authenticate.
:returns: A redirect object, or None if the user is logged in.
:rtype: Redirect
.. deprecated:: 1.0
Use :func:`require_login` instead.
"""
# the auth callback and error pages don't need user to be authenticated
if request.endpoint in frozenset(['_oidc_callback', '_oidc_error']):
return None
# retrieve signed ID token cookie
id_token = self._get_cookie_id_token()
if id_token is None:
return self.redirect_to_auth_server(request.url)
# ID token expired
# when Google is the IdP, this happens after one hour
if time.time() >= id_token['exp']:
# get credentials from store
try:
credentials = OAuth2Credentials.from_json(
self.credentials_store[id_token['sub']])
except KeyError:
logger.debug("Expired ID token, credentials missing",
exc_info=True)
return self.redirect_to_auth_server(request.url)
# refresh and store credentials
try:
credentials.refresh(httplib2.Http())
if credentials.id_token:
id_token = credentials.id_token
else:
# It is not guaranteed that we will get a new ID Token on
# refresh, so if we do not, let's just update the id token
# expiry field and reuse the existing ID Token.
if credentials.token_expiry is None:
logger.debug('Expired ID token, no new expiry. Falling'
' back to assuming 1 hour')
id_token['exp'] = time.time() + 3600
else:
id_token['exp'] = calendar.timegm(
credentials.token_expiry.timetuple())
self.credentials_store[id_token['sub']] = credentials.to_json()
self._set_cookie_id_token(id_token)
except AccessTokenRefreshError:
# Can't refresh. Wipe credentials and redirect user to IdP
# for re-authentication.
logger.debug("Expired ID token, can't refresh credentials",
exc_info=True)
del self.credentials_store[id_token['sub']]
return self.redirect_to_auth_server(request.url)
# make ID token available to views
g.oidc_id_token = id_token
return None | 0.0007 |
def from_project_root(cls, project_root, cli_vars):
"""Create a project from a root directory. Reads in dbt_project.yml and
packages.yml, if it exists.
:param project_root str: The path to the project root to load.
:raises DbtProjectError: If the project is missing or invalid, or if
the packages file exists and is invalid.
:returns Project: The project, with defaults populated.
"""
project_root = os.path.normpath(project_root)
project_yaml_filepath = os.path.join(project_root, 'dbt_project.yml')
# get the project.yml contents
if not path_exists(project_yaml_filepath):
raise DbtProjectError(
'no dbt_project.yml found at expected path {}'
.format(project_yaml_filepath)
)
if isinstance(cli_vars, compat.basestring):
cli_vars = parse_cli_vars(cli_vars)
renderer = ConfigRenderer(cli_vars)
project_dict = _load_yaml(project_yaml_filepath)
rendered_project = renderer.render_project(project_dict)
rendered_project['project-root'] = project_root
packages_dict = package_data_from_root(project_root)
return cls.from_project_config(rendered_project, packages_dict) | 0.001558 |
def endpoint_2_json(self):
"""
transform local object to JSON
:return: JSON object
"""
LOGGER.debug("Endpoint.endpoint_2_json")
json_obj = {
"endpointID": self.id,
"endpointURL": self.url,
"endpointParentNodeID": self.parent_node_id,
"endpointTwinEndpointsID": self.twin_endpoints_id,
"endpointProperties": self.properties
}
return json_obj | 0.00431 |
def update(self, ip_address=values.unset, friendly_name=values.unset,
cidr_prefix_length=values.unset):
"""
Update the IpAddressInstance
:param unicode ip_address: An IP address in dotted decimal notation from which you want to accept traffic. Any SIP requests from this IP address will be allowed by Twilio. IPv4 only supported today.
:param unicode friendly_name: A human readable descriptive text for this resource, up to 64 characters long.
:param unicode cidr_prefix_length: An integer representing the length of the CIDR prefix to use with this IP address when accepting traffic. By default the entire IP address is used.
:returns: Updated IpAddressInstance
:rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.ip_address.IpAddressInstance
"""
return self._proxy.update(
ip_address=ip_address,
friendly_name=friendly_name,
cidr_prefix_length=cidr_prefix_length,
) | 0.006903 |
def get_gaps_and_overlaps(self, tier1, tier2, maxlen=-1):
"""Give gaps and overlaps. The return types are shown in the table
below. The string will be of the format: ``id_tiername_tiername``.
.. note:: There is also a faster method: :func:`get_gaps_and_overlaps2`
For example when a gap occurs between tier1 and tier2 and they are
called ``speakerA`` and ``speakerB`` the annotation value of that gap
will be ``G12_speakerA_speakerB``.
| The gaps and overlaps are calculated using Heldner and Edlunds
method found in:
| *Heldner, M., & Edlund, J. (2010). Pauses, gaps and overlaps in
conversations. Journal of Phonetics, 38(4), 555–568.
doi:10.1016/j.wocn.2010.08.002*
+-----+---------------------------------------------+
| id | Description |
+=====+=============================================+
| O12 | Overlap from tier1 to tier2 |
+-----+---------------------------------------------+
| O21 | Overlap from tier2 to tier1 |
+-----+---------------------------------------------+
| G12 | Between speaker gap from tier1 to tier2 |
+-----+---------------------------------------------+
| G21 | Between speaker gap from tier2 to tier1 |
+-----+---------------------------------------------+
| W12 | Within speaker overlap from tier2 in tier1 |
+-----+---------------------------------------------+
| W21 | Within speaker overlap from tier1 in tier2 |
+-----+---------------------------------------------+
| P1 | Pause for tier1 |
+-----+---------------------------------------------+
| P2 | Pause for tier2 |
+-----+---------------------------------------------+
:param str tier1: Name of the first tier.
:param str tier2: Name of the second tier.
:param int maxlen: Maximum length of gaps (skip longer ones), if ``-1``
no maximum will be used.
:yields: Tuples of the form ``[(start, end, type)]``.
:raises KeyError: If a tier is non existent.
:raises IndexError: If no annotations are available in the tiers.
"""
spkr1anns = sorted((self.timeslots[a[0]], self.timeslots[a[1]])
for a in self.tiers[tier1][0].values())
spkr2anns = sorted((self.timeslots[a[0]], self.timeslots[a[1]])
for a in self.tiers[tier2][0].values())
line1 = []
def isin(x, lst):
return False if\
len([i for i in lst if i[0] <= x and i[1] >= x]) == 0 else True
minmax = (min(spkr1anns[0][0], spkr2anns[0][0]),
max(spkr1anns[-1][1], spkr2anns[-1][1]))
last = (1, minmax[0])
for ts in range(*minmax):
in1, in2 = isin(ts, spkr1anns), isin(ts, spkr2anns)
if in1 and in2: # Both speaking
if last[0] == 'B':
continue
ty = 'B'
elif in1: # Only 1 speaking
if last[0] == '1':
continue
ty = '1'
elif in2: # Only 2 speaking
if last[0] == '2':
continue
ty = '2'
else: # None speaking
if last[0] == 'N':
continue
ty = 'N'
line1.append((last[0], last[1], ts))
last = (ty, ts)
line1.append((last[0], last[1], minmax[1]))
for i in range(len(line1)):
if line1[i][0] == 'N':
if i != 0 and i < len(line1) - 1 and\
line1[i-1][0] != line1[i+1][0]:
t = ('G12', tier1, tier2) if line1[i-1][0] == '1' else\
('G21', tier2, tier1)
if maxlen == -1 or abs(line1[i][1]-line1[i][2]) < maxlen:
yield (line1[i][1], line1[i][2]-1, '_'.join(t))
else:
t = ('P1', tier1) if line1[i-1][0] == '1' else\
('P2', tier2)
if maxlen == -1 or abs(line1[i][1]-line1[i][2]) < maxlen:
yield (line1[i][1], line1[i][2]-1, '_'.join(t))
elif line1[i][0] == 'B':
if i != 0 and i < len(line1) - 1 and\
line1[i-1][0] != line1[i+1][0]:
t = ('O12', tier1, tier2) if line1[i-1][0] == '1' else\
('O21', tier2, tier1)
yield (line1[i][1], line1[i][2]-1, '_'.join(t))
else:
t = ('W12', tier1, tier2) if line1[i-1][0] == '1' else\
('W21', tier2, tier1)
yield (line1[i][1], line1[i][2]-1, '_'.join(t)) | 0.000398 |
def slurp_properties(source, destination, ignore=[], srckeys=None):
"""Copy properties from *source* (assumed to be a module) to
*destination* (assumed to be a dict).
*ignore* lists properties that should not be thusly copied.
*srckeys* is a list of keys to copy, if the source's __all__ is
untrustworthy.
"""
if srckeys is None:
srckeys = source.__all__
destination.update(dict([(name, getattr(source, name))
for name in srckeys
if not (name.startswith('__') or name in ignore)
])) | 0.008157 |
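For instance, copying a few names from the `math` module into a plain dict (a usage sketch, assuming `slurp_properties` is in scope):

import math

namespace = {}
slurp_properties(math, namespace, srckeys=['pi', 'sqrt', '__doc__'])
assert namespace['pi'] == math.pi and namespace['sqrt'] is math.sqrt
assert '__doc__' not in namespace   # dunder names are always filtered out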
def create_strategy(name=None):
"""
    Create a strategy, or just return it if it's already one.
:param name:
:return: Strategy
"""
import logging
from bonobo.execution.strategies.base import Strategy
if isinstance(name, Strategy):
return name
if name is None:
name = DEFAULT_STRATEGY
logging.debug("Creating execution strategy {!r}...".format(name))
try:
factory = STRATEGIES[name]
except KeyError as exc:
raise RuntimeError(
"Invalid strategy {}. Available choices: {}.".format(repr(name), ", ".join(sorted(STRATEGIES.keys())))
) from exc
return factory() | 0.004505 |
def _search(self, words, include=None, exclude=None, lookup=None):
'''Full text search. Return a list of queries to intersect.'''
lookup = lookup or 'contains'
query = self.router.worditem.query()
if include:
query = query.filter(model_type__in=include)
if exclude:
            query = query.exclude(model_type__in=exclude)
if not words:
return [query]
qs = []
if lookup == 'in':
# we are looking for items with at least one word in it
qs.append(query.filter(word__in=words))
elif lookup == 'contains':
            # we want to match every single word
for word in words:
qs.append(query.filter(word=word))
else:
raise ValueError('Unknown lookup "{0}"'.format(lookup))
return qs | 0.003436 |
def parse(self, raise_parsing_errors=True):
"""
Process the file content.
Usage::
>>> plist_file_parser = PlistFileParser("standard.plist")
>>> plist_file_parser.parse()
True
>>> plist_file_parser.elements.keys()
[u'Dictionary A', u'Number A', u'Array A', u'String A', u'Date A', u'Boolean A', u'Data A']
:param raise_parsing_errors: Raise parsing errors.
:type raise_parsing_errors: bool
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Reading elements from: '{0}'.".format(self.path))
element_tree_parser = ElementTree.iterparse(self.path)
self.__parsing_errors = []
for action, element in element_tree_parser:
unmarshal = self.__unserializers.get(element.tag)
if unmarshal:
data = unmarshal(element)
element.clear()
element.text = data
elif element.tag != "plist":
self.__parsing_errors.append(foundations.exceptions.FileStructureParsingError(
"Unknown element: {0}".format(element.tag)))
if self.__parsing_errors:
if raise_parsing_errors:
raise foundations.exceptions.FileStructureParsingError(
"{0} | '{1}' structure is invalid, parsing exceptions occured!".format(self.__class__.__name__,
self.path))
else:
self.__elements = foundations.common.get_first_item(element_tree_parser.root).text
return True | 0.004169 |
def import_schema_to_json(name, store_it=False):
"""
loads the given schema name
from the local filesystem
and puts it into a store if it
is not in there yet
:param name:
:param store_it: if set to True, stores the contents
:return:
"""
schema_file = u"%s.json" % name
file_path = os.path.join(SCHEMA_ROOT, schema_file)
log.debug(u"trying to load %s " % file_path)
schema = None
try:
schema_file = open(file_path, "r").read()
except IOError, e:
log.error(u"file not found %s" % e)
msg = "Could not find schema file. %s" % file_path
raise SalesKingException("SCHEMA_NOT_FOUND", msg)
schema = json.loads(schema_file)
if schema is None:
msg = "loading failed foo %s" % name
raise SalesKingException("SCHEMA_NOT_FOUND", msg)
return schema | 0.001168 |
def error_msg_from_exception(e):
"""Translate exception into error message
Database have different ways to handle exception. This function attempts
to make sense of the exception object and construct a human readable
sentence.
TODO(bkyryliuk): parse the Presto error message from the connection
created via create_engine.
engine = create_engine('presto://localhost:3506/silver') -
gives an e.message as the str(dict)
presto.connect('localhost', port=3506, catalog='silver') - as a dict.
The latter version is parsed correctly by this function.
"""
msg = ''
if hasattr(e, 'message'):
if isinstance(e.message, dict):
msg = e.message.get('message')
elif e.message:
msg = '{}'.format(e.message)
return msg or '{}'.format(e) | 0.001196 |
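A minimal usage sketch (not from the source) showing how the three message shapes are handled; DummyError is invented purely for illustration and it assumes error_msg_from_exception from the snippet above is in scope:

class DummyError(Exception):
    def __init__(self, message):
        super(DummyError, self).__init__(message)
        self.message = message  # mimic exceptions that carry a .message attribute

print(error_msg_from_exception(DummyError({'message': 'presto failed'})))  # -> presto failed
print(error_msg_from_exception(DummyError('plain text failure')))          # -> plain text failure
print(error_msg_from_exception(ValueError('no message attribute')))        # -> no message attribute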
async def allocate(
cls, *,
hostname: str = None,
architectures: typing.Sequence[str] = None,
cpus: int = None,
fabrics: typing.Sequence[FabricParam] = None,
interfaces: typing.Sequence[InterfaceParam] = None,
memory: float = None,
pod: typing.Union[str, Pod] = None,
not_pod: typing.Union[str, Pod] = None,
pod_type: str = None,
not_pod_type: str = None,
storage: typing.Sequence[str] = None,
subnets: typing.Sequence[SubnetParam] = None,
tags: typing.Sequence[str] = None,
zone: typing.Union[str, Zone] = None,
not_fabrics: typing.Sequence[FabricParam] = None,
not_subnets: typing.Sequence[SubnetParam] = None,
not_tags: typing.Sequence[str] = None,
not_zones: typing.Sequence[ZoneParam] = None,
agent_name: str = None, comment: str = None,
bridge_all: bool = None, bridge_stp: bool = None,
bridge_fd: int = None, dry_run: bool = None, verbose: bool = None):
"""
Allocate a machine.
:param hostname: The hostname to match.
:type hostname: `str`
:param architectures: The architecture(s) to match.
:type architectures: sequence of `str`
:param cpus: The minimum number of CPUs to match.
:type cpus: `int`
:param fabrics: The connected fabrics to match.
:type fabrics: sequence of either `str`, `int`, or `Fabric`
:param interfaces: The interfaces to match.
:type interfaces: sequence of either `str`, `int`, or `Interface`
:param memory: The minimum amount of RAM to match in MiB.
:type memory: `int`
:param pod: The pod to allocate the machine from.
:type pod: `str`
:param not_pod: Pod the machine must not be located in.
:type not_pod: `str`
:param pod_type: The type of pod to allocate the machine from.
:type pod_type: `str`
:param not_pod_type: Pod type the machine must not be located in.
:type not_pod_type: `str`
:param subnets: The subnet(s) the desired machine must be linked to.
:type subnets: sequence of `str` or `int` or `Subnet`
:param storage: The storage contraint to match.
:type storage: `str`
:param tags: The tags to match, as a sequence.
:type tags: sequence of `str`
:param zone: The zone the desired machine must belong to.
:type zone: `str` or `Zone`
:param not_fabrics: The fabrics the machine must NOT be connected to.
:type not_fabrics: sequence of either `str`, `int`, or `Fabric`
:param not_subnets: The subnet(s) the desired machine must NOT be
linked to.
:type not_subnets: sequence of `str` or `int` or `Subnet`
:param not_zones: The zone(s) the desired machine must NOT in.
:type not_zones: sequence of `str` or `Zone`
:param agent_name: Agent name to attach to the acquire machine.
:type agent_name: `str`
:param comment: Comment for the allocate event placed on machine.
:type comment: `str`
:param bridge_all: Automatically create a bridge on all interfaces
on the allocated machine.
:type bridge_all: `bool`
:param bridge_stp: Turn spaning tree protocol on or off for the
bridges created with bridge_all.
:type bridge_stp: `bool`
:param bridge_fd: Set the forward delay in seconds on the bridges
created with bridge_all.
:type bridge_fd: `int`
:param dry_run: Don't actually acquire the machine just return the
machine that would have been acquired.
:type dry_run: `bool`
:param verbose: Indicate that the user would like additional verbosity
in the constraints_by_type field (each constraint will be prefixed
by `verbose_`, and contain the full data structure that indicates
which machine(s) matched).
:type verbose: `bool`
"""
params = remove_None({
'name': hostname,
'arch': architectures,
'cpu_count': str(cpus) if cpus else None,
'mem': str(memory) if memory else None,
'pod_type': pod_type,
'not_pod_type': not_pod_type,
'storage': storage,
'tags': tags,
'not_tags': not_tags,
'agent_name': agent_name,
'comment': comment,
'bridge_all': bridge_all,
'bridge_stp': bridge_stp,
'bridge_fd': bridge_fd,
'dry_run': dry_run,
'verbose': verbose,
})
if fabrics is not None:
params["fabrics"] = [
get_param_arg('fabrics', idx, Fabric, fabric)
for idx, fabric in enumerate(fabrics)
]
if interfaces is not None:
params["interfaces"] = [
get_param_arg('interfaces', idx, Interface, nic)
for idx, nic in enumerate(interfaces)
]
if pod is not None:
if isinstance(pod, Pod):
params["pod"] = pod.name
elif isinstance(pod, str):
params["pod"] = pod
else:
raise TypeError(
"pod must be a str or Pod, not %s" % type(pod).__name__)
if not_pod is not None:
if isinstance(not_pod, Pod):
params["not_pod"] = not_pod.name
elif isinstance(not_pod, str):
params["not_pod"] = not_pod
else:
raise TypeError(
"not_pod must be a str or Pod, not %s" %
type(not_pod).__name__)
if subnets is not None:
params["subnets"] = [
get_param_arg('subnets', idx, Subnet, subnet)
for idx, subnet in enumerate(subnets)
]
if zone is not None:
if isinstance(zone, Zone):
params["zone"] = zone.name
elif isinstance(zone, str):
params["zone"] = zone
else:
raise TypeError(
"zone must be a str or Zone, not %s" % type(zone).__name__)
if not_fabrics is not None:
params["not_fabrics"] = [
get_param_arg('not_fabrics', idx, Fabric, fabric)
for idx, fabric in enumerate(not_fabrics)
]
if not_subnets is not None:
params["not_subnets"] = [
get_param_arg('not_subnets', idx, Subnet, subnet)
for idx, subnet in enumerate(not_subnets)
]
if not_zones is not None:
params["not_in_zones"] = [
get_param_arg('not_zones', idx, Zone, zone, attr='name')
for idx, zone in enumerate(not_zones)
]
try:
data = await cls._handler.allocate(**params)
except CallError as error:
if error.status == HTTPStatus.CONFLICT:
message = "No machine matching the given criteria was found."
raise MachineNotFound(message) from error
else:
raise
else:
return cls._object(data) | 0.000271 |
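A hedged sketch of how the coroutine above might be driven; the client.machines attribute and the constraint values are assumptions about the surrounding python-libmaas object model, not taken from the source:

async def pick_a_machine(client):
    # dry_run=True only reports which machine would have been acquired.
    machine = await client.machines.allocate(
        architectures=["amd64/generic"],
        cpus=4,
        memory=8192,
        tags=["virtual"],
        dry_run=True,
    )
    return machine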
async def addNodes(self, nodedefs):
'''
Quickly add/modify a list of nodes from node definition tuples.
This API is the simplest/fastest way to add nodes, set node props,
and add tags to nodes remotely.
Args:
nodedefs (list): A list of node definition tuples. See below.
A node definition tuple is defined as:
            ( (form, valu), {'props':{}, 'tags':{}} )
The "props" or "tags" keys may be omitted.
'''
async with await self.snap() as snap:
snap.strict = False
async for node in snap.addNodes(nodedefs):
yield node | 0.003077 |
def flds_firstsort(d):
'''
Perform a lexsort and return the sort indices and shape as a tuple.
'''
    shape = [len(np.unique(d[l]))
             for l in ['xs', 'ys', 'zs']]
    si = np.lexsort((d['z'], d['y'], d['x']))
    return si, shape | 0.046512 |
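For reference, a self-contained sketch (not from the source) of what np.lexsort returns for small coordinate arrays -- the last key passed is the primary sort key:

import numpy as np

d = {'x': np.array([1, 0, 1, 0]),
     'y': np.array([0, 0, 1, 1]),
     'z': np.array([5, 6, 7, 8])}
si = np.lexsort((d['z'], d['y'], d['x']))  # sorts by x first, then y, then z
print(si)          # -> [1 3 0 2]
print(d['x'][si])  # -> [0 0 1 1]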
def stream(self, sha):
"""For now, all lookup is done by git itself"""
hexsha, typename, size, stream = self._git.stream_object_data(bin_to_hex(sha))
return OStream(hex_to_bin(hexsha), typename, size, stream) | 0.012931 |
def ANC_closed(pH, total_carbonates):
"""Calculate the acid neutralizing capacity (ANC) under a closed system
in which no carbonates are exchanged with the atmosphere during the
experiment. Based on pH and total carbonates in the system.
:param pH: pH of the system
:type pH: float
:param total_carbonates: Total carbonate concentration in the system (mole/L)
:type total_carbonates: float
:return: The acid neutralizing capacity of the closed system (eq/L)
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import ANC_closed
>>> from aguaclara.core.units import unit_registry as u
>>> round(ANC_closed(10, 1*u.mol/u.L), 7)
<Quantity(1.359831, 'equivalent / liter')>
"""
return (total_carbonates * (u.eq/u.mol * alpha1_carbonate(pH) +
2 * u.eq/u.mol * alpha2_carbonate(pH)) +
1 * u.eq/u.mol * Kw/invpH(pH) - 1 * u.eq/u.mol * invpH(pH)) | 0.003115 |
def _friendlyAuthError(fn):
''' Decorator to print a friendly you-are-not-authorised message. Use
**outside** the _handleAuth decorator to only print the message after
the user has been given a chance to login. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized: #pylint: disable=no-member
logger.error('insufficient permission')
elif e.response.status_code == requests.codes.bad and 'jwt has expired' in e.response.text.lower(): #pylint: disable=no-member
logger.error('server returned status %s: %s', e.response.status_code, e.response.text)
logger.error('Check that your system clock is set accurately!')
else:
logger.error('server returned status %s: %s', e.response.status_code, e.response.text)
raise
return wrapped | 0.008746 |
def redeliver(self):
"""
Re-deliver the answer to the consequence which previously handled it
by raising an exception.
This method is intended to be invoked after the code in question has
been upgraded. Since there are no buggy answer receivers in
production, nothing calls it yet.
"""
self.consequence.answerReceived(self.answerValue,
self.messageValue,
self.sender,
self.target)
self.deleteFromStore() | 0.00335 |
def get(cls, format):
"""
Gets an emitter, returns the class and a content-type.
"""
        if format in cls.EMITTERS:
return cls.EMITTERS.get(format)
raise ValueError("No emitters found for type %s" % format) | 0.011494 |
def bip32_seed(self, s):
"""
Parse a bip32 private key from a seed.
Return a :class:`BIP32 <pycoin.key.BIP32Node.BIP32Node>` or None.
"""
pair = parse_colon_prefix(s)
if pair is None or pair[0] not in "HP":
return None
if pair[0] == "H":
try:
master_secret = h2b(pair[1])
except ValueError:
return None
else:
master_secret = pair[1].encode("utf8")
return self._network.keys.bip32_seed(master_secret) | 0.00363 |
def adjoint(self):
"""Adjoint of this operator.
Returns
-------
adjoint : `PointwiseInnerAdjoint`
"""
return PointwiseInnerAdjoint(
sspace=self.base_space, vecfield=self.vecfield,
vfspace=self.domain, weighting=self.weights) | 0.006734 |
def get_all_items_of_delivery_note(self, delivery_note_id):
"""
Get all items of delivery note
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param delivery_note_id: the delivery note id
:return: list
"""
return self._iterate_through_pages(
get_function=self.get_items_of_delivery_note_per_page,
resource=DELIVERY_NOTE_ITEMS,
**{'delivery_note_id': delivery_note_id}
) | 0.005226 |
def filter(self, **kwargs):
'''
Only columns/attributes that have been specified as having an index with
the ``index=True`` option on the column definition can be filtered with
this method. Prefix, suffix, and pattern match filters must be provided
using the ``.startswith()``, ``.endswith()``, and the ``.like()``
methods on the query object, respectively. Geo location queries should
be performed using the ``.near()`` method.
Filters should be of the form::
# for numeric ranges, use None for open-ended ranges
attribute=(min, max)
# you can also query for equality by passing a single number
attribute=value
# for string searches, passing a plain string will require that
# string to be in the index as a literal
attribute=string
# to perform an 'or' query on strings, you can pass a list of
# strings
attribute=[string1, string2]
As an example, the following will return entities that have both
``hello`` and ``world`` in the ``String`` column ``scol`` and has a
``Numeric`` column ``ncol`` with value between 2 and 10 (including the
endpoints)::
results = MyModel.query \\
.filter(scol='hello') \\
.filter(scol='world') \\
.filter(ncol=(2, 10)) \\
.all()
If you only want to match a single value as part of your range query,
you can pass an integer, float, or Decimal object by itself, similar
to the ``Model.get_by()`` method::
results = MyModel.query \\
.filter(ncol=5) \\
.execute()
.. note:: Trying to use a range query `attribute=(min, max)` on indexed
string columns won't return any results.
.. note:: This method only filters columns that have been defined with
``index=True``.
'''
cur_filters = list(self._filters)
for attr, value in kwargs.items():
value = self._check(attr, value, which='filter')
if isinstance(value, NUMERIC_TYPES):
# for simple numeric equiality filters
value = (value, value)
if isinstance(value, six.string_types):
cur_filters.append('%s:%s'%(attr, value))
elif six.PY3 and isinstance(value, bytes):
cur_filters.append('%s:%s'%(attr, value.decode('latin-1')))
elif isinstance(value, tuple):
if value is NOT_NULL:
from .columns import OneToOne, ManyToOne
ctype = type(self._model._columns[attr])
if not issubclass(ctype, (OneToOne, ManyToOne)):
raise QueryError("Can only query for non-null column values " \
"on OneToOne or ManyToOne columns, %r is of type %r"%(attr, ctype))
if len(value) != 2:
raise QueryError("Numeric ranges require 2 endpoints, you provided %s with %r"%(len(value), value))
tt = []
for v in value:
if isinstance(v, date):
v = dt2ts(v)
if isinstance(v, dtime):
v = t2ts(v)
tt.append(v)
value = tt
cur_filters.append((attr, value[0], value[1]))
elif isinstance(value, list) and value:
cur_filters.append(['%s:%s'%(attr, _ts(v)) for v in value])
else:
raise QueryError("Sorry, we don't know how to filter %r by %r"%(attr, value))
return self.replace(filters=tuple(cur_filters)) | 0.003943 |
def _dos2unix_cygwin(self, file_path):
"""
Use cygwin to convert file to unix format
"""
dos2unix_cmd = \
[os.path.join(self._cygwin_bin_location, "dos2unix.exe"),
self._get_cygwin_path(file_path)]
process = Popen(dos2unix_cmd,
stdout=PIPE, stderr=PIPE, shell=False)
process.communicate() | 0.005195 |
def split_bgedge(self, bgedge, guidance=None, sorted_guidance=False,
account_for_colors_multiplicity_in_guidance=True,
key=None):
""" Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param bgedge: an edge to find most "similar to" among existing edges for a split
:type bgedge: :class:`bg.edge.BGEdge`
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split
:type guidance: iterable where each entry is iterable with colors entries
:param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
"""
self.__split_bgedge(bgedge=bgedge, guidance=guidance, sorted_guidance=sorted_guidance,
account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance,
key=key) | 0.008427 |
def systemInformationType2bis():
"""SYSTEM INFORMATION TYPE 2bis Section 9.1.33"""
a = L2PseudoLength(l2pLength=0x15)
b = TpPd(pd=0x6)
c = MessageType(mesType=0x2) # 00000010
d = NeighbourCellsDescription()
e = RachControlParameters()
f = Si2bisRestOctets()
packet = a / b / c / d / e / f
return packet | 0.00295 |
def getrawfile(self, project_id, sha1, filepath):
"""
Get the raw file contents for a file by commit SHA and path.
:param project_id: The ID of a project
:param sha1: The commit or branch name
:param filepath: The path the file
:return: raw file contents
"""
data = {'filepath': filepath}
request = requests.get(
'{0}/{1}/repository/blobs/{2}'.format(self.projects_url, project_id, sha1),
params=data, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout,
headers=self.headers)
if request.status_code == 200:
return request.content
else:
return False | 0.005642 |
def leaky_relu(attrs, inputs, proto_obj):
"""Leaky Relu function"""
if 'alpha' in attrs:
new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
else:
new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 0.01})
return 'LeakyReLU', new_attrs, inputs | 0.012539 |
def api(self):
""" Get or create an Api() instance using django settings. """
api = getattr(self, '_api', None)
if api is None:
self._api = mailjet.Api()
return self._api | 0.009259 |
def _charlist(self, data) -> list:
"""
Private method to return the variables in a SAS Data set that are of type char
:param data: SAS Data object to process
:return: list of character variables
:rtype: list
"""
# Get list of character variables to add to nominal list
char_string = """
data _null_; file LOG;
d = open('{0}.{1}');
nvars = attrn(d, 'NVARS');
put 'VARLIST=';
do i = 1 to nvars;
vart = vartype(d, i);
var = varname(d, i);
if vart eq 'C' then
put var; end;
put 'VARLISTend=';
run;
"""
# ignore teach_me_SAS mode to run contents
nosub = self.sas.nosub
self.sas.nosub = False
ll = self.sas.submit(char_string.format(data.libref, data.table + data._dsopts()))
self.sas.nosub = nosub
l2 = ll['LOG'].partition("VARLIST=\n")
l2 = l2[2].rpartition("VARLISTend=\n")
charlist1 = l2[0].split("\n")
del charlist1[len(charlist1) - 1]
charlist1 = [x.casefold() for x in charlist1]
return charlist1 | 0.00339 |
def dump(args):
"""
%prog dump fastbfile
Export ALLPATHS fastb file to fastq file. Use --dir to indicate a previously
run allpaths folder.
"""
p = OptionParser(dump.__doc__)
p.add_option("--dir",
help="Working directory [default: %default]")
p.add_option("--nosim", default=False, action="store_true",
help="Do not simulate qual to 50 [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastbfile, = args
d = opts.dir
if d:
from jcvi.assembly.preprocess import export_fastq
rc = "jump" in fastbfile
export_fastq(d, fastbfile, rc=rc)
return
sim = not opts.nosim
pf = "j" if "jump" in fastbfile else "f"
statsfile = "{0}.lib_stats".format(pf)
if op.exists(statsfile):
os.remove(statsfile)
cmd = "SplitReadsByLibrary READS_IN={0}".format(fastbfile)
cmd += " READS_OUT={0} QUALS=True".format(pf)
sh(cmd)
libs = []
fp = open(statsfile)
next(fp); next(fp) # skip two rows
for row in fp:
if row.strip() == "":
continue
libname = row.split()[0]
if libname == "Unpaired":
continue
libs.append(libname)
logging.debug("Found libraries: {0}".format(",".join(libs)))
cmds = []
for libname in libs:
cmd = "FastbQualbToFastq"
cmd += " HEAD_IN={0}.{1}.AB HEAD_OUT={1}".format(pf, libname)
cmd += " PAIRED=True PHRED_OFFSET=33"
if sim:
cmd += " SIMULATE_QUALS=True"
if pf == 'j':
cmd += " FLIP=True"
cmds.append((cmd, ))
m = Jobs(target=sh, args=cmds)
m.run()
for libname in libs:
cmd = "mv {0}.A.fastq {0}.1.fastq".format(libname)
sh(cmd)
cmd = "mv {0}.B.fastq {0}.2.fastq".format(libname)
sh(cmd) | 0.002096 |
def removefromreadergroup(self, groupname):
"""Remove a reader from a reader group"""
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
if 0 != hresult:
raise EstablishContextException(hresult)
try:
hresult = SCardRemoveReaderFromGroup(hcontext, self.name,
groupname)
if 0 != hresult:
raise RemoveReaderFromGroupException(hresult, self.name,
groupname)
finally:
hresult = SCardReleaseContext(hcontext)
if 0 != hresult:
raise ReleaseContextException(hresult) | 0.006289 |
def radial_density(im, bins=10, voxel_size=1):
r"""
Computes radial density function by analyzing the histogram of voxel
values in the distance transform. This function is defined by
Torquato [1] as:
.. math::
\int_0^\infty P(r)dr = 1.0
    where *P(r)dr* is the probability of finding a voxel lying at a radial
distance between *r* and *dr* from the solid interface. This is equivalent
to a probability density function (*pdf*)
The cumulative distribution is defined as:
.. math::
F(r) = \int_r^\infty P(r)dr
which gives the fraction of pore-space with a radius larger than *r*. This
is equivalent to the cumulative distribution function (*cdf*).
Parameters
----------
im : ND-array
Either a binary image of the pore space with ``True`` indicating the
pore phase (or phase of interest), or a pre-calculated distance
transform which can save time.
bins : int or array_like
This number of bins (if int) or the location of the bins (if array).
This argument is passed directly to Scipy's ``histogram`` function so
        see that docstring for more information. The default is 10 bins, which
        produces a relatively smooth distribution.
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A named-tuple containing several 1D arrays:
*R* - radius, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
Notes
-----
This function should not be taken as a pore size distribution in the
    explicit sense, but rather an indicator of the sizes in the image. The
distance transform contains a very skewed number of voxels with small
values near the solid walls. Nonetheless, it does provide a useful
    indicator, and its mathematical formalism is handy.
Torquato refers to this as the *pore-size density function*, and mentions
that it is also known as the *pore-size distribution function*. These
terms are avoided here since they have specific connotations in porous
media analysis.
References
----------
    [1] Torquato, S. Random Heterogeneous Materials: Microstructure and
Macroscopic Properties. Springer, New York (2002) - See page 48 & 292
"""
if im.dtype == bool:
im = spim.distance_transform_edt(im)
mask = find_dt_artifacts(im) == 0
im[mask] = 0
x = im[im > 0].flatten()
h = sp.histogram(x, bins=bins, density=True)
h = _parse_histogram(h=h, voxel_size=voxel_size)
rdf = namedtuple('radial_density_function',
('R', 'pdf', 'cdf', 'bin_centers', 'bin_edges',
'bin_widths'))
return rdf(h.bin_centers, h.pdf, h.cdf, h.bin_centers, h.bin_edges,
h.bin_widths) | 0.000303 |
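An illustrative, self-contained sketch (not the library's own code) of the underlying idea -- histogram the nonzero values of a distance transform of a random boolean image; the helpers used inside radial_density (find_dt_artifacts, _parse_histogram) are not reproduced here:

import numpy as np
from scipy.ndimage import distance_transform_edt

np.random.seed(0)
im = np.random.rand(200, 200) > 0.4                # True marks the pore phase
dt = distance_transform_edt(im)
x = dt[dt > 0].ravel()
pdf, bin_edges = np.histogram(x, bins=10, density=True)
bin_centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])
cdf = 1.0 - np.cumsum(pdf * np.diff(bin_edges))    # fraction of pore space with radius > r
print(bin_centers.round(2))
print(cdf.round(3))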
def get_notifications(self, login=None, **kwargs):
"""Get the current notifications of a user.
:return: JSON
"""
_login = kwargs.get(
'login',
login or self._login
)
_notif_url = NOTIF_URL.format(login=_login)
return self._request_api(url=_notif_url).json() | 0.0059 |
def _push_forever_keys(self, namespace, key):
"""
Store a copy of the full key for each namespace segment.
:type namespace: str
:type key: str
"""
full_key = '%s%s:%s' % (self.get_prefix(),
hashlib.sha1(encode(self._tags.get_namespace())).hexdigest(),
key)
for segment in namespace.split('|'):
self._store.connection().lpush(self._forever_key(segment), full_key) | 0.008065 |
def csv_to_dict(file_name, file_location):
"""
Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary
"""
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict | 0.00241 |
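A standalone sketch of the same pattern using the standard library directly (the original relies on module-level __os/__csv aliases and a LOGGER that are not shown); the example file name is made up:

import csv
import os

def csv_to_dict_simple(file_name, file_location):
    """Return {1: row_dict, 2: row_dict, ...} like the function above."""
    path = os.path.join(file_location, file_name)
    with open(path, "r", newline="", errors="ignore") as handle:
        return {i: row for i, row in enumerate(csv.DictReader(handle, dialect="excel"), start=1)}

# Hypothetical call: rows = csv_to_dict_simple("inventory.csv", "/tmp")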
def rollout(self, **kwargs):
"""Generate x for open loop movements.
"""
        if 'tau' in kwargs:
timesteps = int(self.timesteps / kwargs['tau'])
else:
timesteps = self.timesteps
self.x_track = np.zeros(timesteps)
self.reset_state()
for t in range(timesteps):
self.x_track[t] = self.x
self.step(**kwargs)
return self.x_track | 0.013393 |
def cancel(self):
"""
Cancel the consumer and stop recieving messages.
This method is a :ref:`coroutine <coroutine>`.
"""
self.sender.send_BasicCancel(self.tag)
try:
yield from self.synchroniser.wait(spec.BasicCancelOK)
except AMQPError:
pass
else:
# No need to call ready if channel closed.
self.reader.ready()
self.cancelled = True
self.cancelled_future.set_result(self)
if hasattr(self.callback, 'on_cancel'):
self.callback.on_cancel() | 0.003407 |
def read(config_file, configspec, server_mode=False, default_section='default_settings', list_values=True):
'''
Read the config file with spec validation
'''
# configspec = ConfigObj(path.join(path.abspath(path.dirname(__file__)), configspec),
# encoding='UTF8',
# interpolation='Template',
# list_values=False,
# _inspec=True)
config = ConfigObj(config_file,
configspec=path.join(path.abspath(path.dirname(__file__)),
configspec),
list_values=list_values)
validation = config.validate(validate.Validator(), preserve_errors=True)
if validation == True:
config = dict(config)
for section in config:
if section != default_section:
if server_mode: # When it's a servers config file, retrieve the correct fqdn
config[section]['availability'] = True
if config[section]['custom_fqdn'] == None:
config[section]['custom_fqdn'] = socket.getfqdn()
for option in config[section]: # retrieve default configuration for missing values
if config[section][option] == None:
config[section][option] = config[default_section][option]
del(config[default_section])
return config
else:
raise ConfiguratorException(config_file, validation) | 0.007702 |
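A hedged usage sketch; the file names and the 'custom_fqdn' key are placeholders based on the docstringed behaviour above, not a verified example:

# Validate servers.cfg against the bundled servers.spec, filling missing
# options from the [default_settings] section.
servers = read('servers.cfg', 'servers.spec', server_mode=True)
for name, options in servers.items():
    print(name, options.get('custom_fqdn'))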
def create_token(self,
token_name,
project_name,
dataset_name,
is_public):
"""
Creates a token with the given parameters.
Arguments:
project_name (str): Project name
dataset_name (str): Dataset name project is based on
token_name (str): Token name
is_public (int): 1 is public. 0 is not public
Returns:
bool: True if project created, false if not created.
"""
return self.resources.create_token(token_name,
project_name,
dataset_name,
is_public) | 0.007833 |
def Tracer_AD_Pe(t_seconds, t_bar, C_bar, Pe):
"""Used by Solver_AD_Pe. All inputs and outputs are unitless. This is the
model function, f(x, ...). It takes the independent variable as the
first argument and the parameters to fit as separate remaining arguments.
:param t_seconds: List of times
:type t_seconds: float list
:param t_bar: Average time spent in the reactor
:type t_bar: float
:param C_bar: Average concentration ((mass of tracer)/(volume of the reactor))
:type C_bar: float
:param Pe: The Peclet number for the reactor.
:type Pe: float
:return: The model concentration as a function of time
:rtype: float list
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import Tracer_AD_Pe
>>> from aguaclara.core.units import unit_registry as u
>>> Tracer_AD_Pe([1, 2, 3, 4, 5]*u.s, 5*u.s, 10*u.mg/u.L, 5)
<Quantity([0.25833732 3.23793989 5.8349833 6.62508831 6.30783131], 'milligram / liter')>
"""
return C_bar*E_Advective_Dispersion(t_seconds/t_bar, Pe) | 0.003745 |
def find_position(edges, prow, bstart, bend, total=5):
"""Find a EMIR CSU bar position in a edge image.
Parameters
==========
edges; ndarray,
a 2d image with 1 where is a border, 0 otherwise
prow: int,
reference 'row' of the bars
bstart: int,
minimum 'x' position of a bar (0-based)
bend: int
maximum 'x' position of a bar (0 based)
total: int
number of rows to check near `prow`
Return
======
list of (x, y) centroids
"""
nt = total // 2
# This bar is too near the border
if prow-nt < 0 or prow + nt >= edges.shape[0]:
return []
s2edges = edges[prow-nt:prow+nt+1, bstart:bend]
    structure = morph.generate_binary_structure(2, 2)  # 8-way connection
har, num_f = mes.label(s2edges, structure=structure)
cen_of_mass = mes.center_of_mass(s2edges, labels=har, index=range(1, num_f + 1))
# center_of_mass returns y, x coordinates
cen_of_mass_off = [(x + bstart, prow-nt + y) for y,x in cen_of_mass]
return cen_of_mass_off | 0.004721 |
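A small self-contained illustration (not from the source) of the labelling/centroid step used above, on a toy edge image:

import numpy as np
from scipy import ndimage as ndi

edges = np.zeros((5, 20), dtype=int)
edges[1:4, 4] = 1       # one vertical edge segment
edges[2, 12:15] = 1     # one horizontal edge segment

structure = ndi.generate_binary_structure(2, 2)   # 8-way connectivity
labels, num = ndi.label(edges, structure=structure)
centroids = ndi.center_of_mass(edges, labels=labels, index=range(1, num + 1))
print(centroids)   # [(2.0, 4.0), (2.0, 13.0)] as (row, col) pairs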
def plot(self, figsize="GROW", parameters=None, chains=None, extents=None, filename=None,
display=False, truth=None, legend=None, blind=None, watermark=None): # pragma: no cover
""" Plot the chain!
Parameters
----------
figsize : str|tuple(float)|float, optional
The figure size to generate. Accepts a regular two tuple of size in inches,
or one of several key words. The default value of ``COLUMN`` creates a figure
of appropriate size of insertion into an A4 LaTeX document in two-column mode.
``PAGE`` creates a full page width figure. ``GROW`` creates an image that
scales with parameters (1.5 inches per parameter). String arguments are not
case sensitive. If you pass a float, it will scale the default ``GROW`` by
that amount, so ``2.0`` would result in a plot 3 inches per parameter.
parameters : list[str]|int, optional
If set, only creates a plot for those specific parameters (if list). If an
integer is given, only plots the fist so many parameters.
chains : int|str, list[str|int], optional
Used to specify which chain to show if more than one chain is loaded in.
Can be an integer, specifying the
chain index, or a str, specifying the chain name.
extents : list[tuple[float]] or dict[str], optional
Extents are given as two-tuples. You can pass in a list the same size as
parameters (or default parameters if you don't specify parameters),
or as a dictionary.
filename : str, optional
If set, saves the figure to this location
display : bool, optional
If True, shows the figure using ``plt.show()``.
truth : list[float] or dict[str], optional
A list of truth values corresponding to parameters, or a dictionary of
truth values indexed by key
legend : bool, optional
If true, creates a legend in your plot using the chain names.
blind : bool|string|list[string], optional
Whether to blind axes values. Can be set to `True` to blind all parameters,
or can pass in a string (or list of strings) which specify the parameters to blind.
watermark : str, optional
A watermark to add to the figure
Returns
-------
figure
the matplotlib figure
"""
chains, parameters, truth, extents, blind = self._sanitise(chains, parameters, truth,
extents, color_p=True, blind=blind)
names = [chain.name for chain in chains]
if legend is None:
legend = len(chains) > 1
# If no chains have names, don't plot the legend
legend = legend and len([n for n in names if n]) > 0
# Calculate cmap extents
unique_color_params = list(set([c.config["color_params"] for c in chains if c.config["color_params"] is not None]))
num_cax = len(unique_color_params)
color_param_extents = {}
for u in unique_color_params:
umin, umax = np.inf, -np.inf
for chain in chains:
if chain.config["color_params"] == u:
data = chain.get_color_data()
if data is not None:
umin = min(umin, data.min())
umax = max(umax, data.max())
color_param_extents[u] = (umin, umax)
grow_size = 1.5
if isinstance(figsize, float):
grow_size *= figsize
figsize = "GROW"
if isinstance(figsize, str):
if figsize.upper() == "COLUMN":
figsize = (5 + (1 if num_cax > 0 else 0), 5)
elif figsize.upper() == "PAGE":
figsize = (10, 10)
elif figsize.upper() == "GROW":
figsize = (grow_size * len(parameters) + num_cax * 1.0, grow_size * len(parameters))
else:
raise ValueError("Unknown figure size %s" % figsize)
elif isinstance(figsize, float):
figsize = (figsize * grow_size * len(parameters), figsize * grow_size * len(parameters))
plot_hists = self.parent.config["plot_hists"]
flip = (len(parameters) == 2 and plot_hists and self.parent.config["flip"])
fig, axes, params1, params2, extents = self._get_figure(parameters, chains=chains, figsize=figsize, flip=flip,
external_extents=extents, blind=blind)
axl = axes.ravel().tolist()
summary = self.parent.config["summary"]
if summary is None:
summary = len(parameters) < 5 and len(self.parent.chains) == 1
if len(chains) == 1:
self._logger.debug("Plotting surfaces for chain of dimension %s" %
(chains[0].chain.shape,))
else:
self._logger.debug("Plotting surfaces for %d chains" % len(chains))
cbar_done = []
chain_points = [c for c in chains if c.config["plot_point"]]
num_chain_points = len(chain_points)
if num_chain_points:
subgroup_names = list(set([c.name for c in chain_points]))
subgroups = [[c for c in chain_points if c.name == n] for n in subgroup_names]
markers = [group[0].config["marker_style"] for group in subgroups] # Only one marker per group
marker_sizes = [[g.config["marker_size"] for g in group] for group in subgroups] # But size can diff
marker_alphas = [group[0].config["marker_alpha"] for group in subgroups] # Only one marker per group
for i, p1 in enumerate(params1):
for j, p2 in enumerate(params2):
if i < j:
continue
ax = axes[i, j]
do_flip = (flip and i == len(params1) - 1)
# Plot the histograms
if plot_hists and i == j:
if do_flip:
self._add_truth(ax, truth, p1)
else:
self._add_truth(ax, truth, None, py=p2)
max_val = None
# Plot each chain
for chain in chains:
if p1 not in chain.parameters:
continue
if not chain.config["plot_contour"]:
continue
param_summary = summary and p1 not in blind
m = self._plot_bars(ax, p1, chain, flip=do_flip, summary=param_summary)
if max_val is None or m > max_val:
max_val = m
if num_chain_points and self.parent.config["global_point"]:
m = self._plot_point_histogram(ax, subgroups, p1, flip=do_flip)
if max_val is None or m > max_val:
max_val = m
if max_val is not None:
if do_flip:
ax.set_xlim(0, 1.1 * max_val)
else:
ax.set_ylim(0, 1.1 * max_val)
else:
for chain in chains:
if p1 not in chain.parameters or p2 not in chain.parameters:
continue
if not chain.config["plot_contour"]:
continue
h = None
if p1 in chain.parameters and p2 in chain.parameters:
h = self._plot_contour(ax, chain, p1, p2, color_extents=color_param_extents)
cp = chain.config["color_params"]
if h is not None and cp is not None and cp not in cbar_done:
cbar_done.append(cp)
aspect = figsize[1] / 0.15
fraction = 0.85 / figsize[0]
cbar = fig.colorbar(h, ax=axl, aspect=aspect, pad=0.03, fraction=fraction, drawedges=False)
label = cp
if label == "weights":
label = "Weights"
elif label == "log_weights":
label = "log(Weights)"
elif label == "posterior":
label = "log(Posterior)"
cbar.set_label(label, fontsize=14)
cbar.solids.set(alpha=1)
if num_chain_points:
self._plot_points(ax, subgroups, markers, marker_sizes, marker_alphas, p1, p2)
self._add_truth(ax, truth, p1, py=p2)
colors = [c.config["color"] for c in chains]
plot_points = [c.config["plot_point"] for c in chains]
plot_contours = [c.config["plot_contour"] for c in chains]
linestyles = [c.config["linestyle"] for c in chains]
linewidths = [c.config["linewidth"] for c in chains]
marker_styles = [c.config["marker_style"] for c in chains]
marker_sizes = [c.config["marker_size"] for c in chains]
legend_kwargs = self.parent.config["legend_kwargs"]
legend_artists = self.parent.config["legend_artists"]
legend_color_text = self.parent.config["legend_color_text"]
legend_location = self.parent.config["legend_location"]
if legend_location is None:
if not flip or len(parameters) > 2:
legend_location = (0, -1)
else:
legend_location = (-1, 0)
outside = (legend_location[0] >= legend_location[1])
if names is not None and legend:
ax = axes[legend_location[0], legend_location[1]]
if "markerfirst" not in legend_kwargs:
# If we have legend inside a used subplot, switch marker order
legend_kwargs["markerfirst"] = outside or not legend_artists
linewidths2 = linewidths if legend_artists else [0]*len(linewidths)
linestyles2 = linestyles if legend_artists else ["-"]*len(linestyles)
marker_sizes2 = marker_sizes if legend_artists else [0]*len(linestyles)
artists = []
done_names = []
final_colors = []
for i, (n, c, ls, lw, marker, size, pp, pc) in enumerate(zip(names, colors, linestyles2, linewidths2,
marker_styles, marker_sizes2, plot_points, plot_contours)):
if n is None or n in done_names:
continue
done_names.append(n)
final_colors.append(c)
size = np.sqrt(size) # plot vs scatter use size differently, hence the sqrt
if pc and not pp:
artists.append(plt.Line2D((0, 1), (0, 0), color=c, ls=ls, lw=lw))
elif not pc and pp:
artists.append(plt.Line2D((0, 1), (0, 0), color=c, ls=ls, lw=0, marker=marker, markersize=size))
else:
artists.append(plt.Line2D((0, 1), (0, 0), color=c, ls=ls, lw=lw, marker=marker, markersize=size))
leg = ax.legend(artists, done_names, **legend_kwargs)
if legend_color_text:
for text, c in zip(leg.get_texts(), final_colors):
text.set_weight("medium")
text.set_color(c)
if not outside:
loc = legend_kwargs.get("loc") or ""
if "right" in loc.lower():
vp = leg._legend_box._children[-1]._children[0]
vp.align = "right"
fig.canvas.draw()
for ax in axes[-1, :]:
offset = ax.get_xaxis().get_offset_text()
ax.set_xlabel('{0} {1}'.format(ax.get_xlabel(), "[{0}]".format(offset.get_text()) if offset.get_text() else ""))
offset.set_visible(False)
for ax in axes[:, 0]:
offset = ax.get_yaxis().get_offset_text()
ax.set_ylabel('{0} {1}'.format(ax.get_ylabel(), "[{0}]".format(offset.get_text()) if offset.get_text() else ""))
offset.set_visible(False)
dpi = 300
if watermark:
if flip and len(parameters) == 2:
ax = axes[-1, 0]
else:
ax = None
self._add_watermark(fig, ax, figsize, watermark, dpi=dpi)
if filename is not None:
if isinstance(filename, str):
filename = [filename]
for f in filename:
self._save_fig(fig, f, dpi)
if display:
plt.show()
return fig | 0.003716 |
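A hedged end-to-end sketch of how this plotter method is typically reached; ChainConsumer(), add_chain and plotter.plot are assumptions about the surrounding public API, and the sample data is synthetic:

import numpy as np
from chainconsumer import ChainConsumer

np.random.seed(1)
samples = np.random.multivariate_normal([0.0, 1.0], [[1.0, 0.5], [0.5, 2.0]], size=20000)

c = ChainConsumer()
c.add_chain(samples, parameters=["$x$", "$y$"], name="toy chain")
fig = c.plotter.plot(figsize="COLUMN", truth=[0.0, 1.0], filename="toy_corner.png")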
def parse_value(self, tup_tree):
"""
Parse a VALUE element and return its text content as a unicode string.
Whitespace is preserved.
The conversion of the text representation of the value to a CIM data
type object requires CIM type information which is not available on the
VALUE element and therefore will be done when parsing higher level
elements that have that information.
::
<!ELEMENT VALUE (#PCDATA)>
"""
self.check_node(tup_tree, 'VALUE', (), (), (), allow_pcdata=True)
return self.pcdata(tup_tree) | 0.003263 |
def prepare_build_dir(self):
'''Ensure that a build dir exists for the recipe. This same single
dir will be used for building all different archs.'''
self.build_dir = self.get_build_dir()
self.common_dir = self.get_common_dir()
copy_files(join(self.bootstrap_dir, 'build'), self.build_dir)
copy_files(join(self.common_dir, 'build'), self.build_dir,
override=False)
if self.ctx.symlink_java_src:
info('Symlinking java src instead of copying')
shprint(sh.rm, '-r', join(self.build_dir, 'src'))
shprint(sh.mkdir, join(self.build_dir, 'src'))
for dirn in listdir(join(self.bootstrap_dir, 'build', 'src')):
shprint(sh.ln, '-s', join(self.bootstrap_dir, 'build', 'src', dirn),
join(self.build_dir, 'src'))
with current_directory(self.build_dir):
with open('project.properties', 'w') as fileh:
fileh.write('target=android-{}'.format(self.ctx.android_api)) | 0.002865 |
def get_documented_add(self, record_descriptors):
"""
this hack is used to document add function
a methods __doc__ attribute is read-only (or must use metaclasses, what I certainly don't want to do...)
we therefore create a function (who's __doc__ attribute is read/write), and will bind it to Table in __init__
"""
def add(data=None, **or_data):
"""
Parameters
----------
data: dictionary containing field lowercase names or index as keys, and field values as values (dict syntax)
or_data: keyword arguments containing field names as keys (kwargs syntax)
A lowercase name is the lowercase EnergyPlus name, for which all non alpha-numeric characters have been replaced
by underscores. All multiple consecutive underscores are then replaced by one unique underscore.
The two syntaxes are not meant to cohabit. The kwargs syntax is nicer, but does not enable to use indexes
instead of names.
Examples
--------
for Schedule:Compact table:
schedule = table.add( # kwarg syntax
name="Heating Setpoint Schedule - new[1]",
schedule_type_limits_name="Any Number",
field_1="Through: 12/31",
field_2="For: AllDays",
field_3="Until: 24:00,20.0"
)
            schedule = table.add({  # dict syntax, mixing names and index keys
                "name": "Heating Setpoint Schedule - new[1]",
                "schedule_type_limits_name": "Any Number",
                2: "Through: 12/31",
                3: "For: AllDays",
                4: "Until: 24:00,20.0"
            })
Returns
-------
Created Record instance
"""
return self.batch_add([or_data if data is None else data])[0]
add.__doc__ = "\n".join([fd.ref.lower() for fd in record_descriptors if fd.ref is not None])
return add | 0.004769 |
def _set_logging(
logger_name="colin",
level=logging.INFO,
handler_class=logging.StreamHandler,
handler_kwargs=None,
format='%(asctime)s.%(msecs).03d %(filename)-17s %(levelname)-6s %(message)s',
date_format='%H:%M:%S'):
"""
Set personal logger for this library.
:param logger_name: str, name of the logger
:param level: int, see logging.{DEBUG,INFO,ERROR,...}: level of logger and handler
:param handler_class: logging.Handler instance, default is StreamHandler (/dev/stderr)
:param handler_kwargs: dict, keyword arguments to handler's constructor
:param format: str, formatting style
:param date_format: str, date style in the logs
"""
if level != logging.NOTSET:
logger = logging.getLogger(logger_name)
logger.setLevel(level)
        # do not re-add handlers if they are already present
if not [x for x in logger.handlers if isinstance(x, handler_class)]:
handler_kwargs = handler_kwargs or {}
handler = handler_class(**handler_kwargs)
handler.setLevel(level)
formatter = logging.Formatter(format, date_format)
handler.setFormatter(formatter)
logger.addHandler(handler) | 0.00318 |
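A short usage sketch (not from the source) wiring the helper above to a file handler; handler_kwargs is forwarded to the handler's constructor as documented:

import logging

# Route colin's logs to a file at DEBUG level instead of stderr.
_set_logging(
    level=logging.DEBUG,
    handler_class=logging.FileHandler,
    handler_kwargs={"filename": "colin.log", "mode": "w"},
)
logging.getLogger("colin").debug("logger configured")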
def add_error(name=None, code=None, status=None):
"""Create a new Exception class"""
if not name or not status or not code:
raise Exception("Can't create Exception class %s: you must set both name, status and code" % name)
myexception = type(name, (PyMacaronException, ), {"code": code, "status": status})
globals()[name] = myexception
if code in code_to_class:
raise Exception("ERROR! Exception %s is already defined." % code)
code_to_class[code] = myexception
return myexception | 0.005725 |
def port_provisioned(port_id):
"""Returns true if port still exists."""
session = db.get_reader_session()
with session.begin():
port_model = models_v2.Port
res = bool(session.query(port_model)
.filter(port_model.id == port_id).count())
return res | 0.003367 |
def cancelar_ultima_venda(self, chave_cfe, dados_cancelamento):
"""Sobrepõe :meth:`~satcfe.base.FuncoesSAT.cancelar_ultima_venda`.
:return: Uma resposta SAT especializada em ``CancelarUltimaVenda``.
:rtype: satcfe.resposta.cancelarultimavenda.RespostaCancelarUltimaVenda
"""
resp = self._http_post('cancelarultimavenda',
chave_cfe=chave_cfe,
dados_cancelamento=dados_cancelamento.documento())
conteudo = resp.json()
return RespostaCancelarUltimaVenda.analisar(conteudo.get('retorno')) | 0.006981 |
def make_valid_polygon(shape):
"""
Make a polygon valid. Polygons can be invalid in many ways, such as
self-intersection, self-touching and degeneracy. This process attempts to
make a polygon valid while retaining as much of its extent or area as
possible.
First, we call pyclipper to robustly union the polygon. Using this on its
own appears to be good for "cleaning" the polygon.
This might result in polygons which still have degeneracies according to
    the OGC standard of validity - as pyclipper does not consider these to be
invalid. Therefore we follow by using the `buffer(0)` technique to attempt
to remove any remaining degeneracies.
"""
assert shape.geom_type == 'Polygon'
shape = make_valid_pyclipper(shape)
assert shape.is_valid
return shape | 0.00122 |
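To illustrate the buffer(0) step mentioned above in isolation (the pyclipper-based make_valid_pyclipper helper is not reproduced here), a self-contained shapely example with a self-intersecting "bowtie" ring:

from shapely.geometry import Polygon

bowtie = Polygon([(0, 0), (2, 2), (2, 0), (0, 2)])   # ring crosses itself at (1, 1)
print(bowtie.is_valid)        # False
repaired = bowtie.buffer(0)   # the degeneracy-removal trick used as the second step above
print(repaired.is_valid)      # True
# Which lobes survive depends on ring orientation and the GEOS version in use.
print(repaired.area)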
def _deserialize_dict(data, boxed_type):
"""Deserializes a dict and its elements.
:param data: dict to deserialize.
:type data: dict
:param boxed_type: class literal.
:return: deserialized dict.
:rtype: dict
"""
return {k: _deserialize(v, boxed_type)
for k, v in six.iteritems(data)} | 0.00304 |
def process_generic(self, kind, context):
"""Transform otherwise unhandled kinds of chunks by calling an underscore prefixed function by that name."""
result = None
while True:
chunk = yield result
if chunk is None:
return
result = chunk.clone(line='_' + kind + '(' + chunk.line + ')') | 0.056604 |
def compress_monkey_patch():
"""patch all compress
we need access to variables from widget scss
for example we have::
/themes/bootswatch/cyborg/_variables
but only if is cyborg active for this reasone we need
dynamically append import to every scss file
"""
from compressor.templatetags import compress as compress_tags
from compressor import base as compress_base
compress_base.Compressor.filter_input = filter_input
compress_base.Compressor.output = output
compress_base.Compressor.hunks = hunks
compress_base.Compressor.precompile = precompile
compress_tags.CompressorMixin.render_compressed = render_compressed
from django_pyscss import compressor as pyscss_compressor
pyscss_compressor.DjangoScssFilter.input = input | 0.001253 |