text (string, lengths 78-104k) | score (float64, 0-0.18)
---|---|
def collection_list(self, resource_id, resource_type="collection"):
"""
Fetches a list of all resource and component IDs within the specified resource.
:param long resource_id: The ID of the resource to fetch children from.
:param string resource_type: Specifies whether the resource to fetch is a collection or a child element.
Defaults to 'collection'.
:return: A list of longs representing the database resource IDs for all children of the requested record.
        :rtype: list
"""
ret = []
cursor = self.db.cursor()
if resource_type == "collection":
cursor.execute(
"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId IS NULL AND resourceId=%s",
                (resource_id,),
)
else:
ret.append(resource_id)
cursor.execute(
"SELECT resourceComponentId FROM ResourcesComponents WHERE parentResourceComponentId=%s",
                (resource_id,),
)
rows = cursor.fetchall()
if len(rows):
for row in rows:
ret.extend(self.collection_list(row[0], "description"))
return ret | 0.005604 |
def _need_update(self):
'Returns whether the ProgressBar should redraw the line.'
if self.currval >= self.next_update or self.finished: return True
delta = time.time() - self.last_update_time
return self._time_sensitive and delta > self.poll | 0.010949 |
def explore_server(server_url, username, password):
""" Demo of exploring a cim server for characteristics defined by
the server class
"""
print("WBEM server URL:\n %s" % server_url)
conn = WBEMConnection(server_url, (username, password),
no_verification=True)
server = WBEMServer(conn)
print("Brand:\n %s" % server.brand)
print("Version:\n %s" % server.version)
print("Interop namespace:\n %s" % server.interop_ns)
print("All namespaces:")
for ns in server.namespaces:
print(" %s" % ns)
print("Advertised management profiles:")
org_vm = ValueMapping.for_property(server, server.interop_ns,
'CIM_RegisteredProfile',
'RegisteredOrganization')
for inst in server.profiles:
print_profile_info(org_vm, inst)
indication_profiles = server.get_selected_profiles('DMTF', 'Indications')
print('Profiles for DMTF:Indications')
for inst in indication_profiles:
print_profile_info(org_vm, inst)
server_profiles = server.get_selected_profiles('SNIA', 'Server')
print('Profiles for SNIA:Server')
for inst in server_profiles:
print_profile_info(org_vm, inst)
# get Central Instances
for inst in indication_profiles:
org = org_vm.tovalues(inst['RegisteredOrganization'])
name = inst['RegisteredName']
vers = inst['RegisteredVersion']
print("Central instances for profile %s:%s:%s (component):" % \
(org, name, vers))
try:
ci_paths = server.get_central_instances(
inst.path,
"CIM_IndicationService", "CIM_System", ["CIM_HostedService"])
except Exception as exc:
print("Error: %s" % str(exc))
ci_paths = []
for ip in ci_paths:
print(" %s" % str(ip))
for inst in server_profiles:
org = org_vm.tovalues(inst['RegisteredOrganization'])
name = inst['RegisteredName']
vers = inst['RegisteredVersion']
print("Central instances for profile %s:%s:%s(autonomous):" %
(org, name, vers))
try:
ci_paths = server.get_central_instances(inst.path)
except Exception as exc:
print("Error: %s" % str(exc))
ci_paths = []
for ip in ci_paths:
print(" %s" % str(ip)) | 0.000818 |
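A hedged usage sketch: assuming the pywbem objects used above (WBEMConnection, WBEMServer, ValueMapping) and a print_profile_info helper are in scope, the explorer could be driven with placeholder credentials like this.
# Hypothetical invocation; the URL, username and password are placeholders.
if __name__ == '__main__':
    explore_server('https://localhost:5989', 'user', 'password')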
def _random_best_expander(fringe, iteration, viewer):
'''
    Expander that expands one randomly chosen node on the fringe that
is better than the current (first) node.
'''
current = fringe[0]
neighbors = current.expand(local_search=True)
if viewer:
viewer.event('expanded', [current], [neighbors])
betters = [n for n in neighbors
if n.value > current.value]
if betters:
chosen = random.choice(betters)
if viewer:
viewer.event('chosen_node', chosen)
fringe.append(chosen) | 0.001773 |
def validate_any(prop, value, xpath_map=None):
""" Validates any metadata property, complex or simple (string or array) """
if value is not None:
if prop in (ATTRIBUTES, CONTACTS, DIGITAL_FORMS):
validate_complex_list(prop, value, xpath_map)
elif prop in (BOUNDING_BOX, LARGER_WORKS, RASTER_INFO):
validate_complex(prop, value, xpath_map)
elif prop == DATES:
validate_dates(prop, value, xpath_map)
elif prop == PROCESS_STEPS:
validate_process_steps(prop, value)
elif prop not in _supported_props and xpath_map is not None:
# Validate custom data structures as complex lists by default
validate_complex_list(prop, value, xpath_map)
else:
for val in wrap_value(value, include_empty=True):
validate_type(prop, val, (string_types, list)) | 0.002235 |
def lock_parameter(self, name, par, lock=True):
"""Set parameter to locked/unlocked state. A locked parameter
will be ignored when running methods that free/fix sources or
parameters.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
lock : bool
Set parameter to locked (True) or unlocked (False) state.
"""
name = self.roi.get_source_by_name(name).name
lck_params = self._lck_params.setdefault(name, [])
if lock:
self.free_parameter(name, par, False)
if not par in lck_params:
lck_params += [par]
else:
if par in lck_params:
lck_params.remove(par) | 0.005063 |
def opt(parser: Union[Parser, Sequence[Input]]) -> OptionalParser:
"""Optionally match a parser.
An ``OptionalParser`` attempts to match ``parser``. If it succeeds, it
returns a list of length one with the value returned by the parser as the
only element. If it fails, it returns an empty list.
Args:
parser: Parser or literal
"""
if isinstance(parser, str):
parser = lit(parser)
return OptionalParser(parser) | 0.002179 |
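A minimal usage sketch, assuming the surrounding parser-combinator library's lit and OptionalParser behave as described above; the literal below is illustrative only.
# Hypothetical grammar fragment: an optional trailing semicolon.
# A successful match yields [';'], a failed match yields [] rather than an error.
trailing_semicolon = opt(';')   # string literal is wrapped via lit(';')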
def cache_key(working_directory, arguments, configure_kwargs):
"""Compute a `TensorBoardInfo.cache_key` field.
The format returned by this function is opaque. Clients may only
inspect it by comparing it for equality with other results from this
function.
Args:
working_directory: The directory from which TensorBoard was launched
and relative to which paths like `--logdir` and `--db` are
resolved.
arguments: The command-line args to TensorBoard, as `sys.argv[1:]`.
Should be a list (or tuple), not an unparsed string. If you have a
raw shell command, use `shlex.split` before passing it to this
function.
configure_kwargs: A dictionary of additional argument values to
override the textual `arguments`, with the same semantics as in
`tensorboard.program.TensorBoard.configure`. May be an empty
dictionary.
Returns:
A string such that if two (prospective or actual) TensorBoard
invocations have the same cache key then it is safe to use one in
place of the other. The converse is not guaranteed: it is often safe
to change the order of TensorBoard arguments, or to explicitly set
them to their default values, or to move them between `arguments`
and `configure_kwargs`, but such invocations may yield distinct
cache keys.
"""
if not isinstance(arguments, (list, tuple)):
raise TypeError(
"'arguments' should be a list of arguments, but found: %r "
"(use `shlex.split` if given a string)"
% (arguments,)
)
datum = {
"working_directory": working_directory,
"arguments": arguments,
"configure_kwargs": configure_kwargs,
}
raw = base64.b64encode(
json.dumps(datum, sort_keys=True, separators=(",", ":")).encode("utf-8")
)
# `raw` is of type `bytes`, even though it only contains ASCII
# characters; we want it to be `str` in both Python 2 and 3.
return str(raw.decode("ascii")) | 0.0041 |
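A small sanity-check sketch, assuming cache_key is importable from this module: identical inputs always produce equal (opaque) keys, which is the only supported comparison.
key_a = cache_key('/home/me', ['--logdir', 'runs/'], {})
key_b = cache_key('/home/me', ['--logdir', 'runs/'], {})
assert key_a == key_b            # same inputs -> same opaque key
assert isinstance(key_a, str)    # always a str, never bytes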
def _ConstructReference(cls, pairs=None, flat=None,
reference=None, serialized=None, urlsafe=None,
app=None, namespace=None, parent=None):
"""Construct a Reference; the signature is the same as for Key."""
if cls is not Key:
raise TypeError('Cannot construct Key reference on non-Key class; '
'received %r' % cls)
if (bool(pairs) + bool(flat) + bool(reference) + bool(serialized) +
bool(urlsafe)) != 1:
raise TypeError('Cannot construct Key reference from incompatible keyword '
'arguments.')
if flat or pairs:
if flat:
if len(flat) % 2:
raise TypeError('_ConstructReference() must have an even number of '
'positional arguments.')
pairs = [(flat[i], flat[i + 1]) for i in xrange(0, len(flat), 2)]
elif parent is not None:
pairs = list(pairs)
if not pairs:
raise TypeError('Key references must consist of at least one pair.')
if parent is not None:
if not isinstance(parent, Key):
raise datastore_errors.BadValueError(
'Expected Key instance, got %r' % parent)
pairs[:0] = parent.pairs()
if app:
if app != parent.app():
raise ValueError('Cannot specify a different app %r '
'than the parent app %r' %
(app, parent.app()))
else:
app = parent.app()
if namespace is not None:
if namespace != parent.namespace():
raise ValueError('Cannot specify a different namespace %r '
'than the parent namespace %r' %
(namespace, parent.namespace()))
else:
namespace = parent.namespace()
reference = _ReferenceFromPairs(pairs, app=app, namespace=namespace)
else:
if parent is not None:
raise TypeError('Key reference cannot be constructed when the parent '
'argument is combined with either reference, serialized '
'or urlsafe arguments.')
if urlsafe:
serialized = _DecodeUrlSafe(urlsafe)
if serialized:
reference = _ReferenceFromSerialized(serialized)
if not reference.path().element_size():
raise RuntimeError('Key reference has no path or elements (%r, %r, %r).'
% (urlsafe, serialized, str(reference)))
# TODO: ensure that each element has a type and either an id or a name
if not serialized:
reference = _ReferenceFromReference(reference)
# You needn't specify app= or namespace= together with reference=,
# serialized= or urlsafe=, but if you do, their values must match
# what is already in the reference.
if app is not None:
ref_app = reference.app()
if app != ref_app:
raise RuntimeError('Key reference constructed uses a different app %r '
'than the one specified %r' %
(ref_app, app))
if namespace is not None:
ref_namespace = reference.name_space()
if namespace != ref_namespace:
raise RuntimeError('Key reference constructed uses a different '
'namespace %r than the one specified %r' %
(ref_namespace, namespace))
return reference | 0.008722 |
def get_metrics(metrics_description):
"""Get metrics from a list of dictionaries. """
return utils.get_objectlist(metrics_description,
config_key='data_analyzation_plugins',
module=sys.modules[__name__]) | 0.003636 |
def get_notables(self, id_num):
"""Return the notables of the activity with the given id.
"""
url = self._build_url('my', 'activities', id_num, 'notables')
return self._json(url) | 0.009524 |
def render_summary(self, include_title=True, request=None):
"""Render the traceback for the interactive console."""
title = ''
frames = []
classes = ['traceback']
if not self.frames:
classes.append('noframe-traceback')
if include_title:
if self.is_syntax_error:
title = text_('Syntax Error')
else:
title = text_('Traceback <small>(most recent call last)'
'</small>')
for frame in self.frames:
frames.append(
text_('<li%s>%s') % (
frame.info and text_(' title="%s"' % escape(frame.info)) or
text_(''),
frame.render()
))
if self.is_syntax_error:
description_wrapper = text_('<pre class=syntaxerror>%s</pre>')
else:
description_wrapper = text_('<blockquote>%s</blockquote>')
vars = {
'classes': text_(' '.join(classes)),
'title': title and text_('<h3 class="traceback">%s</h3>'
% title) or text_(''),
'frames': text_('\n'.join(frames)),
'description': description_wrapper % escape(self.exception),
}
app = request.app
template = app.ps.jinja2.env.get_template('debugtoolbar/exception_summary.html')
return template.render(app=app, request=request, **vars) | 0.002028 |
def proxy_callback_allowed(service, pgturl):
"""Check if a given proxy callback is allowed for the given service identifier."""
if hasattr(settings, 'MAMA_CAS_SERVICES'):
return _is_allowed('proxy_callback_allowed', service, pgturl)
return _is_valid_service_url(service) | 0.006897 |
def group_records_by_type(records, update_events):
"""Break records into two lists; create/update events and delete events.
:param records:
:param update_events:
:return update_records, delete_records:
"""
update_records, delete_records = [], []
for record in records:
if record.get("detail-type", "") == "Scheduled Event":
LOG.error("[X] Received a Scheduled Event in the Queue... Please check that your environment is set up"
" correctly.")
continue
# Ignore SQS junk messages (like subscription notices and things):
if not record.get("detail"):
continue
# Do not capture error events:
if not record["detail"].get("errorCode"):
if record['detail']['eventName'] in update_events:
update_records.append(record)
else:
delete_records.append(record)
return update_records, delete_records | 0.002053 |
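An illustrative call, assuming group_records_by_type is importable; the records below are simplified stand-ins for real event payloads and the update-event set is hypothetical.
records = [
    {"detail": {"eventName": "CreateBucket"}},                        # update
    {"detail": {"eventName": "DeleteBucket"}},                        # delete
    {"detail": {"errorCode": "AccessDenied",
                "eventName": "CreateBucket"}},                        # error -> dropped
    {"body": "subscription notice"},                                  # junk -> dropped
]
updates, deletes = group_records_by_type(records, {"CreateBucket"})
# updates -> [{"detail": {"eventName": "CreateBucket"}}]
# deletes -> [{"detail": {"eventName": "DeleteBucket"}}]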
def getOneMessage ( self ):
"""
I pull one complete message off the buffer and return it decoded
as a dict. If there is no complete message in the buffer, I
return None.
        Note that the buffer can contain more than one message. You
should therefore call me in a loop until I return None.
"""
( mbytes, hbytes ) = self._findMessageBytes ( self.buffer )
if not mbytes:
return None
msgdata = self.buffer[:mbytes]
self.buffer = self.buffer[mbytes:]
hdata = msgdata[:hbytes]
elems = hdata.split ( '\n' )
cmd = elems.pop ( 0 )
headers = {}
# We can't use a simple split because the value can legally contain
# colon characters (for example, the session returned by ActiveMQ).
for e in elems:
try:
                i = e.index ( ':' )  # index() raises ValueError when ':' is absent
except ValueError:
continue
k = e[:i].strip()
v = e[i+1:].strip()
headers [ k ] = v
# hbytes points to the start of the '\n\n' at the end of the header,
# so 2 bytes beyond this is the start of the body. The body EXCLUDES
# the final two bytes, which are '\x00\n'. Note that these 2 bytes
# are UNRELATED to the 2-byte '\n\n' that Frame.pack() used to insert
# into the data stream.
body = msgdata[hbytes+2:-2]
msg = { 'cmd' : cmd,
'headers' : headers,
'body' : body,
}
return msg | 0.017823 |
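A usage sketch of the drain loop the docstring describes, assuming a STOMP buffer object exposing getOneMessage as above; the append API and handler names are hypothetical.
# Hypothetical drain loop: feed raw socket data into the buffer, then pull
# complete frames until None signals there is no full frame left.
buf.appendData(raw_bytes_from_socket)        # assumed buffer-append method
while True:
    msg = buf.getOneMessage()
    if msg is None:
        break
    handle(msg['cmd'], msg['headers'], msg['body'])   # hypothetical handler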
def parse(self, format_string):
"""Parse color syntax from a formatted string."""
txt, state = '', 0
colorstack = [(None, None)]
itokens = self.tokenize(format_string)
for token, escaped in itokens:
if token == self._START_TOKEN and not escaped:
if txt:
yield txt, colorstack[-1]
txt = ''
state += 1
colors = self.extract_syntax(colorise.compat.next(itokens)[0])
colorstack.append(tuple(b or a
for a, b in zip(colorstack[-1],
colors)))
elif token == self._FMT_TOKEN and not escaped:
# if state == 0:
# raise ColorSyntaxError("Missing '{0}'"
# .format(self._START_TOKEN))
if state % 2 != 0:
state += 1
else:
txt += token
elif token == self._STOP_TOKEN and not escaped:
if state < 2:
raise ColorSyntaxError("Missing '{0}' or '{1}'"
.format(self._STOP_TOKEN,
self._FMT_TOKEN))
if txt:
yield txt, colorstack[-1]
txt = ''
state -= 2
colorstack.pop()
else:
txt += token
if state != 0:
raise ColorSyntaxError("Invalid color format")
if txt:
yield txt, colorstack[-1] | 0.001191 |
def block_sep1(self, Y):
r"""Separate variable into component corresponding to
:math:`\mathbf{y}_1` in :math:`\mathbf{y}\;\;`.
"""
return Y[(slice(None),)*self.blkaxis + (slice(self.blkidx, None),)] | 0.008658 |
def get_readable_time(t):
"""
Format the time to a readable format.
Parameters
----------
t : int
Time in ms
Returns
-------
string
        The time split into the highest used units (minutes, hours, ...)
"""
ms = t % 1000
t -= ms
t /= 1000
s = t % 60
t -= s
t /= 60
minutes = t % 60
t -= minutes
t /= 60
if t != 0:
return "%ih, %i minutes %is %ims" % (t, minutes, s, ms)
elif minutes != 0:
return "%i minutes %is %ims" % (minutes, s, ms)
elif s != 0:
return "%is %ims" % (s, ms)
else:
return "%ims" % ms | 0.00158 |
def search_hits(self, sort_by='CreationTime', sort_direction='Ascending',
page_size=10, page_number=1, response_groups=None):
"""
Return a page of a Requester's HITs, on behalf of the Requester.
The operation returns HITs of any status, except for HITs that
have been disposed with the DisposeHIT operation.
Note:
The SearchHITs operation does not accept any search parameters
that filter the results.
"""
params = {'SortProperty' : sort_by,
'SortDirection' : sort_direction,
'PageSize' : page_size,
'PageNumber' : page_number}
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
return self._process_request('SearchHITs', params, [('HIT', HIT),]) | 0.011905 |
def get_table_from_csv(filename='ssg_report_aarons_returns.csv', delimiter=',', dos=False):
"""Dictionary of sequences from CSV file"""
table = []
with open(filename, 'rb') as f:
reader = csv.reader(f, dialect='excel', delimiter=delimiter)
for row in reader:
table += [row]
if not dos:
return table
return dos_from_table(table) | 0.005222 |
def from_mpl(fig, savefig_kw=None):
"""Create a SVG figure from a ``matplotlib`` figure.
Parameters
----------
fig : matplotlib.Figure instance
savefig_kw : dict
keyword arguments to be passed to matplotlib's
`savefig`
Returns
-------
SVGFigure
newly created :py:class:`SVGFigure` initialised with the string
content.
Examples
--------
If you want to overlay the figure on another SVG, you may want to pass
the `transparent` option:
>>> from svgutils import transform
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> line, = plt.plot([1,2])
>>> svgfig = transform.from_mpl(fig,
... savefig_kw=dict(transparent=True))
>>> svgfig.getroot()
<svgutils.transform.GroupElement object at ...>
"""
fid = StringIO()
if savefig_kw is None:
savefig_kw = {}
try:
fig.savefig(fid, format='svg', **savefig_kw)
except ValueError:
        raise ValueError("No matplotlib SVG backend")
fid.seek(0)
fig = fromstring(fid.read())
# workaround mpl units bug
w, h = fig.get_size()
fig.set_size((w.replace('pt', ''), h.replace('pt', '')))
return fig | 0.000805 |
def read_frvect(vect, epoch, start, end, name=None, series_class=TimeSeries):
"""Read an array from an `FrVect` structure
Parameters
----------
vect : `LDASTools.frameCPP.FrVect`
        the frame vector structure to read
start : `float`
the GPS start time of the request
end : `float`
the GPS end time of the request
epoch : `float`
the GPS start time of the containing `FrData` structure
name : `str`, optional
the name of the output `series_class`; this is also used
to ignore ``FrVect`` structures containing other information
series_class : `type`, optional
the `Series` sub-type to return.
Returns
-------
series : `~gwpy.timeseries.TimeSeriesBase`
the formatted data series
Raises
------
_Skip
if this vect doesn't overlap with the requested
``[start, end)`` interval, or the name doesn't match.
"""
# only read FrVect with matching name (or no name set)
# frame spec allows for arbitrary other FrVects
# to hold other information
if vect.GetName() and name and vect.GetName() != name:
raise _Skip()
# get array
arr = vect.GetDataArray()
nsamp = arr.size
# and dimensions
dim = vect.GetDim(0)
dx = dim.dx
x0 = dim.startX
# start and end GPS times of this FrVect
dimstart = epoch + x0
dimend = dimstart + nsamp * dx
# index of first required sample
nxstart = int(max(0., float(start-dimstart)) / dx)
# requested start time is after this frame, skip
if nxstart >= nsamp:
raise _Skip()
# index of end sample
if end:
nxend = int(nsamp - ceil(max(0., float(dimend-end)) / dx))
else:
nxend = None
if nxstart or nxend:
arr = arr[nxstart:nxend]
# -- cast as a series
# get unit
unit = vect.GetUnitY() or None
# create array
series = series_class(arr, t0=dimstart+nxstart*dx, dt=dx, name=name,
channel=name, unit=unit, copy=False)
# add information to channel
series.channel.sample_rate = series.sample_rate.value
series.channel.unit = unit
series.channel.dtype = series.dtype
return series | 0.000446 |
def _read_utf(cls, data, pos, kind=None):
"""
:param kind: Optional; a human-friendly identifier for the kind of UTF-8 data we're loading (e.g. is it a keystore alias? an algorithm identifier? something else?).
Used to construct more informative exception messages when a decoding error occurs.
"""
size = b2.unpack_from(data, pos)[0]
pos += 2
try:
return data[pos:pos+size].decode('utf-8'), pos+size
except (UnicodeEncodeError, UnicodeDecodeError) as e:
raise BadKeystoreFormatException(("Failed to read %s, contains bad UTF-8 data: %s" % (kind, str(e))) if kind else \
("Encountered bad UTF-8 data: %s" % str(e))) | 0.009198 |
async def _on_event(self, event_):
"""Receive a hangouts_pb2.Event and fan out to Conversations.
Args:
event_: hangouts_pb2.Event instance
"""
conv_id = event_.conversation_id.id
try:
conv = await self._get_or_fetch_conversation(conv_id)
except exceptions.NetworkError:
logger.warning(
'Failed to fetch conversation for event notification: %s',
conv_id
)
else:
self._sync_timestamp = parsers.from_timestamp(event_.timestamp)
conv_event = conv.add_event(event_)
# conv_event may be None if the event was a duplicate.
if conv_event is not None:
await self.on_event.fire(conv_event)
await conv.on_event.fire(conv_event) | 0.002401 |
def multi_map(key, iterable, *, default_dict=False):
"""Collect data into a multi-map.
Arguments
----------
key : function
A function that accepts an element retrieved from the
iterable and returns the key to be used in the multi-map
iterable : iterable
default_dict : boolean
Indicates whether or not the returned multi-map is an
instance of defaultdict(list)
Returns
-------
A dictionary of lists where the dictionary is either an instance of dict()
or defaultdict(list) based on the *default_dict* boolean and each list
contains the elements that are associated with the key in the order in
which they occur in the iterable.
"""
result = collections.defaultdict(list)
for rec in iterable:
result[key(rec)].append(rec)
return result if default_dict else dict(result) | 0.001074 |
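A self-contained usage example, assuming multi_map is importable: group words by their first letter, preserving input order within each bucket.
words = ['apple', 'avocado', 'banana', 'blueberry', 'cherry']
by_letter = multi_map(lambda w: w[0], words)
# {'a': ['apple', 'avocado'], 'b': ['banana', 'blueberry'], 'c': ['cherry']}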
def unban_chat_member(
self,
chat_id: Union[int, str],
user_id: Union[int, str]
) -> bool:
"""Use this method to unban a previously kicked user in a supergroup or channel.
The user will **not** return to the group or channel automatically, but will be able to join via link, etc.
You must be an administrator for this to work.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
user_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target user.
For a contact that exists in your Telegram address book you can use his phone number (str).
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
self.send(
functions.channels.EditBanned(
channel=self.resolve_peer(chat_id),
user_id=self.resolve_peer(user_id),
banned_rights=types.ChatBannedRights(
until_date=0
)
)
)
return True | 0.005761 |
def rename(self, from_symbol, to_symbol, audit=None):
"""
Rename a symbol
Parameters
----------
from_symbol: str
the existing symbol that will be renamed
to_symbol: str
the new symbol name
audit: dict
audit information
"""
sym = self._get_symbol_info(from_symbol)
if not sym:
raise NoDataFoundException('No data found for %s' % (from_symbol))
if self._get_symbol_info(to_symbol) is not None:
raise Exception('Symbol %s already exists' % (to_symbol))
mongo_retry(self._collection.update_many)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._symbols.update_one)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._mdata.update_many)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._audit.update_many)({'symbol': from_symbol},
{'$set': {'symbol': to_symbol}})
if audit is not None:
audit['symbol'] = to_symbol
audit['action'] = 'symbol rename'
audit['old_symbol'] = from_symbol
self._audit.insert_one(audit) | 0.002147 |
def buy_open_order_quantity(self):
"""
        [int] total unfilled quantity of open orders on the BUY side
"""
return sum(order.unfilled_quantity for order in self.open_orders if
order.side == SIDE.BUY and order.position_effect == POSITION_EFFECT.OPEN) | 0.012097 |
def _update_history(self):
"""Save the current test information to history json."""
ipa_utils.update_history_log(
self.history_log,
description=self.description,
test_log=self.log_file
) | 0.00813 |
def installOrResume(self, userstore):
"""
Install this product on a user store. If this product has been
installed on the user store already and the installation is suspended,
it will be resumed. If it exists and is not suspended, an error will be
raised.
"""
for i in userstore.query(Installation, Installation.types == self.types):
if i.suspended:
unsuspendTabProviders(i)
return
else:
raise RuntimeError("installOrResume called for an"
" installation that isn't suspended")
else:
self.installProductOn(userstore) | 0.004298 |
def render_template(self, template_path, context=None):
"""
This function has been deprecated. It calls render_django_template to support backwards compatibility.
"""
warnings.warn(
"ResourceLoader.render_template has been deprecated in favor of ResourceLoader.render_django_template"
)
return self.render_django_template(template_path, context) | 0.009877 |
def add_exploration(traj):
"""Explores different values of `I` and `tau_ref`."""
print('Adding exploration of I and tau_ref')
explore_dict = {'neuron.I': np.arange(0, 1.01, 0.01).tolist(),
'neuron.tau_ref': [5.0, 7.5, 10.0]}
explore_dict = cartesian_product(explore_dict, ('neuron.tau_ref', 'neuron.I'))
# The second argument, the tuple, specifies the order of the cartesian product,
# The variable on the right most side changes fastest and defines the
# 'inner for-loop' of the cartesian product
traj.f_explore(explore_dict) | 0.005155 |
def limit(self, limit):
"""
Set absolute limit on number of images to return, or set to None to return
as many results as needed; default 50 posts.
"""
params = join_params(self.parameters, {"limit": limit})
return self.__class__(**params) | 0.003788 |
def get_all():
'''
Return all installed services
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
cmd = 'launchctl list'
service_lines = [
line for line in __salt__['cmd.run'](cmd).splitlines()
if not line.startswith('PID')
]
service_labels_from_list = [
line.split("\t")[2] for line in service_lines
]
service_labels_from_services = list(_available_services().keys())
return sorted(set(service_labels_from_list + service_labels_from_services)) | 0.001838 |
def sort_by_preference(options, prefer):
"""
:param options: List of options
:param prefer: Prefered options
:return:
Pass in a list of options, return options in 'prefer' first
>>> sort_by_preference(["cairo", "cairocffi"], ["cairocffi"])
["cairocffi", "cairo"]
"""
if not prefer:
return options
return sorted(options, key=lambda x: (prefer + options).index(x)) | 0.002427 |
def _compute_heating_rates(self):
"""Computes energy flux convergences to get heating rates in :math:`W/m^2`.
"""
for varname, value in self.state.items():
self.heating_rate[varname] = - self.b * (value - global_mean(value)) | 0.015326 |
def return_hdr(self):
"""Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
Notes
-----
the time is probably in "local" Unix Time, which is in the local time
zone, so we read it as "UTC" (meaning, do not apply timezone
transformation) and then remove timezone info.
The only doubt I have is how to interpret the "SystemOffset" time.
I assume it's in s, and that would fix most of the time zone problems,
but it does not take into account DST. Or maybe "SystemOffset" is in
micros and we need to apply the correct time zone to TimeStamp Unix
time. This needs to be tested with a Moberg system.
"""
subj_id = str()
patient = parse(join(self.filename, 'patient.info'))
for patientname in ['PatientFirstName', 'PatientLastName']:
subj_id += patient.findall(patientname)[0].text.strip()
unix_time = int(patient.findall('TimeStamp')[0].text.strip()) / 1e6
system_offset = int(patient.findall('SystemOffset')[0].text.strip())
start_time = (datetime.fromtimestamp(unix_time, TIMEZONE) +
timedelta(seconds=system_offset)).replace(tzinfo=None)
s_freq = 256 # could not find it in the text files
montage = parse(join(self.filename, 'Montage.xml'))
mont = montage.find('Montage')
chan_name = [chan.get('lead') for chan in mont.findall('Channel')
if chan.get('role') == 'REFERENTIAL_INPUT']
data_size = getsize(join(self.filename, EEG_FILE))
n_samples = int(data_size / DATA_PRECISION / len(chan_name))
self.n_smp = n_samples
self.n_chan = len(chan_name)
settings = parse(join(self.filename, SETTINGS_FILE))
conversion = settings.findall('SampleConversion')[0].text.strip()
dig_min, dig_max, anl_min, anl_max = [int(x) for x in
conversion.split(',')]
if dig_max == -dig_min and anl_max == -anl_min:
self.convertion = lambda dat: dat / dig_max * anl_max
else: # pragma: no cover
self.convertion = lambda dat: ((dat + dig_min) /
(dig_max - dig_min) *
(anl_max - anl_min) + anl_min)
orig = {'patient': patient,
'montage': montage,
'settings': settings,
}
return subj_id, start_time, s_freq, chan_name, n_samples, orig | 0.000675 |
def dump(self, dest_pattern="{id}.jpg", override=True, max_size=None, bits=8, contrast=None, gamma=None,
colormap=None, inverse=None):
"""
Download the image with optional image modifications.
Parameters
----------
dest_pattern : str, optional
Destination path for the downloaded image. "{X}" patterns are replaced by the value of X attribute
if it exists.
override : bool, optional
            True if a file with the same name can be overridden by the new file.
max_size : int, tuple, optional
Maximum size (width or height) of returned image. None to get original size.
bits : int (8,16,32) or str ("max"), optional
Bit depth (bit per channel) of returned image. "max" returns the original image bit depth
contrast : float, optional
Optional contrast applied on returned image.
gamma : float, optional
Optional gamma applied on returned image.
colormap : int, optional
Cytomine identifier of a colormap to apply on returned image.
inverse : bool, optional
True to inverse color mapping, False otherwise.
Returns
-------
downloaded : bool
True if everything happens correctly, False otherwise. As a side effect, object attribute "filename"
is filled with downloaded file path.
"""
if self.id is None:
raise ValueError("Cannot dump an annotation with no ID.")
pattern = re.compile("{(.*?)}")
dest_pattern = re.sub(pattern, lambda m: str(getattr(self, str(m.group(0))[1:-1], "_")), dest_pattern)
destination = os.path.dirname(dest_pattern)
filename, extension = os.path.splitext(os.path.basename(dest_pattern))
extension = extension[1:]
if extension not in ("jpg", "png", "tif", "tiff"):
extension = "jpg"
if not os.path.exists(destination):
os.makedirs(destination)
if isinstance(max_size, tuple) or max_size is None:
max_size = max(self.width, self.height)
parameters = {
"maxSize": max_size,
"contrast": contrast,
"gamma": gamma,
"colormap": colormap,
"inverse": inverse,
"bits": bits
}
file_path = os.path.join(destination, "{}.{}".format(filename, extension))
url = self.preview[:self.preview.index("?")]
url = url.replace(".png", ".{}".format(extension))
result = Cytomine.get_instance().download_file(url, file_path, override, parameters)
if result:
self.filename = file_path
return result | 0.004029 |
def onSave(self, grid):#, age_data_type='site'):
"""
Save grid data in the data object
"""
# deselect column, including remove 'EDIT ALL' label
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# save all changes to er_magic data object
self.grid_builder.save_grid_data()
# don't actually write data in this step (time-consuming)
# instead, write to files when user is done editing
#self.er_magic_data.write_files()
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION) | 0.00817 |
def debug_print(*message):
"""Output debug messages to stdout"""
warnings.warn("debug_print is deprecated; use the logging module instead.")
if get_debug_level():
ss = STDOUT
if PY3:
# This is needed after restarting and using debug_print
for m in message:
ss.buffer.write(str(m).encode('utf-8'))
print('', file=ss)
else:
print(*message, file=ss) | 0.002188 |
def mappedPolygon(self, polygon, path=None, percent=0.5):
"""
        Maps the input polygon to the input path \
        used when drawing items along the path. If no \
        specific path is supplied, then this object's own \
        path will be used. It will rotate and move the \
        polygon according to the input percentage.
:param polygon <QPolygonF>
:param path <QPainterPath>
:param percent <float>
:return <QPolygonF> mapped_poly
"""
translatePerc = percent
anglePerc = percent
        # we don't want to allow the angle percentage greater than 0.95
        # or less than 0.05 or we won't get a good rotation angle
if 0.95 <= anglePerc:
anglePerc = 0.98
elif anglePerc <= 0.05:
anglePerc = 0.05
if not path:
path = self.path()
if not (path and path.length()):
return QPolygonF()
# transform the polygon to the path
point = path.pointAtPercent(translatePerc)
angle = path.angleAtPercent(anglePerc)
# rotate about the 0 axis
transform = QTransform().rotate(-angle)
polygon = transform.map(polygon)
# move to the translation point
transform = QTransform().translate(point.x(), point.y())
# create the rotated polygon
mapped_poly = transform.map(polygon)
self._polygons.append(mapped_poly)
return mapped_poly | 0.002618 |
def expected_h(nvals, fit="RANSAC"):
"""
Uses expected_rs to calculate the expected value for the Hurst exponent h
based on the values of n used for the calculation.
Args:
nvals (iterable of int):
the values of n used to calculate the individual (R/S)_n
KWargs:
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
is more robust to outliers
Returns:
float:
expected h for white noise
"""
rsvals = [expected_rs(n) for n in nvals]
poly = poly_fit(np.log(nvals), np.log(rsvals), 1, fit=fit)
return poly[0] | 0.007519 |
def visit_html(self, node):
"""
Generate html elements and schematic json
"""
parentClsNode = node.parent.parent
assert parentClsNode.attributes['objtype'] == 'class'
assert parentClsNode.attributes['domain'] == 'py'
sign = node.parent.parent.children[0]
assert isinstance(sign, desc_signature)
absolute_name = sign.attributes['ids'][0]
_construct = node["constructor_fn "]
serialno = node["serialno"]
try:
if _construct is None:
unitCls = generic_import(absolute_name)
if not issubclass(unitCls, Unit):
raise AssertionError(
"Can not use hwt-schematic sphinx directive and create schematic"
" for %s because it is not subclass of %r" % (absolute_name, Unit))
u = unitCls()
else:
assert len(_construct) > 0 and RE_IS_ID.match(_construct), _construct
_absolute_name = []
assert ".." not in absolute_name, absolute_name
for n in absolute_name.split(sep=".")[:-1]:
if n != "":
_absolute_name.append(n)
_absolute_name.append(_construct)
constructor_fn = generic_import(_absolute_name)
u = constructor_fn()
if not isinstance(u, Unit):
raise AssertionError(
"Can not use hwt-schematic sphinx directive and create schematic"
" for %s because function did not returned instance of %r, (%r)" % (
_absolute_name, Unit, u))
schem_file = SchematicPaths.get_sch_file_name_absolute(
self.document, absolute_name, serialno)
makedirs(path.dirname(schem_file), exist_ok=True)
with open(schem_file, "w") as f:
synthesised(u, DEFAULT_PLATFORM)
g = UnitToLNode(u, optimizations=DEFAULT_LAYOUT_OPTIMIZATIONS)
idStore = ElkIdStore()
data = g.toElkJson(idStore)
json.dump(data, f)
viewer = SchematicPaths.get_sch_viewer_link(self.document)
sch_name = SchematicPaths.get_sch_file_name(
self.document, absolute_name, serialno)
ref = nodes.reference(text=_("schematic"), # internal=False,
refuri="%s?schematic=%s" % (
viewer,
path.join(SchematicPaths.SCHEMATIC_DIR_PREFIX,
sch_name)))
node += ref
except Exception as e:
logging.error(e, exc_info=True)
raise Exception(
"Error occured while processing of %s" % absolute_name) | 0.003756 |
def get_groundings(entity):
"""Return groundings as db_refs for an entity."""
def get_grounding_entries(grounding):
if not grounding:
return None
entries = []
values = grounding.get('values', [])
# Values could still have been a None entry here
if values:
for entry in values:
ont_concept = entry.get('ontologyConcept')
value = entry.get('value')
if ont_concept is None or value is None:
continue
entries.append((ont_concept, value))
return entries
# Save raw text and Eidos scored groundings as db_refs
db_refs = {'TEXT': entity['text']}
groundings = entity.get('groundings')
if not groundings:
return db_refs
for g in groundings:
entries = get_grounding_entries(g)
# Only add these groundings if there are actual values listed
if entries:
key = g['name'].upper()
if key == 'UN':
db_refs[key] = [(s[0].replace(' ', '_'), s[1])
for s in entries]
else:
db_refs[key] = entries
return db_refs | 0.001498 |
def closeSession(self):
"""
C_CloseSession
"""
rv = self.lib.C_CloseSession(self.session)
if rv != CKR_OK:
raise PyKCS11Error(rv) | 0.01105 |
def build(self, output_path=""):
"""method that should be inherited by all vis classes"""
self.output_path = self.checkOutputPath(output_path)
self._buildStaticFiles()
self.final_url = self._buildTemplates()
printDebug("Done.", "comment")
printDebug("=> %s" % (self.final_url), "comment")
return self.final_url | 0.00545 |
def in_words_float(amount, _gender=FEMALE):
"""
Float in words
@param amount: float numeral
@type amount: C{float} or C{Decimal}
    @return: in-words representation of the float numeral
    @rtype: C{unicode}
    @raise ValueError: when amount is negative
"""
check_positive(amount)
pts = []
    # convert the integer part to words
pts.append(sum_string(int(amount), 2,
(u"целая", u"целых", u"целых")))
    # now the part after the decimal point
remainder = _get_float_remainder(amount)
signs = len(str(remainder)) - 1
pts.append(sum_string(int(remainder), 2, FRACTIONS[signs]))
return u" ".join(pts) | 0.00152 |
def fromexcel(cls, path, sheet_name_or_num=0, headers=None):
"""
Constructs a new DataTable from an Excel file.
Specify sheet_name_or_number to load that specific sheet.
Headers will be inferred automatically, but if you'd prefer
to load only a subset of all the headers, pass in a list of the
headers you'd like as `headers`.
---
Alternatively, it's quite simple to:
reader = ExcelReader('myfile.xls')
reader.change_sheet('default')
data = DataTable(reader)
"""
reader = ExcelRW.UnicodeDictReader(path, sheet_name_or_num)
return cls(reader, headers=headers) | 0.002911 |
def clear(self):
"""
Calls `_clear` abstract method which must be implemented by descendants.
:raises: GPflowError exception when parent of the node is built.
"""
parent = self.parent
if parent is not self and parent.is_built_coherence(self.graph) is Build.YES:
raise GPflowError('Clear method cannot be started. Upper nodes are built.')
self._clear() | 0.011905 |
def cut_cross(self, x, y, radius, data):
"""Cut two data subarrays that have a center at (x, y) and with
radius (radius) from (data). Returns the starting pixel (x0, y0)
of each cut and the respective arrays (xarr, yarr).
"""
n = int(round(radius))
ht, wd = data.shape
x, y = int(round(x)), int(round(y))
x0, x1 = int(max(0, x - n)), int(min(wd - 1, x + n))
y0, y1 = int(max(0, y - n)), int(min(ht - 1, y + n))
xarr = data[y, x0:x1 + 1]
yarr = data[y0:y1 + 1, x]
return (x0, y0, xarr, yarr) | 0.003407 |
def create_subscribe(self, access_token, show_id):
"""doc: http://open.youku.com/docs/doc?id=29
"""
url = 'https://openapi.youku.com/v2/users/subscribe/create.json'
params = {
'client_id': self.client_id,
'access_token': access_token,
'show_id': show_id
}
r = requests.get(url, params=params)
check_error(r)
return r.json()['result'] == 0 | 0.004566 |
def on_resize(self, event):
"""Resize handler
Parameters
----------
event : instance of Event
The resize event.
"""
self._update_transforms()
if self._central_widget is not None:
self._central_widget.size = self.size
if len(self._vp_stack) == 0:
self.context.set_viewport(0, 0, *self.physical_size) | 0.009501 |
def verification_list(self, limit=10):
"""
Get list of verifications. Uses GET to /verifications interface.
:Returns: (list) Verification list as specified `here <https://cloud.knuverse.com/docs/api/#api-Verifications-Get_verification_list>`_.
"""
# TODO add arguments for paging and stuff
params = {}
params["limit"] = limit
response = self._get(url.verifications, params=params)
self._check_response(response, 200)
return self._create_response(response).get("verifications") | 0.005357 |
def init(name, *args, **kwargs):
"""Instantiate a plugin from the catalog.
"""
if name in _PLUGIN_CATALOG:
if rapport.config.get_int("rapport", "verbosity") >= 2:
print("Initialize plugin {0}: {1} {2}".format(name, args, kwargs))
try:
return _PLUGIN_CATALOG[name](*args, **kwargs)
except (ValueError, TypeError) as e:
print("Failed to initialize plugin {0}: {1}!".format(name, e), file=sys.stderr)
else:
print("Failed to initialize plugin {0}: Not in catalog!".format(name), file=sys.stderr) | 0.005217 |
def content(self, file_relpath):
"""Returns the content for file at path. Raises exception if path is ignored.
Raises exception if path is ignored.
"""
if self.isignored(file_relpath):
self._raise_access_ignored(file_relpath)
return self._content_raw(file_relpath) | 0.010274 |
def process_request(self, request_object):
"""Process Create Resource Request"""
resource = request_object.entity_cls.create(**request_object.data)
return ResponseSuccessCreated(resource) | 0.009434 |
def lower_folded_outputs(ir_blocks):
"""Lower standard folded output fields into GremlinFoldedContextField objects."""
folds, remaining_ir_blocks = extract_folds_from_ir_blocks(ir_blocks)
if not remaining_ir_blocks:
raise AssertionError(u'Expected at least one non-folded block to remain: {} {} '
u'{}'.format(folds, remaining_ir_blocks, ir_blocks))
output_block = remaining_ir_blocks[-1]
if not isinstance(output_block, ConstructResult):
raise AssertionError(u'Expected the last non-folded block to be ConstructResult, '
u'but instead was: {} {} '
u'{}'.format(type(output_block), output_block, ir_blocks))
# Turn folded Filter blocks into GremlinFoldedFilter blocks.
converted_folds = {
base_fold_location.get_location_name()[0]: _convert_folded_blocks(folded_ir_blocks)
for base_fold_location, folded_ir_blocks in six.iteritems(folds)
}
new_output_fields = dict()
for output_name, output_expression in six.iteritems(output_block.fields):
new_output_expression = output_expression
# Turn FoldedContextField expressions into GremlinFoldedContextField ones.
if isinstance(output_expression, FoldedContextField):
# Get the matching folded IR blocks and put them in the new context field.
base_fold_location_name = output_expression.fold_scope_location.get_location_name()[0]
folded_ir_blocks = converted_folds[base_fold_location_name]
new_output_expression = GremlinFoldedContextField(
output_expression.fold_scope_location, folded_ir_blocks,
output_expression.field_type)
new_output_fields[output_name] = new_output_expression
new_ir_blocks = remaining_ir_blocks[:-1]
new_ir_blocks.append(ConstructResult(new_output_fields))
return new_ir_blocks | 0.005171 |
def put(self, items, panic=True):
"""
Load a single row into the target table.
:param list items: A list of values in the row corresponding to the
fields specified by :code:`self.columns`
:param bool panic: If :code:`True`, when an error is encountered it will be
raised. Otherwise, the error will be logged and :code:`self.error_count`
is incremented.
:raises `giraffez.errors.GiraffeEncodeError`: if :code:`panic` is :code:`True` and there
are format errors in the row values.
:raises `giraffez.errors.GiraffeError`: if table name is not set.
:raises `giraffez.TeradataPTError`: if there is a problem
connecting to Teradata.
"""
if not self.initiated:
self._initiate()
try:
row_status = self.mload.put_row(self.preprocessor(items))
self.applied_count += 1
except (TeradataPTError, EncoderError) as error:
self.error_count += 1
if panic:
raise error
log.info("BulkLoad", error) | 0.004484 |
def entities(self, subject_id):
""" Returns all the entities of assertions for a subject, disregarding
whether the assertion still is valid or not.
:param subject_id: The identifier of the subject
:return: A possibly empty list of entity identifiers
"""
res = self._cache.get(subject_id)
if not res:
raise KeyError("No such subject")
else:
return res | 0.004556 |
def system_switch_attributes_chassis_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
system = ET.SubElement(config, "system", xmlns="urn:brocade.com:mgmt:brocade-ras")
switch_attributes = ET.SubElement(system, "switch-attributes")
chassis_name = ET.SubElement(switch_attributes, "chassis-name")
chassis_name.text = kwargs.pop('chassis_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.00578 |
def build_model(input_shape):
"""Create a compiled Keras model.
Parameters
----------
input_shape : tuple, len=3
Shape of each image sample.
Returns
-------
model : keras.Model
Constructed model.
"""
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, kernel_size=(3, 3),
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
return model | 0.001111 |
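A hedged usage sketch, assuming the Keras imports used above and a module-level num_classes (e.g. 10 for MNIST digits) are in scope; the shape corresponds to 28x28 single-channel images.
model = build_model((28, 28, 1))   # num_classes must already be defined
model.summary()                    # prints the layer-by-layer architecture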
def list_available_events(self, event_ids=None, event_type_ids=None, event_status=None, session=None,
lightweight=None):
"""
Search for events that have live score data available.
:param list event_ids: Optionally restricts the results to the specified event IDs
:param list event_type_ids: Optionally restricts the results to the specified event type IDs
:param list event_status: Optionally restricts the results to the specified event status
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.AvailableEvent]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listAvailableEvents')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.AvailableEvent, elapsed_time, lightweight) | 0.008114 |
def Lastovka_solid(T, similarity_variable):
    r'''Calculate solid constant-pressure heat capacity with the similarity
variable concept and method as shown in [1]_.
.. math::
C_p = 3(A_1\alpha + A_2\alpha^2)R\left(\frac{\theta}{T}\right)^2
\frac{\exp(\theta/T)}{[\exp(\theta/T)-1]^2}
+ (C_1\alpha + C_2\alpha^2)T + (D_1\alpha + D_2\alpha^2)T^2
Parameters
----------
T : float
Temperature of solid [K]
similarity_variable : float
similarity variable as defined in [1]_, [mol/g]
Returns
-------
Cps : float
        Solid constant-pressure heat capacity, [J/kg/K]
Notes
-----
Many restrictions on its use. Trained on data with MW from 12.24 g/mol
to 402.4 g/mol, C mass fractions from 61.3% to 95.2%,
H mass fractions from 3.73% to 15.2%, N mass fractions from 0 to 15.4%,
O mass fractions from 0 to 18.8%, and S mass fractions from 0 to 29.6%.
Recommended for organic compounds with low mass fractions of hetero-atoms
    and especially when molar mass exceeds 200 g/mol. This model does not
    capture effects of phase transitions and should not be used past the triple
    point.
    The original model is in terms of J/g/K. Note that the model is for predicting
    mass heat capacity, not molar heat capacity like most other methods!
A1 = 0.013183; A2 = 0.249381; theta = 151.8675; C1 = 0.026526;
C2 = -0.024942; D1 = 0.000025; D2 = -0.000123.
Examples
--------
>>> Lastovka_solid(300, 0.2139)
1682.063629222013
References
----------
.. [1] Laštovka, Václav, Michal Fulem, Mildred Becerra, and John M. Shaw.
"A Similarity Variable for Estimating the Heat Capacity of Solid Organic
Compounds: Part II. Application: Heat Capacity Calculation for
Ill-Defined Organic Solids." Fluid Phase Equilibria 268, no. 1-2
(June 25, 2008): 134-41. doi:10.1016/j.fluid.2008.03.018.
'''
A1 = 0.013183
A2 = 0.249381
theta = 151.8675
C1 = 0.026526
C2 = -0.024942
D1 = 0.000025
D2 = -0.000123
Cp = (3*(A1*similarity_variable + A2*similarity_variable**2)*R*(theta/T
)**2*exp(theta/T)/(exp(theta/T)-1)**2
+ (C1*similarity_variable + C2*similarity_variable**2)*T
+ (D1*similarity_variable + D2*similarity_variable**2)*T**2)
Cp = Cp*1000 # J/g/K to J/kg/K
return Cp | 0.002103 |
async def get_word(self, term: str) -> 'asyncurban.word.Word':
"""Gets the first matching word available.
Args:
term: The word to be defined.
Returns:
The closest matching :class:`Word` from UrbanDictionary.
Raises:
UrbanConnectionError: If the response status isn't ``200``.
WordNotFoundError: If the response doesn't contain data (i.e. no word found).
"""
resp = await self._get(term=term)
return Word(resp['list'][0]) | 0.010889 |
def axis_bounds(self) -> Dict[str, Tuple[float, float]]:
""" The (minimum, maximum) bounds for each axis. """
return {ax: (0, pos+0.5) for ax, pos in _HOME_POSITION.items()
if ax not in 'BC'} | 0.008969 |
def _process_morbidmap(self, limit):
"""
This will process the morbidmap file to get the links between
omim genes and diseases. Here, we create anonymous nodes for some
variant loci that are variants of the gene that causes the disease.
Triples created:
<some_anonymous_variant_locus>
is_allele_of
<omim_gene_id>
<some_anonymous_variant_locus> causes condition <omim_disease_id>
<assoc> hasSubject <some_anonymous_variant_locus>
<assoc> hasObject <omim_disease_id>
<assoc> hasPredicate <causes condition>
<assoc> DC:evidence <eco_id>
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
assoc_count = 0
src_key = 'morbidmap'
col = self.files[src_key]['columns']
raw = '/'.join((self.rawdir, self.files[src_key]['file']))
with open(raw) as reader:
line = reader.readline() # Copyright
line = reader.readline() # Generated: 2016-04-11
line = reader.readline() # EOF for field spec
line = reader.readline().strip() # columns header
line_counter = 4
row = line.split('\t') # includes funky leading octothorpe
if row != col: # assert
LOG.error('Expected %s to have columns: %s', raw, col)
LOG.error('But Found %s to have columns: %s', raw, row)
                raise AssertionError('Incoming data headers have changed.')
for line in reader:
line_counter += 1
line = line.strip()
# since there are comments at the end of the file as well,
if line[0] == '#':
continue
row = line.split('\t')
if len(row) != len(col):
LOG.warning(
'Unexpected input on line: %i got: %s', line_counter, row)
continue
disorder = row[col.index('# Phenotype')]
gene_symbols = row[col.index('Gene Symbols')]
gene_num = row[col.index('MIM Number')]
# loc = row[col.index('Cyto Location')]
# LOG.info("morbidmap disorder: %s", disorder) # too verbose
# disorder = disorder label , number (mapping key)
# 3-M syndrome 1, 273750 (3)|CUL7, 3M1|609577|6p21.1
# but note that for those diseases where they are genomic loci
# (not genes though), the omim id is only listed as the gene
# Alopecia areata 1 (2)|AA1|104000|18p11.3-p11.2
# when there's a gene and disease
disorder_match = self.disorder_regex.match(disorder)
nogene_match = self.nogene_regex.match(disorder)
if disorder_match is not None:
disorder_parts = disorder_match.groups()
(disorder_label, disorder_num, phene_key) = disorder_parts
if self.test_mode and (
int(disorder_num) not in self.test_ids or
int(gene_num) not in self.test_ids):
continue
assoc_count += 1
gene_symbols = gene_symbols.split(', ')
gene_id = 'OMIM:' + str(gene_num)
self._make_pheno_assoc(
graph, gene_id, disorder_num, disorder_label, phene_key)
elif nogene_match is not None:
# this is a case where the disorder
# a blended gene/phenotype
# we lookup the NCBIGene feature and make the association
(disorder_label, phene_key) = nogene_match.groups()
disorder_num = gene_num
# make what's in the gene column the disease
disorder_id = 'OMIM:' + str(disorder_num)
if self.test_mode and int(disorder_num) not in self.test_ids:
continue
if disorder_id in self.omim_ncbigene_idmap:
# get the gene ids
gene_ids = self.omim_ncbigene_idmap[disorder_id]
if gene_ids is None:
continue
for gene_num in gene_ids:
# TODO add gene filter for testMode and NCBIGenes
gene_id = 'NCBIGene:' + str(gene_num).strip()
assoc_count += 1
self._make_pheno_assoc(
graph, gene_id, disorder_num, disorder_label, phene_key)
else:
# we can create an anonymous feature
# to house this thing for example, 158900
feature_id = self._make_anonymous_feature(gene_num)
assoc_count += 1
self._make_pheno_assoc(
graph, feature_id, disorder_num, disorder_label, phene_key)
LOG.info(
"We don't have an NCBIGene feature id to link %s with %s",
disorder_id, disorder_label)
if self.test_mode and gene_num not in self.test_ids:
continue
else:
LOG.warning(
"There are misformatted rows %i:%s", line_counter, line)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Added %d G2P associations", assoc_count) | 0.001704 |
def GetEnabledInterfaces():
"""Gives a list of enabled interfaces. Should work on all windows versions.
Returns:
interfaces: Names of interfaces found enabled.
"""
interfaces = []
show_args = ['/c', 'netsh', 'show', 'interface']
# pylint: disable=undefined-variable
res = client_utils_common.Execute(
'cmd', show_args, time_limit=-1, bypass_whitelist=True)
pattern = re.compile(r'\s*')
for line in res[0].split('\r\n'): # res[0] is stdout.
interface_info = pattern.split(line)
if 'Enabled' in interface_info:
interfaces.extend(interface_info[-1:])
return interfaces | 0.016367 |
def abort(message, *args):
'''Raise an AbortException, halting task execution and exiting.'''
if args:
raise _AbortException(message.format(*args))
raise _AbortException(message) | 0.027174 |
def create_single_poll(self, polls_question, polls_description=None):
"""
Create a single poll.
Create a new poll for the current user
"""
path = {}
data = {}
params = {}
# REQUIRED - polls[question]
"""The title of the poll."""
data["polls[question]"] = polls_question
# OPTIONAL - polls[description]
"""A brief description or instructions for the poll."""
if polls_description is not None:
data["polls[description]"] = polls_description
self.logger.debug("POST /api/v1/polls with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/polls".format(**path), data=data, params=params, no_data=True) | 0.004802 |
def write_interactions(G, path, delimiter=' ', encoding='utf-8'):
"""Write a DyNetx graph in interaction list format.
Parameters
----------
G : graph
A DyNetx graph.
    path : file
       Writable file handle (opened in binary mode)
delimiter : character
Column delimiter
"""
for line in generate_interactions(G, delimiter):
line += '\n'
path.write(line.encode(encoding)) | 0.002155 |
def _createunbound(kls, **info):
"""Create a new UnboundNode representing a given class."""
if issubclass(kls, Bitfield):
nodetype = UnboundBitfieldNode
elif hasattr(kls, '_fields_'):
nodetype = UnboundStructureNode
elif issubclass(kls, ctypes.Array):
nodetype = UnboundArrayNode
else:
nodetype = UnboundSimpleNode
return nodetype(type=kls, **info) | 0.007194 |
def bytes_dict_cast(dict_, include_keys=True, include_vals=True, **kwargs):
"""
Converts any string-like items in input dict to bytes-like values, with
respect to python version
Parameters
----------
dict_ : dict
any string-like objects contained in the dict will be converted to bytes
include_keys : bool, default=True
if True, cast keys to bytes, else ignore
    include_vals : bool, default=True
if True, cast values to bytes, else ignore
kwargs:
encoding: str, default: 'utf-8'
encoding to be used when encoding string
"""
new_keys = bytes_list_cast(dict_.keys(), **kwargs) if include_keys else dict_.keys()
new_vals = bytes_list_cast(dict_.values(), **kwargs) if include_vals else dict_.values()
new_dict = dict(zip_(new_keys, new_vals))
return new_dict | 0.004667 |
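An illustrative call, assuming bytes_dict_cast and its bytes_list_cast/zip_ helpers are importable from this module; non-string values are presumably passed through unchanged.
d = {'name': 'alice', 'id': 7}
encoded = bytes_dict_cast(d)
# expected on Python 3: {b'name': b'alice', b'id': 7}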
def _draw_rectangle(context, width, height):
"""Draw a rectangle
Assertion: The current point is the center point of the rectangle
:param context: Cairo context
:param width: Width of the rectangle
:param height: Height of the rectangle
"""
c = context
# First move to upper left corner
c.rel_move_to(-width / 2., -height / 2.)
# Draw closed rectangle
c.rel_line_to(width, 0)
c.rel_line_to(0, height)
c.rel_line_to(-width, 0)
c.close_path() | 0.00361 |
def load_lc_data(filename, indep, dep, indweight=None, mzero=None, dir='./'):
"""
load dictionary with lc data
"""
if '/' in filename:
path, filename = os.path.split(filename)
else:
# TODO: this needs to change to be directory of the .phoebe file
path = dir
load_file = os.path.join(path, filename)
lcdata = np.loadtxt(load_file)
ncol = len(lcdata[0])
if dep == 'Magnitude':
mag = lcdata[:,1]
flux = 10**(-0.4*(mag-mzero))
lcdata[:,1] = flux
d = {}
d['phoebe_lc_time'] = lcdata[:,0]
d['phoebe_lc_flux'] = lcdata[:,1]
if indweight=="Standard deviation":
if ncol >= 3:
d['phoebe_lc_sigmalc'] = lcdata[:,2]
else:
logger.warning('A sigma column was mentioned in the .phoebe file but is not present in the lc data file')
elif indweight =="Standard weight":
if ncol >= 3:
sigma = np.sqrt(1/lcdata[:,2])
d['phoebe_lc_sigmalc'] = sigma
logger.warning('Standard weight has been converted to Standard deviation.')
else:
logger.warning('A sigma column was mentioned in the .phoebe file but is not present in the lc data file')
else:
        logger.warning('Phoebe 2 currently only supports standard deviation')
# dataset.set_value(check_visible=False, **d)
return d | 0.009116 |
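The magnitude branch above relies on flux = 10**(-0.4*(mag - mzero)); a quick numerical check of that relation, independent of the PHOEBE file handling:
mzero = 10.0
for mag in (10.0, 12.5, 15.0):
    flux = 10 ** (-0.4 * (mag - mzero))
    print(mag, flux)   # 10.0 -> 1.0, 12.5 -> 0.1, 15.0 -> 0.01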
def apply(self, resource):
"""
Apply filter to resource
:param resource: Image
:return: Image
"""
if not isinstance(resource, Image.Image):
raise ValueError('Unknown resource format')
original_width, original_height = resource.size
if self.start[0] < original_width or self.start[1] < original_height:
left = self.start[0]
upper = self.start[1]
else:
left = 0
upper = 0
right = self.start[0] + self.size[0]
lower = self.start[1] + self.size[1]
resource_format = resource.format
resource = resource.crop(
(
left,
upper,
right if right < original_width else original_width,
lower if lower < original_height else original_height
)
)
resource.format = resource_format
return resource | 0.002083 |
def process_config_dict(self, key, d, level):
"""
Process the CONFIG block
"""
lines = []
for k, v in d.items():
k = "CONFIG {}".format(self.quoter.add_quotes(k.upper()))
v = self.quoter.add_quotes(v)
lines.append(self.__format_line(self.whitespace(level, 1), k, v))
return lines | 0.00551 |
def find_parents(root, path, names):
"""Find files matching the given names relative to the given path.
Args:
path (str): The file path to start searching up from.
names (List[str]): The file/directory names to look for.
root (str): The directory at which to stop recursing upwards.
Note:
The path MUST be within the root.
"""
if not root:
return []
if not os.path.commonprefix((root, path)):
log.warning("Path %s not in %s", path, root)
return []
# Split the relative by directory, generate all the parent directories, then check each of them.
# This avoids running a loop that has different base-cases for unix/windows
# e.g. /a/b and /a/b/c/d/e.py -> ['/a/b', 'c', 'd']
dirs = [root] + os.path.relpath(os.path.dirname(path), root).split(os.path.sep)
# Search each of /a/b/c, /a/b, /a
while dirs:
search_dir = os.path.join(*dirs)
existing = list(filter(os.path.exists, [os.path.join(search_dir, n) for n in names]))
if existing:
return existing
dirs.pop()
# Otherwise nothing
return [] | 0.003481 |
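A usage sketch with hypothetical paths, looking for project configuration files above a source file:
root = "/home/user/project"                       # hypothetical project root
source = "/home/user/project/src/pkg/module.py"   # hypothetical file inside it
found = find_parents(root, source, ["setup.cfg", "pyproject.toml"])
# e.g. ["/home/user/project/setup.cfg"] if that file exists on disk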
def login(username):
"""
    Log the given user in: accept a username or a User instance, update
    last_login, store the user in the session and return True.
"""
from uliweb.utils.date import now
from uliweb import request
User = get_model('user')
if isinstance(username, (str, unicode)):
user = User.get(User.c.username==username)
else:
user = username
user.last_login = now()
user.save()
set_user_session(user)
return True | 0.005195 |
def plot_discrete_cdf(xs, ys, ax=None, xlabel=None, ylabel=None,
label=None):
"""
    Plots the cumulative distribution function (CDF) of a discrete
    distribution: the x-axis contains the values in `xs`, the y-axis shows
    the cumulative probability obtained by summing `ys`.
Parameters
----------
xs : list-like of scalars
x values corresponding to the values in `y`s. Can be `None`, in which
case range(len(ys)) will be used.
ys : list-like of scalars
list of probabilities to be plotted which should sum to 1.
ax : matplotlib axes object, optional
If provided, the axes to draw on, otherwise plt.gca() is used.
xlabel : str,optional
label for the x-axis
ylabel : str, optional
label for the y-axis
label : str, optional
label for the legend
Returns
-------
axis of plot
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if xs is None:
xs = range(len(ys))
ys = np.cumsum(ys)
ax.plot(xs, ys, label=label)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax | 0.000768 |
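A usage sketch plotting the CDF of a fair six-sided die (the module is assumed to import numpy as np, since the body uses np.cumsum):
import matplotlib.pyplot as plt

xs = [1, 2, 3, 4, 5, 6]
ys = [1 / 6] * 6          # probabilities summing to 1
ax = plot_discrete_cdf(xs, ys, xlabel="outcome",
                       ylabel="cumulative probability", label="fair die")
ax.legend()
plt.show()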
def add(name,
password=None,
fullname=None,
description=None,
groups=None,
home=None,
homedrive=None,
profile=None,
logonscript=None):
'''
Add a user to the minion.
Args:
name (str): User name
password (str, optional): User's password in plain text.
fullname (str, optional): The user's full name.
description (str, optional): A brief description of the user account.
        groups (list, optional): A list of groups to add the user to
            (see chgroups).
home (str, optional): The path to the user's home directory.
homedrive (str, optional): The drive letter to assign to the home
directory. Must be the Drive Letter followed by a colon. ie: U:
profile (str, optional): An explicit path to a profile. Can be a UNC or
            a folder on the system. If left blank, Windows uses its default
            profile directory.
logonscript (str, optional): Path to a login script to run when the user
logs on.
Returns:
        bool: True if successful, False if unsuccessful.
CLI Example:
.. code-block:: bash
salt '*' user.add name password
'''
if six.PY2:
name = _to_unicode(name)
password = _to_unicode(password)
fullname = _to_unicode(fullname)
description = _to_unicode(description)
home = _to_unicode(home)
homedrive = _to_unicode(homedrive)
profile = _to_unicode(profile)
logonscript = _to_unicode(logonscript)
user_info = {}
if name:
user_info['name'] = name
else:
return False
user_info['password'] = password
user_info['priv'] = win32netcon.USER_PRIV_USER
user_info['home_dir'] = home
user_info['comment'] = description
user_info['flags'] = win32netcon.UF_SCRIPT
user_info['script_path'] = logonscript
try:
win32net.NetUserAdd(None, 1, user_info)
except win32net.error as exc:
log.error('Failed to create user %s', name)
log.error('nbr: %s', exc.winerror)
log.error('ctx: %s', exc.funcname)
log.error('msg: %s', exc.strerror)
return False
update(name=name,
homedrive=homedrive,
profile=profile,
fullname=fullname)
ret = chgroups(name, groups) if groups else True
return ret | 0.000827 |
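A sketch of calling the function directly on a Windows minion (all values are illustrative; win32net/win32netcon must be importable for the call to succeed):
created = add(
    name="jdoe",
    password="S3cret!pass",
    fullname="Jane Doe",
    description="Build agent account",
    groups=["Users"],
    home=r"C:\Users\jdoe",
    homedrive="U:",
)
print("created" if created else "failed")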
def wavg(datalist, fast=False, prior=None, **fitterargs):
""" Weighted average of |GVar|\s or arrays/dicts of |GVar|\s.
The weighted average of ``N`` |GVar|\s ::
xavg = wavg([g1, g2 ... gN])
is what one obtains from a weighted least-squares fit of the
collection of |GVar|\s to the one-parameter fit function ::
def f(p):
return N * [p]
The average is the best-fit value for fit parameter ``p``. |GVar|\s
with smaller standard deviations carry more weight than those with
larger standard deviations; and the averages take account of
correlations between the |GVar|\s.
``wavg`` also works when each ``gi`` is an array of |GVar|\s or a
dictionary whose values are |GVar|\s or arrays of |GVar|\s.
Corresponding arrays in different ``gi``\s must have the same dimension,
but can have different shapes (the overlapping components are
averaged). When the ``gi`` are dictionaries, they need not all have
the same keys.
Weighted averages can become costly when the number of random samples
being averaged is large (100s or more). In such cases it might be useful
to set parameter ``fast=True``. This causes ``wavg`` to estimate the
weighted average by incorporating the random samples one at a time into a
running average::
result = datalist[0]
for di in datalist[1:]:
result = wavg([result, di], ...)
This method can be much faster when ``len(datalist)`` is large, and gives
the exact result when there are no correlations between different elements
of list ``datalist``. The results are approximately correct when
``datalist[i]`` and ``datalist[j]`` are correlated for ``i!=j``.
Args:
datalist (list): The |GVar|\s to be averaged. ``datalist`` is
a one-dimensional sequence of |GVar|\s, or of arrays of |GVar|\s,
or of dictionaries containing |GVar|\s and/or arrays of |GVar|\s.
Corresponding arrays in different ``datalist[i]``\s must have the
same dimension.
fast (bool): If ``fast=True``, ``wavg`` averages the ``datalist[i]``
sequentially. This can be much faster when averaging a large
        number of samples but is only approximate if the different
elements of ``datalist`` are correlated. Default is ``False``.
fitterargs (dict): Additional arguments (e.g., ``svdcut``) for the
:class:`lsqfit.nonlinear_fit` fitter used to do the averaging.
Returns:
The weighted average is returned as a |GVar| or an array of
|GVar|\s or a dictionary of |GVar|\s and arrays of |GVar|\s.
Results have the following extra attributes:
**chi2** - ``chi**2`` for weighted average.
**dof** - Effective number of degrees of freedom.
**Q** - Quality factor `Q` (or *p-value*) for fit:
the probability that the ``chi**2`` could have been larger,
by chance, assuming that the data are all Gaussian and consistent
with each other. Values smaller than 0.1 or so suggest that the
data are not Gaussian or are inconsistent with each other. Also
called the *p-value*.
**time** - Time required to do average.
**svdcorrection** - The *svd* corrections made to the data
when ``svdcut`` is not ``None``.
**fit** - Fit returned by :class:`lsqfit.nonlinear_fit`.
"""
if prior is not None:
datalist = list(datalist) + [prior]
warnings.warn(
'use of prior in lsqfit.wavg is deprecated',
DeprecationWarning
)
if len(datalist) <= 0:
return None
elif len(datalist) == 1:
if hasattr(datalist[0], 'keys'):
return BufferDictWAvg(datalist[0], None)
if numpy.shape(datalist[0]) == ():
return GVarWAvg(datalist[0], None)
else:
return ArrayWAvg(numpy.asarray(datalist[0]), None)
if fast:
chi2 = dof = time = svdcorrection = 0
ans = datalist[0]
for i, di in enumerate(datalist[1:]):
ans = wavg([ans, di], fast=False, **fitterargs)
chi2 += ans.chi2
dof += ans.dof
time += ans.time
svdcorrection += ans.svdcorrection
ans.fit.dof = dof
ans.fit.Q = _gammaQ(dof / 2., chi2 / 2.)
ans.fit.chi2 = chi2
ans.fit.time = time
ans.fit.svdcorrection = svdcorrection
return ans
if hasattr(datalist[0], 'keys'):
datashape = None
else:
datashape = numpy.shape(datalist[0])
datalist = [{None:di} for di in datalist]
# repack as a single dictionary
p0shape = {}
p0index = {}
data = gvar.BufferDict()
for i, di in enumerate(datalist):
for k in di:
data[k, i] = di[k]
shape = numpy.shape(di[k])
p0index[k, i] = tuple(slice(0, j) for j in shape)
if k not in p0shape:
p0shape[k] = shape
elif p0shape[k] != shape:
p0shape[k] = tuple(
max(j1, j2) for j1, j2 in zip(shape, p0shape[k])
)
# calculate p0
p0 = gvar.BufferDict()
p0count = {}
for k, i in data:
if k not in p0:
p0[k] = numpy.zeros(p0shape[k], float)
p0count[k] = numpy.zeros(p0shape[k], float)
if p0index[k, i] == ():
p0[k] += data[k, i].mean
p0count[k] += 1
else:
p0[k][p0index[k, i]] += gvar.mean(data[k, i])
p0count[k][p0index[k, i]] += 1.
for k in p0:
p0[k] /= p0count[k]
# set up fit
def fcn(p):
ans = gvar.BufferDict()
for k, i in data:
shape = data[k, i].shape
if shape == ():
ans[k, i] = p[k]
else:
ans[k, i] = p[k][p0index[k, i]]
return ans
fit = lsqfit.nonlinear_fit(data=data, fcn=fcn, p0=p0, **fitterargs)
if datashape is None:
return BufferDictWAvg(fit.p, fit)
elif datashape == ():
return GVarWAvg(fit.p[None], fit)
else:
return ArrayWAvg(fit.p[None], fit) | 0.003538 |
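A usage sketch, assuming the gvar package is installed; the three measurements are illustrative:
import gvar

measurements = [gvar.gvar("1.02(5)"), gvar.gvar("0.98(4)"), gvar.gvar("1.05(6)")]
avg = wavg(measurements)
print(avg)                        # weighted mean with propagated uncertainty
print(avg.chi2, avg.dof, avg.Q)   # consistency diagnostics described above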
def exec_command(attr, cmd):
"""Runs a subproc to calculate a package attribute.
"""
import subprocess
p = popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode:
from rez.exceptions import InvalidPackageError
raise InvalidPackageError(
"Error determining package attribute '%s':\n%s" % (attr, err))
return out.strip(), err.strip() | 0.002288 |
def _varscan_paired(align_bams, ref_file, items, target_regions, out_file):
"""Run a paired VarScan analysis, also known as "somatic". """
max_read_depth = "1000"
config = items[0]["config"]
paired = get_paired_bams(align_bams, items)
if not paired.normal_bam:
affected_batch = items[0]["metadata"]["batch"]
message = ("Batch {} requires both tumor and normal BAM files for"
" VarScan cancer calling").format(affected_batch)
raise ValueError(message)
if not utils.file_exists(out_file):
assert out_file.endswith(".vcf.gz"), "Expect bgzipped output to VarScan"
normal_mpileup_cl = samtools.prep_mpileup([paired.normal_bam], ref_file,
config, max_read_depth,
target_regions=target_regions,
want_bcf=False)
tumor_mpileup_cl = samtools.prep_mpileup([paired.tumor_bam], ref_file,
config, max_read_depth,
target_regions=target_regions,
want_bcf=False)
base, ext = utils.splitext_plus(out_file)
indel_file = base + "-indel.vcf"
snp_file = base + "-snp.vcf"
with file_transaction(config, indel_file, snp_file) as (tx_indel, tx_snp):
with tx_tmpdir(items[0]) as tmp_dir:
jvm_opts = _get_jvm_opts(config, tmp_dir)
opts = " ".join(_varscan_options_from_config(config))
remove_zerocoverage = r"{ ifne grep -v -P '\t0\t\t$' || true; }"
export = utils.local_path_export()
varscan_cmd = ("{export} varscan {jvm_opts} somatic "
"<({normal_mpileup_cl} | {remove_zerocoverage}) "
"<({tumor_mpileup_cl} | {remove_zerocoverage}) "
"--output-snp {tx_snp} --output-indel {tx_indel} "
"--output-vcf {opts} ")
# add minimum AF
min_af = float(utils.get_in(paired.tumor_config, ("algorithm",
"min_allele_fraction"), 10)) / 100.0
varscan_cmd += "--min-var-freq {min_af} "
do.run(varscan_cmd.format(**locals()), "Varscan", None, None)
to_combine = []
for fname in [snp_file, indel_file]:
if utils.file_exists(fname):
fix_file = "%s-fix.vcf.gz" % (utils.splitext_plus(fname)[0])
with file_transaction(config, fix_file) as tx_fix_file:
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
normal_name = paired.normal_name
tumor_name = paired.tumor_name
cmd = ("cat {fname} | "
"{py_cl} -x 'bcbio.variation.varscan.fix_varscan_output(x,"
""" "{normal_name}", "{tumor_name}")' | """
"{fix_ambig_ref} | {fix_ambig_alt} | ifne vcfuniqalleles | "
"""{py_cl} -x 'bcbio.variation.vcfutils.add_contig_to_header(x, "{ref_file}")' | """
"""bcftools filter -m + -s REJECT -e "SS != '.' && SS != '2'" 2> /dev/null | """
"bgzip -c > {tx_fix_file}")
do.run(cmd.format(**locals()), "Varscan paired fix")
to_combine.append(fix_file)
if not to_combine:
out_file = write_empty_vcf(out_file, config)
else:
out_file = combine_variant_files(to_combine,
out_file, ref_file, config,
region=target_regions)
if os.path.getsize(out_file) == 0:
write_empty_vcf(out_file)
if out_file.endswith(".gz"):
out_file = bgzip_and_index(out_file, config) | 0.003331 |
def add_item(self, item, **options):
"""
Add a layer or table item to the export.
:param Layer|Table item: The Layer or Table to add
:rtype: self
"""
export_item = {
"item": item.url,
}
export_item.update(options)
self.items.append(export_item)
return self | 0.005731 |
def generate_index_file(filename):
"""Constructs a default home page for the project."""
with open(filename, 'w') as file:
content = open(os.path.join(os.path.dirname(__file__), 'templates/index_page.html'), 'r').read()
file.write(content) | 0.007605 |
def call(self, url, func, data=None,
headers=None,
return_json=True,
stream=False,
retry=True,
default_headers=True,
quiet=False):
        '''Issue the request; on a 401 response, refresh the token (if the
        client defines an _update_token function) and retry the call once.
Parameters
==========
func: the function (eg, post, get) to call
url: the url to send file to
headers: if not None, update the client self.headers with dictionary
data: additional data to add to the request
return_json: return json if successful
default_headers: use the client's self.headers (default True)
'''
if data is not None:
if not isinstance(data, dict):
data = json.dumps(data)
heads = dict()
if default_headers is True:
heads = self.headers.copy()
if headers is not None:
if isinstance(headers, dict):
heads.update(headers)
response = func(url=url,
headers=heads,
data=data,
verify=self._verify(),
stream=stream)
        # Server error, nothing to retry
if response.status_code in [500, 502]:
bot.error("Beep boop! %s: %s" %(response.reason,
response.status_code))
sys.exit(1)
        # Not found
if response.status_code == 404:
# Not found, we might want to continue on
if quiet is False:
bot.error("Beep boop! %s: %s" %(response.reason,
response.status_code))
sys.exit(1)
# Errored response, try again with refresh
if response.status_code == 401:
# If client has method to update token, try it once
if retry is True and hasattr(self,'_update_token'):
# A result of None indicates no update to the call
self._update_token(response)
return self._call(url, func, data=data,
headers=headers,
return_json=return_json,
stream=stream, retry=False)
bot.error("Your credentials are expired! %s: %s" %(response.reason,
response.status_code))
sys.exit(1)
elif response.status_code == 200:
if return_json:
try:
response = response.json()
except ValueError:
bot.error("The server returned a malformed response.")
sys.exit(1)
return response | 0.006102 |
def vap(x, a, b, c):
"""Vapor pressure model
Parameters
----------
    x: float or array-like
a: float
b: float
c: float
Returns
-------
float
np.exp(a+b/x+c*np.log(x))
"""
return np.exp(a+b/x+c*np.log(x)) | 0.008097 |
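A quick numerical check of the model; the coefficients below are arbitrary illustrative values, not fitted ones:
import numpy as np   # required by vap's body

a, b, c = 20.0, -3000.0, -1.5       # illustrative coefficients only
for T in (273.15, 300.0, 373.15):
    print(T, vap(T, a, b, c))       # evaluates exp(a + b/T + c*log(T))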
def extract_session_details(request_headers, session_header, secret_key):
'''
a method to extract and validate jwt session token from request headers
:param request_headers: dictionary with header fields from request
:param session_header: string with name of session token header key
:param secret_key: string with secret key to json web token encryption
:return: dictionary with request details with session details or error coding
'''
session_details = {
'error': '',
'code': 200,
'session': {}
}
if not session_header in request_headers.keys():
session_details['error'] = '%s is missing.' % session_header
session_details['code'] = 400
else:
import jwt
session_token = request_headers[session_header]
try:
session_details['session'] = jwt.decode(session_token, secret_key)
except jwt.DecodeError as err:
session_details['error'] = 'Session token decoding error.'
session_details['code'] = 400
except jwt.ExpiredSignatureError as err:
session_details['error'] = 'Session token has expired.'
session_details['code'] = 400
except Exception:
session_details['error'] = 'Session token is invalid.'
session_details['code'] = 400
return session_details | 0.002123 |
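A round-trip sketch, assuming a PyJWT release compatible with the bare jwt.decode(token, secret) call used above; the header name and secret are illustrative:
import jwt

secret = "not-a-real-secret"
token = jwt.encode({"user_id": 42}, secret)        # HS256 by default
headers = {"X-Session-Token": token}
details = extract_session_details(headers, "X-Session-Token", secret)
print(details["code"], details["session"])         # expected: 200 {'user_id': 42}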
def expand(self, url):
"""Base expand method. Only visits the link, and return the response
url"""
url = self.clean_url(url)
response = self._get(url)
if response.ok:
return response.url
raise ExpandingErrorException | 0.007246 |
def _lookup_model(cls, kind, default_model=None):
"""Get the model class for the kind.
Args:
kind: A string representing the name of the kind to lookup.
default_model: The model class to use if the kind can't be found.
Returns:
The model class for the requested kind.
Raises:
KindError: The kind was not found and no default_model was provided.
"""
modelclass = cls._kind_map.get(kind, default_model)
if modelclass is None:
raise KindError(
"No model class found for kind '%s'. Did you forget to import it?" %
kind)
return modelclass | 0.003241 |
def juliandate(time: datetime) -> float:
"""
Python datetime to Julian time
from D.Vallado Fundamentals of Astrodynamics and Applications p.187
and J. Meeus Astronomical Algorithms 1991 Eqn. 7.1 pg. 61
Parameters
----------
time : datetime.datetime
time to convert
    Returns
-------
jd : float
Julian date
"""
times = np.atleast_1d(time)
assert times.ndim == 1
jd = np.empty(times.size)
for i, t in enumerate(times):
if t.month < 3:
year = t.year - 1
month = t.month + 12
else:
year = t.year
month = t.month
A = int(year / 100.0)
B = 2 - A + int(A / 4.)
C = ((t.second / 60. + t.minute) / 60. + t.hour) / 24.
jd[i] = (int(365.25 * (year + 4716)) +
int(30.6001 * (month + 1)) + t.day + B - 1524.5 + C)
return jd.squeeze() | 0.001085 |
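A quick sanity check: the J2000 epoch, 2000-01-01 12:00 UT, is defined as JD 2451545.0, and the formula above reproduces it:
from datetime import datetime

print(juliandate(datetime(2000, 1, 1, 12, 0, 0)))   # -> 2451545.0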
def get_vm_full_path(self, si, vm):
"""
        :param si: vim.ServiceInstance (used to stop the walk at the datacenter root folder)
        :param vm: vim.VirtualMachine
        :return: string with the VM's full folder path, e.g. 'FOLDER1/FOLDER2/vm-name'
"""
folder_name = None
folder = vm.parent
if folder:
folder_name = folder.name
folder_parent = folder.parent
while folder_parent and folder_parent.name and folder_parent != si.content.rootFolder and not isinstance(folder_parent, vim.Datacenter):
folder_name = folder_parent.name + '/' + folder_name
try:
folder_parent = folder_parent.parent
except Exception:
break
# at this stage we receive a path like this: vm/FOLDER1/FOLDER2;
# we're not interested in the "vm" part, so we throw that away
folder_name = '/'.join(folder_name.split('/')[1:])
# ok, now we're adding the vm name; btw, if there is no folder, that's cool, just return vm.name
return VMLocation.combine([folder_name, vm.name]) if folder_name else vm.name | 0.00485 |
def generate_map(map, name='url_map'):
"""
Generates a JavaScript function containing the rules defined in
this map, to be used with a MapAdapter's generate_javascript
method. If you don't pass a name the returned JavaScript code is
an expression that returns a function. Otherwise it's a standalone
script that assigns the function with that name. Dotted names are
    resolved (so you can use a name like 'obj.url_for')
In order to use JavaScript generation, simplejson must be installed.
Note that using this feature will expose the rules
defined in your map to users. If your rules contain sensitive
information, don't use JavaScript generation!
"""
from warnings import warn
warn(DeprecationWarning('This module is deprecated'))
map.update()
rules = []
converters = []
for rule in map.iter_rules():
trace = [{
'is_dynamic': is_dynamic,
'data': data
} for is_dynamic, data in rule._trace]
rule_converters = {}
for key, converter in iteritems(rule._converters):
js_func = js_to_url_function(converter)
try:
index = converters.index(js_func)
except ValueError:
converters.append(js_func)
index = len(converters) - 1
rule_converters[key] = index
rules.append({
u'endpoint': rule.endpoint,
u'arguments': list(rule.arguments),
u'converters': rule_converters,
u'trace': trace,
u'defaults': rule.defaults
})
return render_template(name_parts=name and name.split('.') or [],
rules=dumps(rules),
converters=converters) | 0.000556 |
def deformat(value):
"""
    Remove non-alphanumeric characters.
    For some reason str.translate cannot be called here; it raises
    "translate() takes exactly one argument (2 given)"
    (File "C:\Python27\lib\string.py", line 493, in translate),
    so the characters are filtered manually.
"""
output = []
for c in value:
if c in delchars:
continue
output.append(c)
return "".join(output) | 0.010283 |
def float_str(value, order="pprpr", size=[4, 5, 3, 6, 4],
after=False, max_denominator=1000000):
"""
Pretty string from int/float.
"Almost" automatic string formatter for integer fractions, fractions of
:math:`\pi` and float numbers with small number of digits.
Outputs a representation among ``float_str.pi``, ``float_str.frac`` (without
a symbol) strategies, as well as the usual float representation. The
formatter is chosen by counting the resulting length, trying each one in the
given ``order`` until one gets at most the given ``size`` limit parameter as
its length.
Parameters
----------
value :
A float number or an iterable with floats.
order :
A string that gives the order to try formatting. Each char should be:
- ``"p"`` for pi formatter (``float_str.pi``);
- ``"r"`` for ratio without symbol (``float_str.frac``);
- ``"f"`` for the float usual base 10 decimal representation.
Defaults to ``"pprpr"``. If no trial has the desired size, returns the
float representation.
size :
The max size allowed for each formatting in the ``order``, respectively.
Defaults to ``[4, 5, 3, 6, 4]``.
after :
Chooses the place where the :math:`\pi` symbol should appear, when such
formatter apply. If ``True``, that's the end of the string. If ``False``,
that's in between the numerator and the denominator, before the slash.
Defaults to ``False``.
max_denominator :
The data in ``value`` is rounded following the limit given by this
parameter when trying to represent it as a fraction/ratio.
Defaults to the integer 1,000,000 (one million).
Returns
-------
A string with the number written into.
Note
----
You probably want to keep ``max_denominator`` high to avoid rounding.
"""
if len(order) != len(size):
raise ValueError("Arguments 'order' and 'size' should have the same size")
str_data = {
"p": float_str.pi(value, after=after, max_denominator=max_denominator),
"r": float_str.frac(value, max_denominator=max_denominator),
"f": elementwise("v", 0)(lambda v: "{0:g}".format(v))(value)
}
sizes = {k: len(v) for k, v in iteritems(str_data)}
sizes["p"] = max(1, sizes["p"] - len(float_str.pi_symbol) + 1)
for char, max_size in xzip(order, size):
if sizes[char] <= max_size:
return str_data[char]
return str_data["f"] | 0.004603 |
def _get_repos(self):
"""Gets a list of all the installed repositories in this server.
"""
result = {}
for xmlpath in self.installed:
repo = RepositorySettings(self, xmlpath)
result[repo.name.lower()] = repo
return result | 0.006993 |
def add_analysis_attributes(self, group_name, attrs, clear=False):
""" Add attributes on the group or dataset specified.
:param group_name: The name of the group (or dataset).
:param attrs: A dictionary representing the attributes to add.
:param clear: If set, any existing attributes will be cleared.
The specified group name can be any existing path (relative to the
"Analyses" group. It can be a group or a dataset.
"""
self.assert_writeable()
group = 'Analyses/{}'.format(group_name)
self._add_attributes(group, attrs, clear) | 0.00638 |
def process_view(self, request, view_func, *args, **kwargs):
"""Process view is executed before the view function, here we get the
        function name and set it as the span name.
"""
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(request.path, self.blacklist_paths):
return
try:
# Get the current span and set the span name to the current
# function name of the request.
tracer = _get_current_tracer()
span = tracer.current_span()
span.name = utils.get_func_name(view_func)
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True) | 0.002766 |
def SensorsDataPost(self, parameters):
"""
Post sensor data to multiple sensors in CommonSense simultaneously.
@param parameters (dictionary) - Data to post to the sensors.
@note - http://www.sense-os.nl/59?nodeId=59&selectedId=11887
@return (bool) - Boolean indicating whether SensorsDataPost was successful.
"""
if self.__SenseApiCall__('/sensors/data.json', 'POST', parameters = parameters):
return True
else:
self.__error__ = "api call unsuccessful"
return False | 0.014129 |
def format_citation(citation, citation_type=None):
"""
This method may be built to support elements from different Tag Suite
versions with the following tag names:
citation, element-citation, mixed-citation, and nlm-citation
The citation-type attribute is optional, and may also be empty; if it has a
value then it should appear in the following prescribed list, or it will be
treated as 'other'.
book Book or book series
commun Informal or personal communication, such as a phone call or
an email message
confproc Conference proceedings
discussion Discussion among a group in some forum — public, private, or
electronic — which may or may not be moderated, for example,
a single discussion thread in a listserv
gov Government publication or government standard
journal Journal article
list Listserv or discussion group (as an entity, as opposed to a
single discussion thread which uses the value “discussion”)
other None of the listed types.
patent Patent or patent application
thesis Work written as part of the completion of an advanced degree
web Website
This method will accept a passed citation_type argument which will override
checking of the element's citation-type attribute and force the formatting
according to the passed string value. Note that this may not be appropriate
in many cases.
"""
cite_types = {'book': self.format_book_citation,
'commun': self.format_commun_citation,
'confproc': self.format_confproc_citation,
'discussion': self.format_discussion_citation,
'gov': self.format_gov_citation,
'journal': self.format_journal_citation,
'list': self.format_list_citation,
'other': self.format_other_citation,
'patent': self.format_patent_citation,
'thesis': self.format_thesis_citation,
'web': self.format_web_citation,
'': self.format_other_citation, # Empty becomes 'other'
None: self.format_other_citation} # None becomes 'other'
#Only check if no citation_type value is passed
if citation_type is None:
#Get the citation-type attribute value
        if 'citation-type' in citation.attrib:
            citation_type = citation.attrib['citation-type']
#Pass the citation to the appropriate function and return result
return cite_types[citation_type](citation)
@staticmethod
def format_book_citation(self, citation):
"""
citation-type=\"book\"
"""
#Get the count of authors
author_group_count = int(citation.xpath('count(person-group) + count(collab)'))
#Detect if there are non-authors
if citation.xpath('person-group[@person-group-type!=\'author\']'):
non_authors = True
else:
non_authors= False
#Detect article-title
if citation.xpath('article-title'):
article_title = True
else:
article_title = False
#Find out if there is at least one author or compiler
auth_or_comp = False
for person_group in citation.findall('person-group'):
if 'person-group-type' in person_group.attrib:
if person_group.attrib['person-group-type'] in ['author', 'compiler']:
auth_or_comp = True
break
#These pieces of information allow us to provide two special use cases
#and one general use case.
#First special case:
if author_group_count > 0 and non_authors and article_title:
pass
#Second special case
elif auth_or_comp:
pass
#General case
else:
pass
@staticmethod
def format_commun_citation(self, citation):
"""
citation-type=\"commun\"
"""
@staticmethod
def format_confproc_citation(self, citation):
"""
citation-type=\"confproc\"
"""
@staticmethod
def format_discussion_citation(self, citation):
"""
citation-type=\"discussion\"
"""
@staticmethod
def format_gov_citation(self, citation):
"""
citation-type=\"gov\"
"""
@staticmethod
def format_journal_citation(self, citation):
"""
citation-type=\"journal\"
"""
@staticmethod
def format_list_citation(self, citation):
"""
citation-type=\"list\"
"""
@staticmethod
def format_other_citation(self, citation):
"""
citation-type=\"other\"
"""
@staticmethod
def format_patent_citation(self, citation):
"""
citation-type=\"patent\"
"""
@staticmethod
def format_thesis_citation(self, citation):
"""
citation-type=\"thesis\"
"""
#Treat the same as "book"
        return self.format_book_citation(citation)
@staticmethod
def format_web_citation(self, citation):
"""
citation-type=\"web\"
""" | 0.003197 |