text | score |
---|---|
def filter_(predicate, *structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc, too-many-branches
"""Select elements of a nested structure based on a predicate function.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
predicate: The function to determine whether an element should be kept.
Receives one argument for every structure that is provided.
    *structures: One or more nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
"""
# Named keyword arguments are not allowed after *args in Python 2.
flatten = kwargs.pop('flatten', False)
assert not kwargs, 'filter() got unexpected keyword arguments.'
def impl(predicate, *structures):
if len(structures) == 0: # pylint: disable=len-as-condition
return structures
if all(isinstance(s, (tuple, list)) for s in structures):
if len(set(len(x) for x in structures)) > 1:
raise ValueError('Cannot merge tuples or lists of different length.')
# Only wrap in tuples if more than one structure provided.
if len(structures) > 1:
filtered = (impl(predicate, *x) for x in _builtin_zip(*structures))
else:
filtered = (impl(predicate, x) for x in structures[0])
# Remove empty containers and construct result structure.
if hasattr(structures[0], '_fields'): # namedtuple
filtered = (x if x != () else None for x in filtered)
return type(structures[0])(*filtered)
else: # tuple, list
filtered = (
x for x in filtered if not isinstance(x, (tuple, list, dict)) or x)
return type(structures[0])(filtered)
if all(isinstance(s, dict) for s in structures):
if len(set(frozenset(x.keys()) for x in structures)) > 1:
raise ValueError('Cannot merge dicts with different keys.')
# Only wrap in tuples if more than one structure provided.
if len(structures) > 1:
filtered = {
k: impl(predicate, *(s[k] for s in structures))
for k in structures[0]}
else:
filtered = {k: impl(predicate, v) for k, v in structures[0].items()}
# Remove empty containers and construct result structure.
filtered = {
k: v for k, v in filtered.items()
if not isinstance(v, (tuple, list, dict)) or v}
return type(structures[0])(filtered)
if len(structures) > 1:
return structures if predicate(*structures) else ()
else:
return structures[0] if predicate(structures[0]) else ()
result = impl(predicate, *structures)
if flatten:
result = flatten_(result)
return result | 0.009301 |
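For illustration, here is a minimal standalone sketch of the same predicate-filtering idea on nested lists and dicts; `nested_filter` is a hypothetical, simplified helper written for this example, not the implementation above (it handles a single structure and ignores namedtuples and the `flatten` option).

# Hypothetical, simplified re-implementation for illustration only.
def nested_filter(predicate, structure):
    if isinstance(structure, dict):
        out = {k: nested_filter(predicate, v) for k, v in structure.items()}
        # Drop containers that became empty after filtering.
        return {k: v for k, v in out.items()
                if not isinstance(v, (dict, list, tuple)) or v}
    if isinstance(structure, (list, tuple)):
        out = (nested_filter(predicate, x) for x in structure)
        return type(structure)(
            x for x in out if not isinstance(x, (dict, list, tuple)) or x)
    # Leaf element: keep it only if the predicate accepts it.
    return structure if predicate(structure) else ()

data = {'a': [1, 2, 3], 'b': {'c': 4, 'd': 5}}
print(nested_filter(lambda x: x % 2 == 0, data))  # {'a': [2], 'b': {'c': 4}}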
def _maybe_call_fn(fn,
fn_arg_list,
fn_result=None,
description='target_log_prob'):
"""Helper which computes `fn_result` if needed."""
fn_arg_list = (list(fn_arg_list) if mcmc_util.is_list_like(fn_arg_list)
else [fn_arg_list])
if fn_result is None:
fn_result = fn(*fn_arg_list)
if not fn_result.dtype.is_floating:
raise TypeError('`{}` must be a `Tensor` with `float` `dtype`.'.format(
description))
return fn_result | 0.011628 |
def current() -> 'Process':
"""
Returns the instance of the process that is executing at the current moment.
"""
curr = greenlet.getcurrent()
if not isinstance(curr, Process):
raise TypeError("Current greenlet does not correspond to a Process instance.")
    return cast(Process, curr)
def save(self):
"""
Create or update a playlist.
"""
d = self._to_dict()
if len(d.get('videoIds', [])) > 0:
if not self.id:
self.id = self.connection.post('create_playlist', playlist=d)
else:
data = self.connection.post('update_playlist', playlist=d)
if data:
self._load(data) | 0.004902 |
def _image_width(image):
"""
Returns the width of the image found at the path supplied by `image`
relative to your project's images directory.
"""
if not Image:
raise Exception("Images manipulation require PIL")
file = StringValue(image).value
path = None
try:
width = sprite_images[file][0]
except KeyError:
width = 0
if callable(STATIC_ROOT):
try:
_file, _storage = list(STATIC_ROOT(file))[0]
path = _storage.open(_file)
        except Exception:
pass
else:
_path = os.path.join(STATIC_ROOT, file)
if os.path.exists(_path):
path = open(_path, 'rb')
if path:
image = Image.open(path)
size = image.size
width = size[0]
sprite_images[file] = size
return NumberValue(width, 'px') | 0.002205 |
def _checkMode(self, ax_args):
"""Raise an exception if the mode in the attribute exchange
arguments does not match what is expected for this class.
@raises NotAXMessage: When there is no mode value in ax_args at all.
@raises AXError: When mode does not match.
"""
mode = ax_args.get('mode')
if mode != self.mode:
if not mode:
raise NotAXMessage()
else:
raise AXError(
'Expected mode %r; got %r' % (self.mode, mode)) | 0.003636 |
def load_collection_from_url(resource, url, content_type=None):
"""
Creates a new collection for the registered resource and calls
`load_into_collection_from_url` with it.
"""
coll = create_staging_collection(resource)
load_into_collection_from_url(coll, url, content_type=content_type)
return coll | 0.003067 |
def user_data(self, access_token, *args, **kwargs):
"""Load user data from OAuth Profile Google App Engine App"""
url = GOOGLE_APPENGINE_PROFILE_V2
return self.get_json(url, headers={
'Authorization': 'Bearer ' + access_token
}) | 0.007353 |
def create(self, workflow_id, email_id, data):
"""
Manually add a subscriber to a workflow, bypassing the default trigger
settings. You can also use this endpoint to trigger a series of
automated emails in an API 3.0 workflow type or add subscribers to an
automated email queue that uses the API request delay type.
:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
:param email_id: The unique id for the Automation workflow email.
:type email_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"email_address": string*
}
"""
self.workflow_id = workflow_id
self.email_id = email_id
if 'email_address' not in data:
raise KeyError('The automation email queue must have an email_address')
check_email(data['email_address'])
response = self._mc_client._post(
url=self._build_path(workflow_id, 'emails', email_id, 'queue'),
data=data
)
if response is not None:
self.subscriber_hash = response['id']
else:
self.subscriber_hash = None
return response | 0.002315 |
def getRegToken(self):
"""
Acquire a new registration token.
Once successful, all tokens and expiry times are written to the token file (if specified on initialisation).
"""
self.verifyToken(self.Auth.SkypeToken)
token, expiry, msgsHost, endpoint = SkypeRegistrationTokenProvider(self).auth(self.tokens["skype"])
self.tokens["reg"] = token
self.tokenExpiry["reg"] = expiry
self.msgsHost = msgsHost
if endpoint:
endpoint.config()
self.endpoints["main"] = endpoint
self.syncEndpoints()
if self.tokenFile:
self.writeToken() | 0.006126 |
def paintGL(self):
'''GL function called each time a frame is drawn'''
if self.post_processing:
# Render to the first framebuffer
glBindFramebuffer(GL_FRAMEBUFFER, self.fb0)
glViewport(0, 0, self.width(), self.height())
status = glCheckFramebufferStatus(GL_FRAMEBUFFER)
if (status != GL_FRAMEBUFFER_COMPLETE):
                reason = {
                    GL_FRAMEBUFFER_UNDEFINED: 'UNDEFINED',
                    GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT: 'INCOMPLETE_ATTACHMENT',
                    GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT: 'INCOMPLETE_MISSING_ATTACHMENT',
                    GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER: 'INCOMPLETE_DRAW_BUFFER',
                    GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER: 'INCOMPLETE_READ_BUFFER',
                    GL_FRAMEBUFFER_UNSUPPORTED: 'UNSUPPORTED',
                }[status]
raise Exception('Framebuffer is not complete: {}'.format(reason))
else:
glBindFramebuffer(GL_FRAMEBUFFER, DEFAULT_FRAMEBUFFER)
        # Clear color takes floats
bg_r, bg_g, bg_b, bg_a = self.background_color
glClearColor(bg_r/255, bg_g/255, bg_b/255, bg_a/255)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
proj = self.camera.projection
cam = self.camera.matrix
self.mvproj = np.dot(proj, cam)
self.ldir = cam[:3, :3].T.dot(self.light_dir)
# Draw World
self.on_draw_world()
# Iterate over all of the post processing effects
if self.post_processing:
if len(self.post_processing) > 1:
newarg = self.textures.copy()
# Ping-pong framebuffer rendering
for i, pp in enumerate(self.post_processing[:-1]):
if i % 2:
outfb = self.fb1
outtex = self._extra_textures['fb1']
else:
outfb = self.fb2
outtex = self._extra_textures['fb2']
pp.render(outfb, newarg)
newarg['color'] = outtex
self.post_processing[-1].render(DEFAULT_FRAMEBUFFER, newarg)
else:
self.post_processing[0].render(DEFAULT_FRAMEBUFFER, self.textures)
# Draw the UI at the very last step
self.on_draw_ui() | 0.004486 |
def query(self, *args):
"""
        Query a fulltext index by key and query, or with just a plain Lucene query:
i1 = gdb.nodes.indexes.get('people',type='fulltext', provider='lucene')
i1.query('name','do*')
i1.query('name:do*')
        In this example, the last two lines are equivalent.
"""
if not args or len(args) > 2:
raise TypeError('query() takes 2 or 3 arguments (a query or a key '
'and a query) (%d given)' % (len(args) + 1))
elif len(args) == 1:
query, = args
return self.get('text').query(text_type(query))
else:
key, query = args
index_key = self.get(key)
if isinstance(query, string_types):
return index_key.query(query)
else:
if query.fielded:
raise ValueError('Queries with an included key should '
'not include a field.')
return index_key.query(text_type(query)) | 0.001898 |
async def iter(
self,
url: Union[str, methods],
data: Optional[MutableMapping] = None,
headers: Optional[MutableMapping] = None,
*,
limit: int = 200,
iterkey: Optional[str] = None,
itermode: Optional[str] = None,
minimum_time: Optional[int] = None,
as_json: Optional[bool] = None
) -> AsyncIterator[dict]:
"""
Iterate over a slack API method supporting pagination
When using :class:`slack.methods` the request is made `as_json` if available
Args:
url: :class:`slack.methods` or url string
data: JSON encodable MutableMapping
headers:
limit: Maximum number of results to return per call.
iterkey: Key in response data to iterate over (required for url string).
itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)
            minimum_time: Minimum elapsed time (in seconds) between two calls to the Slack API (defaults to 0).
If not reached the client will sleep for the remaining time.
as_json: Post JSON to the slack API
Returns:
Async iterator over `response_data[key]`
"""
itervalue = None
if not data:
data = {}
last_request_time = None
while True:
current_time = time.time()
if (
minimum_time
and last_request_time
and last_request_time + minimum_time > current_time
):
await self.sleep(last_request_time + minimum_time - current_time)
data, iterkey, itermode = sansio.prepare_iter_request(
url,
data,
iterkey=iterkey,
itermode=itermode,
limit=limit,
itervalue=itervalue,
)
last_request_time = time.time()
response_data = await self.query(url, data, headers, as_json)
itervalue = sansio.decode_iter_request(response_data)
for item in response_data[iterkey]:
yield item
if not itervalue:
break | 0.003575 |
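The loop above follows the usual cursor-pagination pattern: fetch a page, yield its items, read the next cursor, and stop when none is returned. A self-contained sketch of that pattern, with a fake `fetch_page` coroutine standing in for the real API call:

import asyncio

FAKE_PAGES = {
    None: {"items": [1, 2], "next_cursor": "c1"},
    "c1": {"items": [3, 4], "next_cursor": "c2"},
    "c2": {"items": [5], "next_cursor": None},
}

async def fetch_page(cursor=None):
    # Stand-in for an HTTP call; returns one page of results plus a cursor.
    await asyncio.sleep(0)
    return FAKE_PAGES[cursor]

async def iter_items():
    cursor = None
    while True:
        page = await fetch_page(cursor)
        for item in page["items"]:
            yield item
        cursor = page["next_cursor"]
        if not cursor:
            break

async def main():
    print([item async for item in iter_items()])  # [1, 2, 3, 4, 5]

asyncio.run(main())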
def match(lon1, lat1, lon2, lat2, tol=None, nnearest=1):
"""
Adapted from Eric Tollerud.
Finds matches in one catalog to another.
    Parameters
    ----------
lon1 : array-like
Longitude of the first catalog (degrees)
lat1 : array-like
Latitude of the first catalog (shape of array must match `lon1`)
lon2 : array-like
Longitude of the second catalog
lat2 : array-like
Latitude of the second catalog (shape of array must match `lon2`)
tol : float or None, optional
Proximity (degrees) of a match to count as a match. If None,
all nearest neighbors for the first catalog will be returned.
nnearest : int, optional
        The nth neighbor to find. E.g., 1 for the nearest neighbor, 2 for the
        second nearest neighbor, etc. Particularly useful if you want to get
the nearest *non-self* neighbor of a catalog. To do this, use:
``spherematch(lon, lat, lon, lat, nnearest=2)``
Returns
-------
idx1 : int array
Indices into the first catalog of the matches. Will never be
larger than `lon1`/`lat1`.
idx2 : int array
Indices into the second catalog of the matches. Will never be
larger than `lon2`/`lat2`.
ds : float array
Distance (in degrees) between the matches
"""
from scipy.spatial import cKDTree
lon1 = np.asarray(lon1)
lat1 = np.asarray(lat1)
lon2 = np.asarray(lon2)
lat2 = np.asarray(lat2)
if lon1.shape != lat1.shape:
raise ValueError('lon1 and lat1 do not match!')
if lon2.shape != lat2.shape:
raise ValueError('lon2 and lat2 do not match!')
rotator = SphericalRotator(0,0)
# This is equivalent, but faster than just doing np.array([x1, y1, z1]).T
x1, y1, z1 = rotator.cartesian(lon1.ravel(),lat1.ravel())
coords1 = np.empty((x1.size, 3))
coords1[:, 0] = x1
coords1[:, 1] = y1
coords1[:, 2] = z1
x2, y2, z2 = rotator.cartesian(lon2.ravel(),lat2.ravel())
coords2 = np.empty((x2.size, 3))
coords2[:, 0] = x2
coords2[:, 1] = y2
coords2[:, 2] = z2
tree = cKDTree(coords2)
if nnearest == 1:
idxs2 = tree.query(coords1)[1]
elif nnearest > 1:
idxs2 = tree.query(coords1, nnearest)[1][:, -1]
else:
raise ValueError('invalid nnearest ' + str(nnearest))
ds = angsep(lon1, lat1, lon2[idxs2], lat2[idxs2])
idxs1 = np.arange(lon1.size)
if tol is not None:
msk = ds < tol
idxs1 = idxs1[msk]
idxs2 = idxs2[msk]
ds = ds[msk]
return idxs1, idxs2, ds | 0.006156 |
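A self-contained sketch of the same matching approach, converting lon/lat to unit vectors by hand instead of using `SphericalRotator`, and turning the chord distance returned by the KD-tree into an angular separation; the sample coordinates and tolerance are made up for the example.

import numpy as np
from scipy.spatial import cKDTree

def lonlat_to_xyz(lon, lat):
    # Convert degrees on the sphere to unit-vector cartesian coordinates.
    lon, lat = np.radians(lon), np.radians(lat)
    return np.column_stack([np.cos(lat) * np.cos(lon),
                            np.cos(lat) * np.sin(lon),
                            np.sin(lat)])

lon1, lat1 = np.array([10.0, 20.0, 30.0]), np.array([0.0, 5.0, -5.0])
lon2, lat2 = np.array([10.1, 29.9]), np.array([0.05, -5.1])

tree = cKDTree(lonlat_to_xyz(lon2, lat2))
dist_chord, idx2 = tree.query(lonlat_to_xyz(lon1, lat1))
# Chord length on the unit sphere -> angular separation in degrees.
sep_deg = np.degrees(2.0 * np.arcsin(np.clip(dist_chord / 2.0, 0.0, 1.0)))
tol = 0.5
matched = sep_deg < tol
print(np.arange(lon1.size)[matched], idx2[matched], sep_deg[matched])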
def recompile(self, nick=None, new_nick=None, **kw):
"""recompile regexp on new nick"""
if self.bot.nick == nick.nick:
self.bot.config['nick'] = new_nick
self.bot.recompile() | 0.009346 |
def get_client_token(self, client_id=None, client_secret=None,
op_host=None, op_discovery_path=None, scope=None,
auto_update=True):
"""Function to get the client token which can be used for protection in
all future communication. The access token received by this method is
stored in the config file and used as the `protection_access_token`
for all subsequent calls to oxd.
Parameters:
* **client_id (str, optional):** client id from OP or from previous `setup_client` call
* **client_secret (str, optional):** client secret from the OP or from `setup_client` call
* **op_host (str, optional):** OP Host URL, default is read from the site configuration file
* **op_discovery_path (str, optional):** op discovery path provided by OP
* **scope (list, optional):** scopes of access required, default values are obtained from the config file
* **auto_update(bool, optional):** automatically get a new access_token when the current one expires. If this is set to False, then the application must call `get_client_token` when the token expires to update the client with a new access token.
Returns:
        **dict:** The client token and the refresh token, in the following form.
Example response ::
{
"access_token":"6F9619FF-8B86-D011-B42D-00CF4FC964FF",
"expires_in": 399,
"refresh_token": "fr459f",
"scope": "openid"
}
"""
# override the values from config
params = dict(client_id=client_id, client_secret=client_secret,
op_host=op_host)
if op_discovery_path:
params['op_discovery_path'] = op_discovery_path
if scope and isinstance(scope, list):
params['scope'] = scope
# If client id and secret aren't passed, then just read from the config
if not client_id:
params["client_id"] = self.config.get("client", "client_id")
if not client_secret:
params["client_secret"] = self.config.get("client",
"client_secret")
if not op_host:
params["op_host"] = self.config.get("client", "op_host")
logger.debug("Sending command `get_client_token` with params %s",
params)
response = self.msgr.request("get_client_token", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
self.config.set("client", "protection_access_token",
response["data"]["access_token"])
self.msgr.access_token = response["data"]["access_token"]
# Setup a new timer thread to refresh the access token.
if auto_update:
interval = int(response['data']['expires_in'])
args = [client_id, client_secret, op_host, op_discovery_path,
scope, auto_update]
logger.info("Setting up a threading.Timer to get_client_token in "
"%s seconds", interval)
t = Timer(interval, self.get_client_token, args)
t.start()
return response['data'] | 0.002944 |
def service(self):
""" Returns a Splunk service object for this command invocation or None.
The service object is created from the Splunkd URI and authentication token passed to the command invocation in
the search results info file. This data is not passed to a command invocation by default. You must request it by
specifying this pair of configuration settings in commands.conf:
.. code-block:: python
enableheader = true
requires_srinfo = true
The :code:`enableheader` setting is :code:`true` by default. Hence, you need not set it. The
:code:`requires_srinfo` setting is false by default. Hence, you must set it.
:return: :class:`splunklib.client.Service`, if :code:`enableheader` and :code:`requires_srinfo` are both
:code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
of :code:`None` is returned.
"""
if self._service is not None:
return self._service
metadata = self._metadata
if metadata is None:
return None
try:
searchinfo = self._metadata.searchinfo
except AttributeError:
return None
splunkd_uri = searchinfo.splunkd_uri
if splunkd_uri is None:
return None
uri = urlsplit(splunkd_uri, allow_fragments=False)
self._service = Service(
scheme=uri.scheme, host=uri.hostname, port=uri.port, app=searchinfo.app, token=searchinfo.session_key)
return self._service | 0.0062 |
def _get_reverse_relationships(opts):
"""
Returns an `OrderedDict` of field names to `RelationInfo`.
"""
# Note that we have a hack here to handle internal API differences for
# this internal API across Django 1.7 -> Django 1.8.
# See: https://code.djangoproject.com/ticket/24208
reverse_relations = OrderedDict()
for relation in get_all_related_objects(opts):
accessor_name = relation.get_accessor_name()
related = getattr(relation, 'related_model', relation.model)
reverse_relations[accessor_name] = RelationInfo(
model_field=None,
related_model=related,
to_many=relation.field.rel.multiple,
to_field=_get_to_field(relation.field),
has_through_model=False
)
# Deal with reverse many-to-many relationships.
for relation in get_all_related_many_to_many_objects(opts):
accessor_name = relation.get_accessor_name()
related = getattr(relation, 'related_model', relation.model)
reverse_relations[accessor_name] = RelationInfo(
model_field=None,
related_model=related,
to_many=True,
# manytomany do not have to_fields
to_field=None,
has_through_model=(
(getattr(relation.field.rel, 'through', None) is not None) and
not relation.field.rel.through._meta.auto_created
)
)
return reverse_relations | 0.000678 |
def _expand_tag_query(self, query, table_name = None):
""" Expand Tag query dict into a WHERE-clause.
If you need to prefix each column reference with a table
name, that can be supplied via the table_name argument.
"""
where = unicode()
opt = list()
# handle table name, can be None
if table_name is None:
col_prefix = ""
else:
col_prefix = table_name + "."
if type(query['val1']) == dict and type(query['val2']) == dict:
# Sub expression, recurse! This is used for boolean operators: AND OR
            # add parentheses
sub_where1, opt1 = self._expand_tag_query(query['val1'], table_name)
sub_where2, opt2 = self._expand_tag_query(query['val2'], table_name)
try:
where += unicode(" (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) )
except KeyError:
raise NipapNoSuchOperatorError("No such operator %s" % unicode(query['operator']))
opt += opt1
opt += opt2
else:
# TODO: raise exception if someone passes one dict and one "something else"?
# val1 is variable, val2 is string.
tag_attr = dict()
tag_attr['name'] = 'name'
if query['val1'] not in tag_attr:
raise NipapInputError('Search variable \'%s\' unknown' % unicode(query['val1']))
# workaround for handling equal matches of NULL-values
if query['operator'] == 'equals' and query['val2'] is None:
query['operator'] = 'is'
elif query['operator'] == 'not_equals' and query['val2'] is None:
query['operator'] = 'is_not'
# build where clause
if query['operator'] not in _operation_map:
raise NipapNoSuchOperatorError("No such operator %s" % query['operator'])
where = unicode(" %s%s %s %%s " %
( col_prefix, tag_attr[query['val1']],
_operation_map[query['operator']] )
)
opt.append(query['val2'])
return where, opt | 0.00772 |
def down(returns, factor_returns, **kwargs):
"""
Calculates a given statistic filtering only negative factor return periods.
Parameters
----------
returns : pd.Series or np.ndarray
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns (optional): float / series
Benchmark return to compare returns against.
function:
the function to run for each rolling window.
(other keywords): other keywords that are required to be passed to the
function in the 'function' argument may also be passed in.
Returns
-------
Same as the return of the 'function'
"""
func = kwargs.pop('function')
returns = returns[factor_returns < 0]
factor_returns = factor_returns[factor_returns < 0]
return func(returns, factor_returns, **kwargs) | 0.001121 |
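A minimal pandas sketch of the underlying idea, filter both series on the sign of the benchmark and then apply the statistic; `mean_when_down` is a hypothetical stand-in for the `function` keyword argument.

import pandas as pd

returns = pd.Series([0.01, -0.02, 0.005, -0.01, 0.02])
factor_returns = pd.Series([0.02, -0.01, 0.01, -0.03, 0.015])

def mean_when_down(returns, factor_returns):
    # Keep only the periods where the benchmark was negative.
    mask = factor_returns < 0
    return returns[mask].mean()

print(mean_when_down(returns, factor_returns))  # (-0.02 + -0.01) / 2 = -0.015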
def view_conflicts(L, normalize=True, colorbar=True):
"""Display an [m, m] matrix of conflicts"""
L = L.todense() if sparse.issparse(L) else L
C = _get_conflicts_matrix(L, normalize=normalize)
plt.imshow(C, aspect="auto")
plt.title("Conflicts")
if colorbar:
plt.colorbar()
plt.show() | 0.003135 |
def annotation(self, type, set=None):
"""Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found"""
l = self.count(type,set,True,default_ignore_annotations)
if len(l) >= 1:
return l[0]
else:
raise NoSuchAnnotation() | 0.020649 |
def encode(self, response):
"""Encode a response to a L{WebResponse}, signing it first if appropriate.
@raises EncodingError: When I can't figure out how to encode this
message.
@raises AlreadySigned: When this response is already signed.
@returntype: L{WebResponse}
"""
# the isinstance is a bit of a kludge... it means there isn't really
# an adapter to make the interfaces quite match.
if (not isinstance(response, Exception)) and response.needsSigning():
if not self.signatory:
raise ValueError(
"Must have a store to sign this request: %s" %
(response,), response)
if response.fields.hasKey(OPENID_NS, 'sig'):
raise AlreadySigned(response)
response = self.signatory.sign(response)
return super(SigningEncoder, self).encode(response) | 0.003219 |
def get_checksum(content, encoding="utf8", block_size=8192):
"""
Returns the MD5 checksum in hex for the given content. If 'content'
is a file-like object, the content will be obtained from its read()
method. If 'content' is a file path, that file is read and its
contents used. Otherwise, 'content' is assumed to be the string whose
checksum is desired. If the content is unicode, it will be encoded
using the specified encoding.
To conserve memory, files and file-like objects will be read in blocks,
with the default block size of 8192 bytes, which is 64 * the digest block
size of md5 (128). This is optimal for most cases, but you can change this
by passing in a different value for `block_size`.
"""
md = hashlib.md5()
def safe_update(txt):
if isinstance(txt, six.text_type):
txt = txt.encode(encoding)
md.update(txt)
try:
isfile = os.path.isfile(content)
except (TypeError, ValueError):
# Will happen with binary content.
isfile = False
if isfile:
with open(content, "rb") as ff:
txt = ff.read(block_size)
while txt:
safe_update(txt)
txt = ff.read(block_size)
elif hasattr(content, "read"):
pos = content.tell()
content.seek(0)
txt = content.read(block_size)
while txt:
safe_update(txt)
txt = content.read(block_size)
content.seek(pos)
else:
safe_update(content)
return md.hexdigest() | 0.00064 |
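A quick standalone check of the block-wise hashing idea using only `hashlib` and `io`: reading a stream in 8192-byte chunks yields the same digest as hashing the whole payload at once.

import hashlib
import io

payload = b"x" * 100_000
block_size = 8192

md = hashlib.md5()
stream = io.BytesIO(payload)
chunk = stream.read(block_size)
while chunk:
    md.update(chunk)        # feed the digest one block at a time
    chunk = stream.read(block_size)

assert md.hexdigest() == hashlib.md5(payload).hexdigest()
print(md.hexdigest())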
def index_nearest(array, value):
"""
Finds index of nearest value in array.
Args:
array: numpy array
        value: scalar value to search for
    Returns:
        int: index of the element in `array` nearest to `value`
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
"""
idx = (np.abs(array-value)).argmin()
return idx | 0.003106 |
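Usage is a one-liner; the sketch below just repeats the `argmin` trick on sample data.

import numpy as np

array = np.array([0.0, 0.5, 1.3, 2.8, 4.1])
value = 1.1
idx = (np.abs(array - value)).argmin()  # index of the smallest absolute difference
print(idx, array[idx])  # 2 1.3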
def version(versioninfo=False):
'''
.. versionadded:: 2015.8.0
Returns the version of Git installed on the minion
versioninfo : False
If ``True``, return the version in a versioninfo list (e.g. ``[2, 5,
0]``)
CLI Example:
.. code-block:: bash
salt myminion git.version
'''
contextkey = 'git.version'
contextkey_info = 'git.versioninfo'
if contextkey not in __context__:
try:
version_ = _git_run(['git', '--version'])['stdout']
except CommandExecutionError as exc:
log.error(
'Failed to obtain the git version (error follows):\n%s',
exc
)
version_ = 'unknown'
try:
__context__[contextkey] = version_.split()[-1]
except IndexError:
# Somehow git --version returned no stdout while not raising an
# error. Should never happen but we should still account for this
# possible edge case.
log.error('Running \'git --version\' returned no stdout')
__context__[contextkey] = 'unknown'
if not versioninfo:
return __context__[contextkey]
if contextkey_info not in __context__:
# Set ptr to the memory location of __context__[contextkey_info] to
# prevent repeated dict lookups
ptr = __context__.setdefault(contextkey_info, [])
for part in __context__[contextkey].split('.'):
try:
ptr.append(int(part))
except ValueError:
ptr.append(part)
return __context__[contextkey_info] | 0.000616 |
def _id_for_pc(self, name):
""" Given the name of the PC, return the database identifier. """
        if name not in self.pc2id_lut:
self.c.execute("INSERT INTO pcs (name) VALUES ( ? )", (name,))
self.pc2id_lut[name] = self.c.lastrowid
self.id2pc_lut[self.c.lastrowid] = name
return self.pc2id_lut[name] | 0.008451 |
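The same insert-once-and-cache-the-rowid pattern can be sketched with the standard `sqlite3` module; the in-memory table and `id_for_pc` helper below are hypothetical and exist only for this example.

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE pcs (id INTEGER PRIMARY KEY, name TEXT)")

name2id, id2name = {}, {}

def id_for_pc(name):
    # Insert on first sight and cache the generated rowid in both directions.
    if name not in name2id:
        cur.execute("INSERT INTO pcs (name) VALUES (?)", (name,))
        name2id[name] = cur.lastrowid
        id2name[cur.lastrowid] = name
    return name2id[name]

print(id_for_pc("pc-a"), id_for_pc("pc-b"), id_for_pc("pc-a"))  # 1 2 1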
def clean_obs_names(data, base='[AGTCBDHKMNRSVWY]', ID_length=12, copy=False):
"""Cleans up the obs_names and identifies sample names.
    For example, an obs_name 'sample1_AGTCdate' is changed to 'AGTC' of the sample 'sample1_date'.
The sample name is then saved in obs['sample_batch'].
    The genetic codes are identified according to https://www.neb.com/tools-and-resources/usage-guidelines/the-genetic-code.
Arguments
---------
    data: :class:`~anndata.AnnData`
Annotated data matrix.
base: `str` (default: `[AGTCBDHKMNRSVWY]`)
Genetic code letters to be identified.
ID_length: `int` (default: 12)
Length of the Genetic Codes in the samples.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
obs_names: list
updated names of the observations
sample_batch: `.obs`
names of the identified sample batches
"""
def get_base_list(name, base):
base_list = base
while re.search(base_list + base, name) is not None:
base_list += base
if len(base_list) == 0:
raise ValueError('Encountered an invalid ID in obs_names: ', name)
return base_list
adata = data.copy() if copy else data
names = adata.obs_names
base_list = get_base_list(names[0], base)
if len(np.unique([len(name) for name in adata.obs_names])) == 1:
start, end = re.search(base_list, names[0]).span()
newIDs = [name[start:end] for name in names]
start, end = 0, len(newIDs[0])
for i in range(end - ID_length):
if np.any([ID[i] not in base for ID in newIDs]): start += 1
if np.any([ID[::-1][i] not in base for ID in newIDs]): end -= 1
newIDs = [ID[start:end] for ID in newIDs]
prefixes = [names[i].replace(newIDs[i], '') for i in range(len(names))]
else:
prefixes, newIDs = [], []
for name in names:
match = re.search(base_list, name)
newID = re.search(get_base_list(name, base), name).group() if match is None else match.group()
newIDs.append(newID)
prefixes.append(name.replace(newID, ''))
adata.obs_names = newIDs
if len(prefixes[0]) > 0 and len(np.unique(prefixes)) > 1:
#idx_names = np.random.choice(len(names), size=20, replace=False)
#for i in range(len(names[0])):
# if np.all([re.search(names[0][:i], names[ix]) for ix in idx_names]) is not None: obs_key = names[0][:i]
adata.obs['sample_batch'] = pd.Categorical(prefixes) if len(np.unique(prefixes)) < adata.n_obs else prefixes
adata.obs_names_make_unique()
return adata if copy else None | 0.003593 |
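The pattern-growing step can be seen in isolation with a short, self-contained example: the character class is repeated until it no longer matches a longer run, and the matched barcode is then stripped from the name.

import re

base = '[AGTCBDHKMNRSVWY]'
name = 'sample1_AGTCAGTCAGTC_2020'

# Grow the pattern until it no longer matches a longer run of code letters.
base_list = base
while re.search(base_list + base, name) is not None:
    base_list += base

barcode = re.search(base_list, name).group()
sample = name.replace(barcode, '')
print(barcode, sample)  # AGTCAGTCAGTC sample1__2020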
def trigger_revisions(self, trigger_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/triggers#list-trigger-revisions"
api_path = "/api/v2/triggers/{trigger_id}/revisions.json"
api_path = api_path.format(trigger_id=trigger_id)
return self.call(api_path, **kwargs) | 0.009677 |
def lreg(self, xcol, ycol, name="Regression"):
"""
        Add a column to the main dataframe populated with
the model's linear regression for a column
"""
try:
x = self.df[xcol].values.reshape(-1, 1)
y = self.df[ycol]
lm = linear_model.LinearRegression()
lm.fit(x, y)
predictions = lm.predict(x)
self.df[name] = predictions
except Exception as e:
self.err(e, "Can not calculate linear regression") | 0.003839 |
def calculate_dimensions(image_size, desired_size):
"""Return the Tuple with the arguments to pass to Image.crop.
    If the image is smaller than the desired_size, don't do
anything. Otherwise, first calculate the (truncated) center and then
take half the width and height (truncated again) for x and y.
x0, y0: the center coordinates
"""
current_x, current_y = image_size
target_x, target_y = desired_size
if current_x < target_x and current_y < target_y:
return None
if current_x > target_x:
new_x0 = floor(current_x / 2)
new_x = new_x0 - ceil(target_x / 2)
new_width = target_x
else:
new_x = 0
new_width = current_x
if current_y > target_y:
new_y0 = floor(current_y / 2)
new_y = new_y0 - ceil(target_y / 2)
new_height = target_y
else:
new_y = 0
new_height = current_y
return (int(new_x), int(new_y), new_width, new_height) | 0.001857 |
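A quick numerical check of the centering arithmetic for an image larger than the target in both dimensions:

from math import ceil, floor

current_x, current_y = 800, 600      # image size
target_x, target_y = 400, 300        # desired size

# Find the center, then step back by half the target size.
new_x = floor(current_x / 2) - ceil(target_x / 2)
new_y = floor(current_y / 2) - ceil(target_y / 2)
print((new_x, new_y, target_x, target_y))  # (200, 150, 400, 300)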
def _read_mode_pocsp(self, size, kind):
"""Read Partial Order Connection Service Profile option.
Positional arguments:
* size - int, length of option
* kind - int, 10 (POC-Serv Profile)
Returns:
* dict -- extracted Partial Order Connection Service Profile (POC-SP) option
Structure of TCP POC-SP Option [RFC 1693][RFC 6247]:
1 bit 1 bit 6 bits
+----------+----------+------------+----------+--------+
| Kind=10 | Length=3 | Start_flag | End_flag | Filler |
+----------+----------+------------+----------+--------+
Octets Bits Name Description
0 0 tcp.pocsp.kind Kind (10)
1 8 tcp.pocsp.length Length (3)
2 16 tcp.pocsp.start Start Flag
2 17 tcp.pocsp.end End Flag
2 18 tcp.pocsp.filler Filler
"""
temp = self._read_binary(size)
data = dict(
kind=kind,
length=size,
start=True if int(temp[0]) else False,
end=True if int(temp[1]) else False,
filler=bytes(chr(int(temp[2:], base=2)), encoding='utf-8'),
)
return data | 0.002151 |
def sample_rwalk(args):
"""
Return a new live point proposed by random walking away from an
existing live point.
Parameters
----------
u : `~numpy.ndarray` with shape (npdim,)
Position of the initial sample. **This is a copy of an existing live
point.**
loglstar : float
Ln(likelihood) bound.
axes : `~numpy.ndarray` with shape (ndim, ndim)
Axes used to propose new points. For random walks new positions are
proposed using the :class:`~dynesty.bounding.Ellipsoid` whose
shape is defined by axes.
scale : float
Value used to scale the provided axes.
prior_transform : function
Function transforming a sample from the a unit cube to the parameter
space of interest according to the prior.
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
kwargs : dict
A dictionary of additional method-specific parameters.
Returns
-------
u : `~numpy.ndarray` with shape (npdim,)
Position of the final proposed point within the unit cube.
v : `~numpy.ndarray` with shape (ndim,)
Position of the final proposed point in the target parameter space.
logl : float
Ln(likelihood) of the final proposed point.
nc : int
Number of function calls used to generate the sample.
blob : dict
Collection of ancillary quantities used to tune :data:`scale`.
"""
# Unzipping.
(u, loglstar, axes, scale,
prior_transform, loglikelihood, kwargs) = args
rstate = np.random
# Periodicity.
nonperiodic = kwargs.get('nonperiodic', None)
# Setup.
n = len(u)
walks = kwargs.get('walks', 25) # number of steps
accept = 0
reject = 0
fail = 0
nfail = 0
nc = 0
ncall = 0
drhat, dr, du, u_prop, logl_prop = np.nan, np.nan, np.nan, np.nan, np.nan
while nc < walks or accept == 0:
while True:
# Check scale-factor.
if scale == 0.:
raise RuntimeError("The random walk sampling is stuck! "
"Some useful output quantities:\n"
"u: {0}\n"
"drhat: {1}\n"
"dr: {2}\n"
"du: {3}\n"
"u_prop: {4}\n"
"loglstar: {5}\n"
"logl_prop: {6}\n"
"axes: {7}\n"
"scale: {8}."
.format(u, drhat, dr, du, u_prop,
loglstar, logl_prop, axes, scale))
# Propose a direction on the unit n-sphere.
drhat = rstate.randn(n)
drhat /= linalg.norm(drhat)
# Scale based on dimensionality.
dr = drhat * rstate.rand()**(1./n)
# Transform to proposal distribution.
du = np.dot(axes, dr)
u_prop = u + scale * du
# Check unit cube constraints.
if unitcheck(u_prop, nonperiodic):
break
else:
fail += 1
nfail += 1
# Check if we're stuck generating bad numbers.
if fail > 100 * walks:
warnings.warn("Random number generation appears to be "
"extremely inefficient. Adjusting the "
"scale-factor accordingly.")
fail = 0
scale *= math.exp(-1. / n)
# Check proposed point.
v_prop = prior_transform(np.array(u_prop))
logl_prop = loglikelihood(np.array(v_prop))
if logl_prop >= loglstar:
u = u_prop
v = v_prop
logl = logl_prop
accept += 1
else:
reject += 1
nc += 1
ncall += 1
# Check if we're stuck generating bad points.
if nc > 50 * walks:
scale *= math.exp(-1. / n)
warnings.warn("Random walk proposals appear to be "
"extremely inefficient. Adjusting the "
"scale-factor accordingly.")
nc, accept, reject = 0, 0, 0 # reset values
blob = {'accept': accept, 'reject': reject, 'fail': nfail, 'scale': scale}
return u, v, logl, ncall, blob | 0.00022 |
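The proposal step draws a direction uniformly on the unit n-sphere, scales it by `rand()**(1/n)` so the point is uniform inside the unit ball, and stretches it by the bounding axes. A minimal numpy sketch of just that step, with toy values for `axes`, `u`, and `scale`:

import numpy as np

rng = np.random.default_rng(0)
n = 3
axes = np.diag([1.0, 0.5, 0.25])    # toy ellipsoid axes
u = np.array([0.5, 0.5, 0.5])       # current point in the unit cube
scale = 0.1

drhat = rng.standard_normal(n)
drhat /= np.linalg.norm(drhat)          # uniform direction on the unit sphere
dr = drhat * rng.random() ** (1.0 / n)  # uniform radius inside the unit ball
du = axes @ dr                          # stretch by the ellipsoid axes
u_prop = u + scale * du
print(u_prop)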
def create_cache_cluster(CacheClusterId=None, ReplicationGroupId=None, AZMode=None, PreferredAvailabilityZone=None, PreferredAvailabilityZones=None, NumCacheNodes=None, CacheNodeType=None, Engine=None, EngineVersion=None, CacheParameterGroupName=None, CacheSubnetGroupName=None, CacheSecurityGroupNames=None, SecurityGroupIds=None, Tags=None, SnapshotArns=None, SnapshotName=None, PreferredMaintenanceWindow=None, Port=None, NotificationTopicArn=None, AutoMinorVersionUpgrade=None, SnapshotRetentionLimit=None, SnapshotWindow=None, AuthToken=None):
"""
Creates a cache cluster. All nodes in the cache cluster run the same protocol-compliant cache engine software, either Memcached or Redis.
See also: AWS API Documentation
:example: response = client.create_cache_cluster(
CacheClusterId='string',
ReplicationGroupId='string',
AZMode='single-az'|'cross-az',
PreferredAvailabilityZone='string',
PreferredAvailabilityZones=[
'string',
],
NumCacheNodes=123,
CacheNodeType='string',
Engine='string',
EngineVersion='string',
CacheParameterGroupName='string',
CacheSubnetGroupName='string',
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
SnapshotArns=[
'string',
],
SnapshotName='string',
PreferredMaintenanceWindow='string',
Port=123,
NotificationTopicArn='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
AuthToken='string'
)
:type CacheClusterId: string
:param CacheClusterId: [REQUIRED]
The node group (shard) identifier. This parameter is stored as a lowercase string.
Constraints:
A name must contain from 1 to 20 alphanumeric characters or hyphens.
The first character must be a letter.
A name cannot end with a hyphen or contain two consecutive hyphens.
:type ReplicationGroupId: string
:param ReplicationGroupId:
Warning
Due to current limitations on Redis (cluster mode disabled), this operation or parameter is not supported on Redis (cluster mode enabled) replication groups.
The ID of the replication group to which this cache cluster should belong. If this parameter is specified, the cache cluster is added to the specified replication group as a read replica; otherwise, the cache cluster is a standalone primary that is not part of any replication group.
If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cache cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones.
Note
This parameter is only valid if the Engine parameter is redis .
:type AZMode: string
:param AZMode: Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.
This parameter is only supported for Memcached cache clusters.
If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode.
:type PreferredAvailabilityZone: string
:param PreferredAvailabilityZone: The EC2 Availability Zone in which the cache cluster is created.
All nodes belonging to this Memcached cache cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones .
Default: System chosen Availability Zone.
:type PreferredAvailabilityZones: list
:param PreferredAvailabilityZones: A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important.
This option is only supported on Memcached.
Note
If you are creating your cache cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.
The number of Availability Zones listed must equal the value of NumCacheNodes .
If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone instead, or repeat the Availability Zone multiple times in the list.
Default: System chosen Availability Zones.
(string) --
:type NumCacheNodes: integer
:param NumCacheNodes: The initial number of cache nodes that the cache cluster has.
For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.
If you need more than 20 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ .
:type CacheNodeType: string
:param CacheNodeType: The compute and memory capacity of the nodes in the node group (shard).
Valid node types are as follows:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
Notes:
All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).
Redis backup/restore is not supported for Redis (cluster mode disabled) T1 and T2 instances. Backup/restore is supported on Redis (cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.
For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis .
:type Engine: string
:param Engine: The name of the cache engine to be used for this cache cluster.
Valid values for this parameter are: memcached | redis
:type EngineVersion: string
:param EngineVersion: The version number of the cache engine to be used for this cache cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster or replication group and create it anew with the earlier engine version.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the parameter group to associate with this cache cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a cluster.
:type CacheSubnetGroupName: string
:param CacheSubnetGroupName: The name of the subnet group to be used for the cache cluster.
Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (Amazon VPC).
Warning
If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups .
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of security group names to associate with this cache cluster.
Use this parameter only when you are creating a cache cluster outside of an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: One or more VPC security groups associated with the cache cluster.
Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type Tags: list
:param Tags: A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.
(dict) --A cost allocation Tag that can be added to an ElastiCache cluster or replication group. Tags are composed of a Key/Value pair. A tag with a null Value is permitted.
Key (string) --The key for the tag. May not be null.
Value (string) --The tag's value. May be null.
:type SnapshotArns: list
:param SnapshotArns: A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.
Note
This parameter is only valid if the Engine parameter is redis .
Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
(string) --
:type SnapshotName: string
:param SnapshotName: The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created.
Note
This parameter is only valid if the Engine parameter is redis .
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:
Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type Port: integer
:param Port: The port number on which each of the cache nodes accepts connections.
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.
Note
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted.
Note
This parameter is only valid if the Engine parameter is redis .
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
Note: This parameter is only valid if the Engine parameter is redis .
:type AuthToken: string
:param AuthToken:
Reserved parameter. The password used to access a password protected server.
Password constraints:
Must be only printable ASCII characters.
Must be at least 16 characters and no more than 128 characters in length.
    Cannot contain any of the following characters: '/', '"', or '@'.
For more information, see AUTH password at Redis.
:rtype: dict
:return: {
'CacheCluster': {
'CacheClusterId': 'string',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'ClientDownloadLandingPage': 'string',
'CacheNodeType': 'string',
'Engine': 'string',
'EngineVersion': 'string',
'CacheClusterStatus': 'string',
'NumCacheNodes': 123,
'PreferredAvailabilityZone': 'string',
'CacheClusterCreateTime': datetime(2015, 1, 1),
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'NumCacheNodes': 123,
'CacheNodeIdsToRemove': [
'string',
],
'EngineVersion': 'string',
'CacheNodeType': 'string'
},
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'CacheSecurityGroups': [
{
'CacheSecurityGroupName': 'string',
'Status': 'string'
},
],
'CacheParameterGroup': {
'CacheParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'CacheNodeIdsToReboot': [
'string',
]
},
'CacheSubnetGroupName': 'string',
'CacheNodes': [
{
'CacheNodeId': 'string',
'CacheNodeStatus': 'string',
'CacheNodeCreateTime': datetime(2015, 1, 1),
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ParameterGroupStatus': 'string',
'SourceCacheNodeId': 'string',
'CustomerAvailabilityZone': 'string'
},
],
'AutoMinorVersionUpgrade': True|False,
'SecurityGroups': [
{
'SecurityGroupId': 'string',
'Status': 'string'
},
],
'ReplicationGroupId': 'string',
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string'
}
}
:returns:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
"""
pass | 0.005844 |
def to_dict(self):
'''Save this service port into a dictionary.'''
d = {'name': self.name}
if self.visible != True:
d[RTS_EXT_NS_YAML + 'visible'] = self.visible
if self.comment:
d[RTS_EXT_NS_YAML + 'comment'] = self.comment
props = []
for name in self.properties:
p = {'name': name}
if self.properties[name]:
p['value'] = str(self.properties[name])
props.append(p)
if props:
d[RTS_EXT_NS_YAML + 'properties'] = props
return d | 0.00519 |
def detachRequest(GmmCause_presence=0):
"""DETACH REQUEST Section 9.4.5"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x5) # 00000101
c = DetachTypeAndForceToStandby()
packet = a / b / c
    if GmmCause_presence == 1:
e = GmmCause(ieiGC=0x25)
packet = packet / e
return packet | 0.003175 |
def setViews(self, received, windowId=None):
'''
Sets L{self.views} to the received value splitting it into lines.
@type received: str
@param received: the string received from the I{View Server}
'''
if not received or received == "":
raise ValueError("received is empty")
self.views = []
''' The list of Views represented as C{str} obtained after splitting it into lines after being received from the server. Done by L{self.setViews()}. '''
self.__parseTree(received.split("\n"), windowId)
if DEBUG:
print >>sys.stderr, "there are %d views in this dump" % len(self.views) | 0.0059 |
def add_chassis(self, chassis):
"""
:param chassis: chassis object
"""
res = self._request(RestMethod.post, self.user_url, params={'ip': chassis.ip, 'port': chassis.port})
assert(res.status_code == 201) | 0.012346 |
def execute_loaders(self, env=None, silent=None, key=None, filename=None):
"""Execute all internal and registered loaders
:param env: The environment to load
        :param silent: If loading errors are silenced
:param key: if provided load a single key
:param filename: optional custom filename to load
"""
if key is None:
default_loader(self, self._defaults)
env = (env or self.current_env).upper()
silent = silent or self.SILENT_ERRORS_FOR_DYNACONF
settings_loader(
self, env=env, silent=silent, key=key, filename=filename
)
self.load_extra_yaml(env, silent, key) # DEPRECATED
enable_external_loaders(self)
for loader in self.loaders:
self.logger.debug("Dynaconf executing: %s", loader.__name__)
loader.load(self, env, silent=silent, key=key)
self.load_includes(env, silent=silent, key=key)
self.logger.debug("Loaded Files: %s", deduplicate(self._loaded_files)) | 0.001938 |
def _address_rxp(self, addr):
""" Create a regex string for addresses, that matches several representations:
- with(out) '0x' prefix
- `pex` version
This function takes care of maintaining additional lookup keys for substring matches.
        If the given string is not an address, the original string is returned.
"""
try:
addr = to_checksum_address(addr)
rxp = '(?:0x)?' + pex(address_checksum_and_decode(addr)) + f'(?:{addr.lower()[10:]})?'
self._extra_keys[pex(address_checksum_and_decode(addr))] = addr.lower()
self._extra_keys[addr[2:].lower()] = addr.lower()
except ValueError:
rxp = addr
return rxp | 0.009383 |
def create_course(self, courseid, init_content):
"""
:param courseid: the course id of the course
:param init_content: initial descriptor content
:raise InvalidNameException or CourseAlreadyExistsException
        Create a new course folder and set the initial descriptor content; the folder may already exist
"""
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
course_fs = self.get_course_fs(courseid)
course_fs.ensure_exists()
if course_fs.exists("course.yaml") or course_fs.exists("course.json"):
raise CourseAlreadyExistsException("Course with id " + courseid + " already exists.")
else:
course_fs.put("course.yaml", get_json_or_yaml("course.yaml", init_content))
get_course_logger(courseid).info("Course %s created in the factory.", courseid) | 0.006557 |
def _coloredhelp(s):
""" Colorize the usage string for docopt
(ColorDocoptExit, docoptextras)
"""
newlines = []
bigindent = (' ' * 16)
in_opts = False
for line in s.split('\n'):
linestripped = line.strip('\n').strip().strip(':')
if linestripped == 'Usage':
# label
line = line.replace('Usage', str(C('Usage', **ARGS_LABEL)))
elif linestripped == 'Options':
line = line.replace('Options', str(C('Options', **ARGS_LABEL)))
in_opts = True
elif (':' in line) and (not line.startswith(bigindent)):
# opt,desc line. colorize it.
lineparts = line.split(':')
opt = lineparts[0]
vals = [lineparts[1]] if len(lineparts) == 2 else lineparts[1:]
# colorize opt
if ',' in opt:
opts = opt.split(',')
else:
opts = [opt]
optstr = ','.join(str(C(o, **ARGS_OPTIONS)) for o in opts)
# colorize desc
valstr = ':'.join(str(C(val, **ARGS_DESC)) for val in vals)
line = ':'.join((optstr, valstr))
elif in_opts and line.startswith(bigindent):
# continued desc string..
# Make any 'Default:Value' parts look the same as the opt,desc.
line = ':'.join(str(C(s, **ARGS_DESC)) for s in line.split(':'))
elif (not line.startswith(' ')):
# header line.
line = str(C(line, **ARGS_HEADER))
else:
# Everything else, usage mainly.
if SCRIPT:
line = line.replace(SCRIPT, str(C(SCRIPT, **ARGS_SCRIPT)))
newlines.append(
'{}{}'.format(line, C('', style='reset_all'))
)
return '\n'.join(newlines) | 0.000557 |
def mk_kwargs(cls, kwargs):
"""
Pop recognized arguments from a keyword list.
"""
ret = {}
kws = ['row_factory', 'body', 'parent']
for k in kws:
if k in kwargs:
ret[k] = kwargs.pop(k)
return ret | 0.007168 |
def profile_stats(adapter, threshold = 0.9):
"""
Compares the pairwise hamming distances for all the sample profiles in
the database. Returns a table of the number of distances within given
ranges.
Args:
adapter (MongoAdapter): Adapter to mongodb
threshold (float): If any distance is found above this threshold
a warning will be given, stating the two matching samples.
Returns:
distance_dict (dict): dictionary with ranges as keys, and the number
of distances that are within these ranges as values.
"""
profiles = []
samples = []
    # Instantiate the distance dictionary with a count of 0 for all the ranges
distance_dict = {key: 0 for key in HAMMING_RANGES.keys()}
for case in adapter.cases():
for individual in case['individuals']:
if individual.get('profile'):
#Make sample name <case_id>.<sample_id>
sample_id = f"{case['case_id']}.{individual['ind_id']}"
ind_profile = individual['profile']
#Numpy array to hold all the distances for this samples profile
distance_array = np.array([], dtype=np.float)
for sample, profile in zip(samples, profiles):
#Get distance and append to distance array
distance = compare_profiles(ind_profile, profile)
distance_array = np.append(distance_array, distance)
#Issue warning if above threshold
if distance >= threshold:
LOG.warning(f"{sample_id} is {distance} similar to {sample}")
#Check number of distances in each range and add to distance_dict
for key,range in HAMMING_RANGES.items():
#Calculate the number of hamming distances found within the
#range for current individual
distance_dict[key] += np.sum(
(distance_array >= range[0]) & (distance_array < range[1])
)
#Append profile and sample_id for this sample for the next
#iteration
profiles.append(ind_profile)
samples.append(sample_id)
return distance_dict | 0.007676 |
def Policy(self, data=None, subset=None):
"""{dynamic_docstring}"""
return self.factory.get_object(jssobjects.Policy, data, subset) | 0.013605 |
def GetEntries(self, parser_mediator, cache=None, database=None, **kwargs):
"""Extracts event objects from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache.
database (Optional[pyesedb.file]): ESE database.
Raises:
ValueError: If the database attribute is not valid.
"""
if database is None:
raise ValueError('Invalid database.')
for table_name, callback_method in iter(self._tables.items()):
if parser_mediator.abort:
break
if not callback_method:
# Table names without a callback method are allowed to improve
# the detection of a database based on its table names.
continue
callback = getattr(self, callback_method, None)
if callback is None:
logger.warning(
'[{0:s}] missing callback method: {1:s} for table: {2:s}'.format(
self.NAME, callback_method, table_name))
continue
esedb_table = database.get_table_by_name(table_name)
if not esedb_table:
logger.warning('[{0:s}] missing table: {1:s}'.format(
self.NAME, table_name))
continue
# The database is passed in case the database contains table names
# that are assigned dynamically and cannot be defined by
# the table name-callback mechanism.
callback(
parser_mediator, cache=cache, database=database, table=esedb_table,
**kwargs) | 0.007653 |
def nested_tuple(container):
"""Recursively transform a container structure to a nested tuple.
The function understands container types inheriting from the selected abstract base
classes in `collections.abc`, and performs the following replacements:
`Mapping`
`tuple` of key-value pair `tuple`s. The order is preserved in the case of an
`OrderedDict`, otherwise the key-value pairs are sorted if orderable and
otherwise kept in the order of iteration.
`Sequence`
`tuple` containing the same elements in unchanged order.
`Container and Iterable and Sized` (equivalent to `Collection` in python >= 3.6)
`tuple` containing the same elements in sorted order if orderable and otherwise
kept in the order of iteration.
The function recurses into these container types to perform the same replacement,
and leaves objects of other types untouched.
The returned container is hashable if and only if all the values contained in the
original data structure are hashable.
Parameters
----------
container
Data structure to transform into a nested tuple.
Returns
-------
tuple
Nested tuple containing the same data as `container`.
"""
if isinstance(container, OrderedDict):
return tuple(map(nested_tuple, container.items()))
if isinstance(container, Mapping):
return tuple(sorted_if_possible(map(nested_tuple, container.items())))
if not isinstance(container, (str, bytes)):
if isinstance(container, Sequence):
return tuple(map(nested_tuple, container))
if (
isinstance(container, Container)
and isinstance(container, Iterable)
and isinstance(container, Sized)
):
return tuple(sorted_if_possible(map(nested_tuple, container)))
return container | 0.004251 |
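A minimal usage sketch for nested_tuple above (assuming the function and its sorted_if_possible helper are in scope); it shows how a plain dict becomes sorted key-value pairs, an OrderedDict keeps insertion order, and lists and sets collapse into hashable tuples.
from collections import OrderedDict
# nested_tuple and sorted_if_possible are assumed to be importable from the module above.
data = {"b": [1, 2, {3, 4}], "a": OrderedDict([("y", 2), ("x", 1)])}
frozen = nested_tuple(data)
# dict -> sorted key/value pairs, list -> tuple, set -> sorted tuple,
# OrderedDict -> pairs kept in insertion order.
assert frozen == (("a", (("y", 2), ("x", 1))), ("b", (1, 2, (3, 4))))
hash(frozen)  # the result is hashable, so it can be cached or used as a dict key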
def _frange(start, stop=None, step=None):
    """
    _frange: a ``range``-like generator for float inputs.
    :param start: first value yielded; if ``stop`` is omitted, ``start`` is
        treated as the exclusive upper bound and the sequence starts at 0.0
    :type start: float
    :param stop: exclusive upper bound of the sequence
    :type stop: float
    :param step: increment between consecutive values (defaults to 1.0)
    :type step: float
    :return: the floats from ``start`` up to, but excluding, ``stop``
    :rtype: generator
    """
    if stop is None:
        stop = start
        start = 0.0
    if step is None:
        step = 1.0
    r = start
    while r < stop:
        yield r
        r += step | 0.002488 |
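A short usage sketch for _frange above (assuming the generator is in scope); note that, like any float accumulation, repeated `r += step` can drift slightly, so the final value may land just below `stop`.
# _frange is assumed to be defined as in the entry above.
print(list(_frange(0.0, 1.0, 0.25)))  # [0.0, 0.25, 0.5, 0.75]
print(list(_frange(3)))               # stop-only form: [0.0, 1.0, 2.0]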
def create_user_profile(sender, instance, created, **kwargs):
"""Create the UserProfile when a new User is saved"""
if created:
profile = UserProfile.objects.get_or_create(user=instance)[0]
profile.hash_pass = create_htpasswd(instance.hash_pass)
profile.save()
else:
# update password
try:
up = UserProfile.objects.get(user=instance.id)
up.hash_pass = create_htpasswd(instance.hash_pass)
up.save()
except AttributeError:
pass | 0.001876 |
def _install_packages(path, packages):
"""Install all packages listed to the target directory.
    Ignores any package that includes Python itself, as well as python-lambda,
    since it is only needed for deploying and not for running the code.
:param str path:
Path to copy installed pip packages to.
:param list packages:
A list of packages to be installed via pip.
"""
def _filter_blacklist(package):
blacklist = ['-i', '#', 'Python==', 'python-lambda==']
return all(package.startswith(entry) is False for entry in blacklist)
filtered_packages = filter(_filter_blacklist, packages)
for package in filtered_packages:
if package.startswith('-e '):
package = package.replace('-e ', '')
print('Installing {package}'.format(package=package))
subprocess.check_call([sys.executable, '-m', 'pip', 'install', package, '-t', path, '--ignore-installed'])
    print('Install directory contents are now: {directory}'.format(directory=os.listdir(path))) | 0.003865 |
def disable_search_updates():
"""
Context manager used to temporarily disable auto_sync.
This is useful when performing bulk updates on objects - when
you may not want to flood the indexing process.
>>> with disable_search_updates():
... for obj in model.objects.all():
... obj.save()
The function works by temporarily removing the apps._on_model_save
signal handler from the model.post_save signal receivers, and then
restoring them after.
"""
_receivers = signals.post_save.receivers.copy()
signals.post_save.receivers = _strip_on_model_save()
yield
signals.post_save.receivers = _receivers | 0.001506 |
def parse_limit(limit_def):
"""Parse a structured flux limit definition as obtained from a YAML file
Returns a tuple of reaction, lower and upper bound.
"""
lower, upper = get_limits(limit_def)
reaction = limit_def.get('reaction')
return reaction, lower, upper | 0.003484 |
def load_config(from_key, to_key):
"""Load configuration from config.
    Meant to run only once per system process, as a
    class variable in subclasses.
from .mappings import mappings
kbs = {}
for key, values in mappings['config'].iteritems():
parse_dict = {}
for mapping in values:
# {'inspire': 'Norwegian', 'cds': 'nno'}
# -> {"Norwegian": "nno"}
parse_dict[mapping[from_key]] = mapping[to_key]
kbs[key] = parse_dict
return kbs | 0.003534 |
def palette(self, label_im):
'''
Transfer the VOC color palette to an output mask for visualization.
'''
if label_im.ndim == 3:
label_im = label_im[0]
label = Image.fromarray(label_im, mode='P')
label.palette = copy.copy(self.palette)
return label | 0.006349 |
def ind_nodes(self, graph=None):
""" Returns a list of all nodes in the graph with no dependencies. """
if graph is None:
graph = self.graph
dependent_nodes = set(
node for dependents in six.itervalues(graph) for node in dependents
)
return [node for node in graph.keys() if node not in dependent_nodes] | 0.005435 |
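The core of ind_nodes is a set difference between all nodes and every node that appears as some other node's dependent; a tiny standalone sketch of that idea, using a hypothetical graph dict in the same shape as self.graph (each node mapped to its dependent nodes):
graph = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}
dependent_nodes = {node for dependents in graph.values() for node in dependents}  # {'b', 'c', 'd'}
print([node for node in graph if node not in dependent_nodes])  # ['a'] -- the only node never listed as a dependent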
def init(self, formula, incr=False):
"""
Initialize the internal SAT oracle. The oracle is used
incrementally and so it is initialized only once when
constructing an object of class :class:`RC2`. Given an
input :class:`.WCNF` formula, the method bootstraps the
oracle with its hard clauses. It also augments the soft
clauses with "fresh" selectors and adds them to the oracle
afterwards.
Optional input parameter ``incr`` (``False`` by default)
regulates whether or not Glucose's incremental mode [6]_
is turned on.
:param formula: input formula
:param incr: apply incremental mode of Glucose
:type formula: :class:`.WCNF`
:type incr: bool
"""
# creating a solver object
self.oracle = Solver(name=self.solver, bootstrap_with=formula.hard,
incr=incr, use_timer=True)
# adding soft clauses to oracle
for i, cl in enumerate(formula.soft):
selv = cl[0] # if clause is unit, selector variable is its literal
if len(cl) > 1:
self.topv += 1
selv = self.topv
cl.append(-self.topv)
self.oracle.add_clause(cl)
if selv not in self.wght:
# record selector and its weight
self.sels.append(selv)
self.wght[selv] = formula.wght[i]
self.smap[selv] = i
else:
# selector is not new; increment its weight
self.wght[selv] += formula.wght[i]
# storing the set of selectors
self.sels_set = set(self.sels)
# at this point internal and external variables are the same
for v in range(1, formula.nv + 1):
self.vmap.e2i[v] = v
self.vmap.i2e[v] = v
if self.verbose > 1:
print('c formula: {0} vars, {1} hard, {2} soft'.format(formula.nv,
len(formula.hard), len(formula.soft))) | 0.001913 |
def _get_framed(self, buf, offset, insert_payload):
"""Returns the framed message and updates the CRC.
"""
header_offset = offset + self._header_len
self.length = insert_payload(buf, header_offset, self.payload)
struct.pack_into(self._header_fmt,
buf,
offset,
self.preamble,
self.msg_type,
self.sender,
self.length)
crc_offset = header_offset + self.length
preamble_bytes = 1
crc_over_len = self._header_len + self.length - preamble_bytes
self.crc = crc16jit(buf, offset+1, 0, crc_over_len)
struct.pack_into(self._crc_fmt, buf, crc_offset, self.crc)
length = preamble_bytes + crc_over_len + self._crc_len
return length | 0.001261 |
def find_nearest(sorted_list, x):
"""
    Find the item nearest to x in a sorted (ascending) array.
    :type sorted_list: list
    :param sorted_list: a sorted iterable that supports indexing
    :param x: a comparable value
    note: for finding the nearest item in a descending array, use
    find_nearest(sorted_list[::-1], x), because the built-in list[::-1]
    slicing is very fast.
    Usage::
        >>> find_nearest([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 5.1)
        5
    In an ascending sorted array, return the number closest to x.
"""
if x <= sorted_list[0]:
return sorted_list[0]
elif x >= sorted_list[-1]:
return sorted_list[-1]
else:
lower = find_le(sorted_list, x)
upper = find_ge(sorted_list, x)
if (x - lower) > (upper - x):
return upper
else:
return lower | 0.001218 |
def multi_path_generator(pathnames):
"""
yields (name,chunkgen) for all of the files found under the list
of pathnames given. This is recursive, so directories will have
    their contents emitted. chunkgen is a function that can be called and
    iterated over to obtain the contents of the file in multiple
    reads.
"""
for pathname in pathnames:
if isdir(pathname):
for entry in directory_generator(pathname):
yield entry
else:
yield pathname, file_chunk(pathname) | 0.001842 |
def _run_single(self, thread_id, agent, environment, deterministic=False,
max_episode_timesteps=-1, episode_finished=None, testing=False, sleep=None):
"""
The target function for a thread, runs an agent and environment until signaled to stop.
Adds rewards to shared episode rewards list.
Args:
thread_id (int): The ID of the thread that's running this target function.
agent (Agent): The Agent object that this particular thread uses.
environment (Environment): The Environment object that this particular thread uses.
max_episode_timesteps (int): Max. number of timesteps per episode. Use -1 or 0 for non-limited episodes.
episode_finished (callable): Function called after each episode that takes an episode summary spec and
returns False, if this single run should terminate after this episode.
Can be used e.g. to set a particular mean reward threshold.
"""
# figure out whether we are using the deprecated way of "episode_finished" reporting
old_episode_finished = False
if episode_finished is not None and len(getargspec(episode_finished).args) == 1:
old_episode_finished = True
episode = 0
# Run this single worker (episode loop) as long as global count thresholds have not been reached.
while not self.should_stop:
state = environment.reset()
agent.reset()
self.global_timestep, self.global_episode = agent.timestep, agent.episode
episode_reward = 0
# Time step (within episode) loop
time_step = 0
time_start = time.time()
while True:
action, internals, states = agent.act(states=state, deterministic=deterministic, buffered=False)
reward = 0
for repeat in xrange(self.repeat_actions):
state, terminal, step_reward = environment.execute(action=action)
reward += step_reward
if terminal:
break
if not testing:
# agent.observe(reward=reward, terminal=terminal)
# Insert everything at once.
agent.atomic_observe(
states=state,
actions=action,
internals=internals,
reward=reward,
terminal=terminal
)
if sleep is not None:
time.sleep(sleep)
time_step += 1
episode_reward += reward
if terminal or time_step == max_episode_timesteps:
break
# Abort the episode (discard its results) when global says so.
if self.should_stop:
return
self.global_timestep += time_step
# Avoid race condition where order in episode_rewards won't match order in episode_timesteps.
self.episode_list_lock.acquire()
self.episode_rewards.append(episode_reward)
self.episode_timesteps.append(time_step)
self.episode_times.append(time.time() - time_start)
self.episode_list_lock.release()
if episode_finished is not None:
# old way of calling episode_finished
if old_episode_finished:
summary_data = {
"thread_id": thread_id,
"episode": episode,
"timestep": time_step,
"episode_reward": episode_reward
}
if not episode_finished(summary_data):
return
# New way with BasicRunner (self) and thread-id.
elif not episode_finished(self, thread_id):
return
episode += 1 | 0.004215 |
def prepare_encoder(inputs, hparams, attention_type="local_1d"):
"""Prepare encoder for images."""
x = prepare_image(inputs, hparams, name="enc_channels")
# Add position signals.
x = add_pos_signals(x, hparams, "enc_pos")
x_shape = common_layers.shape_list(x)
if attention_type == "local_1d":
x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], hparams.hidden_size])
x.set_shape([None, None, hparams.hidden_size])
elif attention_type == "local_2d":
x.set_shape([None, None, None, hparams.hidden_size])
return x | 0.016667 |
def obfuscate(module, tokens, options, name_generator=None, table=None):
"""
Obfuscates *tokens* in-place. *options* is expected to be the options
variable passed through from pyminifier.py.
*module* must be the name of the module we're currently obfuscating
    If *name_generator* is provided it will be used to obtain replacement values
    for identifiers. If not, a new instance of obfuscation_machine() will be
    created.
If *table* is given (should be a list containing a single dictionary), it
will be used to perform lookups of replacements and any new replacements
will be added to it.
"""
# Need a universal instance of our generator to avoid duplicates
identifier_length = int(options.replacement_length)
ignore_length = False
if not name_generator:
if options.use_nonlatin:
ignore_length = True
if sys.version_info[0] == 3:
name_generator = obfuscation_machine(
use_unicode=True, identifier_length=identifier_length)
else:
print(
"ERROR: You can't use nonlatin characters without Python 3")
else:
name_generator = obfuscation_machine(
identifier_length=identifier_length)
if options.obfuscate:
variables = find_obfuscatables(
tokens, obfuscatable_variable, ignore_length=ignore_length)
classes = find_obfuscatables(
tokens, obfuscatable_class)
functions = find_obfuscatables(
tokens, obfuscatable_function)
for variable in variables:
replace_obfuscatables(
module,
tokens,
obfuscate_variable,
variable,
name_generator,
table
)
for function in functions:
replace_obfuscatables(
module,
tokens,
obfuscate_function,
function,
name_generator,
table
)
for _class in classes:
replace_obfuscatables(
module, tokens, obfuscate_class, _class, name_generator, table)
obfuscate_global_import_methods(module, tokens, name_generator, table)
obfuscate_builtins(module, tokens, name_generator, table)
else:
if options.obf_classes:
classes = find_obfuscatables(
tokens, obfuscatable_class)
for _class in classes:
replace_obfuscatables(
module,
tokens,
obfuscate_class,
_class,
name_generator,
table
)
if options.obf_functions:
functions = find_obfuscatables(
tokens, obfuscatable_function)
for function in functions:
replace_obfuscatables(
module,
tokens,
obfuscate_function,
function,
name_generator,
table
)
if options.obf_variables:
variables = find_obfuscatables(
tokens, obfuscatable_variable)
for variable in variables:
replace_obfuscatables(
module,
tokens,
obfuscate_variable,
variable,
name_generator,
table
)
if options.obf_import_methods:
obfuscate_global_import_methods(
module, tokens, name_generator, table)
if options.obf_builtins:
obfuscate_builtins(module, tokens, name_generator, table) | 0.00079 |
def put(self, item):
"""Adds the passed in item object to the queue and calls :func:`flush` if the size of the queue is larger
than :func:`max_queue_length`. This method does nothing if the passed in item is None.
Args:
item (:class:`contracts.Envelope`) item the telemetry envelope object to send to the service.
"""
if not item:
return
self._queue.put(item)
if self._queue.qsize() >= self._max_queue_length:
self.flush() | 0.009709 |
def execute(self, conn, acquisition_era_name,end_date, transaction = False):
"""
        Update the end date of the given acquisition era.
"""
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "dbs/dao/Oracle/AcquisitionEra/updateEndDate expects db connection from upper layer.", self.logger.exception)
binds = { "acquisition_era_name" :acquisition_era_name , "end_date" : end_date }
result = self.dbi.processData(self.sql, binds, conn, transaction) | 0.039583 |
def __deserialize_model(self, data, klass):
"""
Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if not klass.swagger_types:
return data
kwargs = {}
for attr, attr_type in iteritems(klass.swagger_types):
if data is not None \
and klass.attribute_map[attr] in data \
and isinstance(data, (list, dict)):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
return instance | 0.004367 |
def getitem(self, index, context=None):
"""Get an item from this node if subscriptable.
:param index: The node to use as a subscript index.
:type index: Const or Slice
:raises AstroidTypeError: When the given index cannot be used as a
subscript index, or if this node is not subscriptable.
"""
if isinstance(index, Const):
index_value = index.value
elif isinstance(index, Slice):
index_value = _infer_slice(index, context=context)
else:
raise exceptions.AstroidTypeError(
"Could not use type {} as subscript index".format(type(index))
)
try:
if isinstance(self.value, (str, bytes)):
return Const(self.value[index_value])
except IndexError as exc:
raise exceptions.AstroidIndexError(
message="Index {index!r} out of range",
node=self,
index=index,
context=context,
) from exc
except TypeError as exc:
raise exceptions.AstroidTypeError(
message="Type error {error!r}", node=self, index=index, context=context
) from exc
raise exceptions.AstroidTypeError("%r (value=%s)" % (self, self.value)) | 0.002269 |
def _translate_glob(pat):
"""Translate a glob PATTERN to a regular expression."""
translated_parts = []
for part in _iexplode_path(pat):
translated_parts.append(_translate_glob_part(part))
os_sep_class = '[%s]' % re.escape(SEPARATORS)
res = _join_translated(translated_parts, os_sep_class)
return '{res}\\Z(?ms)'.format(res=res) | 0.002778 |
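_translate_glob relies on helpers that are not shown in this entry; as a rough standard-library analogue of the same glob-to-regex idea, fnmatch.translate produces a matching regex (without the path-separator awareness added above).
import fnmatch
import re
pattern = fnmatch.translate('*.ipynb')            # e.g. '(?s:.*\\.ipynb)\\Z' on recent Pythons
print(bool(re.match(pattern, 'Untitled.ipynb')))  # True
print(bool(re.match(pattern, 'notes.txt')))       # False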
def Union(self, mask1, mask2):
"""Merges mask1 and mask2 into this FieldMask."""
_CheckFieldMaskMessage(mask1)
_CheckFieldMaskMessage(mask2)
tree = _FieldMaskTree(mask1)
tree.MergeFromFieldMask(mask2)
tree.ToFieldMask(self) | 0.004049 |
def save_objective(self, objective_form, *args, **kwargs):
"""Pass through to provider ObjectiveAdminSession.update_objective"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
if objective_form.is_for_update():
return self.update_objective(objective_form, *args, **kwargs)
else:
return self.create_objective(objective_form, *args, **kwargs) | 0.004425 |
def _sync_content_metadata(self, serialized_data, http_method):
"""
Synchronize content metadata using the Degreed course content API.
Args:
serialized_data: JSON-encoded object containing content metadata.
http_method: The HTTP method to use for the API request.
Raises:
ClientError: If Degreed API request fails.
"""
try:
status_code, response_body = getattr(self, '_' + http_method)(
urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.course_api_path),
serialized_data,
self.CONTENT_PROVIDER_SCOPE
)
except requests.exceptions.RequestException as exc:
raise ClientError(
'DegreedAPIClient request failed: {error} {message}'.format(
error=exc.__class__.__name__,
message=str(exc)
)
)
if status_code >= 400:
raise ClientError(
'DegreedAPIClient request failed with status {status_code}: {message}'.format(
status_code=status_code,
message=response_body
)
) | 0.003185 |
def set_client_cert(self, cert):
"""*Sets the client cert for the requests.*
The cert is either a path to a .pem file, or a JSON array, or a list
having the cert path and the key path.
Values ``null`` and ``${None}`` can be used for clearing the cert.
*Examples*
| `Set Client Cert` | ${CURDIR}/client.pem |
| `Set Client Cert` | ["${CURDIR}/client.cert", "${CURDIR}/client.key"] |
| `Set Client Cert` | ${paths_list} |
"""
self.request["cert"] = self._input_client_cert(cert)
return self.request["cert"] | 0.005034 |
def clear():
"""
Clear all data on the local server. Useful for debugging purposed.
"""
utils.check_for_local_server()
click.confirm(
"Are you sure you want to do this? It will delete all of your data",
abort=True
)
server = Server(config["local_server"]["url"])
for db_name in all_dbs:
del server[db_name] | 0.002762 |
def ReleaseSW(self):
' Go away from Limit Switch '
while self.ReadStatusBit(2) == 1: # is Limit Switch ON ?
            cmd = 0x92 | (~self.Dir & 1)  # ReleaseSW command byte with direction bit
            spi.SPI_write(self.CS, [cmd, cmd])  # release SW
while self.IsBusy():
pass
self.MoveWait(10) | 0.013245 |
def generate_xml(self):
"""Generates an XML-formatted report for a single binding site"""
report = et.Element('bindingsite')
identifiers = et.SubElement(report, 'identifiers')
longname = et.SubElement(identifiers, 'longname')
ligtype = et.SubElement(identifiers, 'ligtype')
hetid = et.SubElement(identifiers, 'hetid')
chain = et.SubElement(identifiers, 'chain')
position = et.SubElement(identifiers, 'position')
composite = et.SubElement(identifiers, 'composite')
members = et.SubElement(identifiers, 'members')
smiles = et.SubElement(identifiers, 'smiles')
inchikey = et.SubElement(identifiers, 'inchikey')
# Ligand properties. Number of (unpaired) functional atoms and rings.
lig_properties = et.SubElement(report, 'lig_properties')
num_heavy_atoms = et.SubElement(lig_properties, 'num_heavy_atoms')
num_hbd = et.SubElement(lig_properties, 'num_hbd')
num_hbd.text = str(self.ligand.num_hbd)
num_unpaired_hbd = et.SubElement(lig_properties, 'num_unpaired_hbd')
num_unpaired_hbd.text = str(self.complex.num_unpaired_hbd)
num_hba = et.SubElement(lig_properties, 'num_hba')
num_hba.text = str(self.ligand.num_hba)
num_unpaired_hba = et.SubElement(lig_properties, 'num_unpaired_hba')
num_unpaired_hba.text = str(self.complex.num_unpaired_hba)
num_hal = et.SubElement(lig_properties, 'num_hal')
num_hal.text = str(self.ligand.num_hal)
num_unpaired_hal = et.SubElement(lig_properties, 'num_unpaired_hal')
num_unpaired_hal.text = str(self.complex.num_unpaired_hal)
num_aromatic_rings = et.SubElement(lig_properties, 'num_aromatic_rings')
num_aromatic_rings.text = str(self.ligand.num_rings)
num_rot_bonds = et.SubElement(lig_properties, 'num_rotatable_bonds')
num_rot_bonds.text = str(self.ligand.num_rot_bonds)
molweight = et.SubElement(lig_properties, 'molweight')
molweight.text = str(self.ligand.molweight)
logp = et.SubElement(lig_properties, 'logp')
logp.text = str(self.ligand.logp)
ichains = et.SubElement(report, 'interacting_chains')
bsresidues = et.SubElement(report, 'bs_residues')
for i, ichain in enumerate(self.interacting_chains):
c = et.SubElement(ichains, 'interacting_chain', id=str(i + 1))
c.text = ichain
for i, bsres in enumerate(self.bs_res):
contact = 'True' if bsres in self.bs_res_interacting else 'False'
distance = '%.1f' % self.min_dist[bsres][0]
aatype = self.min_dist[bsres][1]
c = et.SubElement(bsresidues, 'bs_residue', id=str(i + 1), contact=contact, min_dist=distance, aa=aatype)
c.text = bsres
hetid.text, chain.text, position.text = self.ligand.hetid, self.ligand.chain, str(self.ligand.position)
composite.text = 'True' if len(self.lig_members) > 1 else 'False'
longname.text = self.longname
ligtype.text = self.ligtype
smiles.text = self.ligand.smiles
inchikey.text = self.ligand.inchikey
num_heavy_atoms.text = str(self.ligand.heavy_atoms) # Number of heavy atoms in ligand
for i, member in enumerate(self.lig_members):
bsid = ":".join(str(element) for element in member)
m = et.SubElement(members, 'member', id=str(i + 1))
m.text = bsid
interactions = et.SubElement(report, 'interactions')
def format_interactions(element_name, features, interaction_information):
"""Returns a formatted element with interaction information."""
interaction = et.Element(element_name)
# Sort results first by res number, then by distance and finally ligand coordinates to get a unique order
interaction_information = sorted(interaction_information, key=itemgetter(0, 2, -2))
for j, single_contact in enumerate(interaction_information):
if not element_name == 'metal_complexes':
new_contact = et.SubElement(interaction, element_name[:-1], id=str(j + 1))
else: # Metal Complex[es]
new_contact = et.SubElement(interaction, element_name[:-2], id=str(j + 1))
for i, feature in enumerate(single_contact):
# Just assign the value unless it's an atom list, use subelements in this case
if features[i] == 'LIG_IDX_LIST':
feat = et.SubElement(new_contact, features[i].lower())
for k, atm_idx in enumerate(feature.split(',')):
idx = et.SubElement(feat, 'idx', id=str(k + 1))
idx.text = str(atm_idx)
elif features[i].endswith('COO'):
feat = et.SubElement(new_contact, features[i].lower())
xc, yc, zc = feature
xcoo = et.SubElement(feat, 'x')
xcoo.text = '%.3f' % xc
ycoo = et.SubElement(feat, 'y')
ycoo.text = '%.3f' % yc
zcoo = et.SubElement(feat, 'z')
zcoo.text = '%.3f' % zc
else:
feat = et.SubElement(new_contact, features[i].lower())
feat.text = str(feature)
return interaction
interactions.append(format_interactions('hydrophobic_interactions', self.hydrophobic_features,
self.hydrophobic_info))
interactions.append(format_interactions('hydrogen_bonds', self.hbond_features, self.hbond_info))
interactions.append(format_interactions('water_bridges', self.waterbridge_features, self.waterbridge_info))
interactions.append(format_interactions('salt_bridges', self.saltbridge_features, self.saltbridge_info))
interactions.append(format_interactions('pi_stacks', self.pistacking_features, self.pistacking_info))
interactions.append(format_interactions('pi_cation_interactions', self.pication_features, self.pication_info))
interactions.append(format_interactions('halogen_bonds', self.halogen_features, self.halogen_info))
interactions.append(format_interactions('metal_complexes', self.metal_features, self.metal_info))
# Mappings
mappings = et.SubElement(report, 'mappings')
smiles_to_pdb = et.SubElement(mappings, 'smiles_to_pdb') # SMILES numbering to PDB file numbering (atoms)
bsid = ':'.join([self.ligand.hetid, self.ligand.chain, str(self.ligand.position)])
if self.ligand.atomorder is not None:
smiles_to_pdb_map = [(key, self.ligand.Mapper.mapid(self.ligand.can_to_pdb[key],
mtype='protein', bsid=bsid)) for key in self.ligand.can_to_pdb]
smiles_to_pdb.text = ','.join([str(mapping[0])+':'+str(mapping[1]) for mapping in smiles_to_pdb_map])
else:
smiles_to_pdb.text = ''
return report | 0.003485 |
def process_startup():
"""Call this at Python startup to perhaps measure coverage.
If the environment variable COVERAGE_PROCESS_START is defined, coverage
measurement is started. The value of the variable is the config file
to use.
There are two ways to configure your Python installation to invoke this
function when Python starts:
#. Create or append to sitecustomize.py to add these lines::
import coverage
coverage.process_startup()
#. Create a .pth file in your Python installation containing::
import coverage; coverage.process_startup()
"""
cps = os.environ.get("COVERAGE_PROCESS_START")
if cps:
cov = coverage(config_file=cps, auto_data=True)
cov.start()
cov._warn_no_data = False
cov._warn_unimported_source = False | 0.001202 |
def to_content_range_header(self, length):
"""Converts the object into `Content-Range` HTTP header,
based on given length
"""
range_for_length = self.range_for_length(length)
if range_for_length is not None:
return "%s %d-%d/%d" % (
self.units,
range_for_length[0],
range_for_length[1] - 1,
length,
)
return None | 0.004464 |
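This method appears to match werkzeug's datastructures.Range.to_content_range_header; a hedged usage sketch under that assumption, showing an absolute and a suffix byte range.
from werkzeug.datastructures import Range
# Range(units, ranges), where each range is (start, stop-exclusive) or (-suffix_length, None).
print(Range("bytes", [(0, 500)]).to_content_range_header(1200))     # 'bytes 0-499/1200'
print(Range("bytes", [(-300, None)]).to_content_range_header(1200))  # 'bytes 900-1199/1200'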
def _fix_set_options(cls, options):
"""Alter the set options from None/strings to sets in place."""
optional_set_options = ('ignore', 'select')
mandatory_set_options = ('add_ignore', 'add_select')
def _get_set(value_str):
"""Split `value_str` by the delimiter `,` and return a set.
Removes any occurrences of '' in the set.
Also expand error code prefixes, to avoid doing this for every
file.
"""
return cls._expand_error_codes(set(value_str.split(',')) - {''})
for opt in optional_set_options:
value = getattr(options, opt)
if value is not None:
setattr(options, opt, _get_set(value))
for opt in mandatory_set_options:
value = getattr(options, opt)
if value is None:
value = ''
if not isinstance(value, Set):
value = _get_set(value)
setattr(options, opt, value)
return options | 0.001936 |
def list_resource_groups(access_token, subscription_id):
'''List the resource groups in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/',
'?api-version=', RESOURCE_API])
return do_get(endpoint, access_token) | 0.001901 |
def main(notify, hour, minute):
"""Runs billing report. Optionally sends notifications to billing"""
# Read the config file and get the admin context
config_opts = ['--config-file', '/etc/neutron/neutron.conf']
config.init(config_opts)
# Have to load the billing module _after_ config is parsed so
# that we get the right network strategy
network_strategy.STRATEGY.load()
billing.PUBLIC_NETWORK_ID = network_strategy.STRATEGY.get_public_net_id()
config.setup_logging()
context = neutron_context.get_admin_context()
# A query to get all IPAddress objects from the db
query = context.session.query(models.IPAddress)
(period_start, period_end) = billing.calc_periods(hour, minute)
full_day_ips = billing.build_full_day_ips(query,
period_start,
period_end)
partial_day_ips = billing.build_partial_day_ips(query,
period_start,
period_end)
if notify:
# '==================== Full Day ============================='
for ipaddress in full_day_ips:
click.echo('start: {}, end: {}'.format(period_start, period_end))
payload = billing.build_payload(ipaddress,
billing.IP_EXISTS,
start_time=period_start,
end_time=period_end)
billing.do_notify(context,
billing.IP_EXISTS,
payload)
# '==================== Part Day ============================='
for ipaddress in partial_day_ips:
click.echo('start: {}, end: {}'.format(period_start, period_end))
payload = billing.build_payload(ipaddress,
billing.IP_EXISTS,
start_time=ipaddress.allocated_at,
end_time=period_end)
billing.do_notify(context,
billing.IP_EXISTS,
payload)
else:
click.echo('Case 1 ({}):\n'.format(len(full_day_ips)))
for ipaddress in full_day_ips:
pp(billing.build_payload(ipaddress,
billing.IP_EXISTS,
start_time=period_start,
end_time=period_end))
click.echo('\n===============================================\n')
click.echo('Case 2 ({}):\n'.format(len(partial_day_ips)))
for ipaddress in partial_day_ips:
pp(billing.build_payload(ipaddress,
billing.IP_EXISTS,
start_time=ipaddress.allocated_at,
end_time=period_end)) | 0.000332 |
def _extract_buffers(obj, threshold=MAX_BYTES):
"""Extract buffers larger than a certain threshold."""
buffers = []
if isinstance(obj, CannedObject) and obj.buffers:
for i, buf in enumerate(obj.buffers):
nbytes = _nbytes(buf)
if nbytes > threshold:
# buffer larger than threshold, prevent pickling
obj.buffers[i] = None
buffers.append(buf)
# buffer too small for separate send, coerce to bytes
# because pickling buffer objects just results in broken pointers
elif isinstance(buf, memoryview):
obj.buffers[i] = buf.tobytes()
elif isinstance(buf, buffer):
obj.buffers[i] = bytes(buf)
return buffers | 0.001294 |
def StopTiming(self, profile_name):
"""Stops timing CPU time.
Args:
profile_name (str): name of the profile to sample.
"""
measurements = self._profile_measurements.get(profile_name)
if measurements:
measurements.SampleStop()
sample = '{0:f}\t{1:s}\t{2:f}\n'.format(
measurements.start_sample_time, profile_name,
measurements.total_cpu_time)
self._WritesString(sample) | 0.009217 |
def color_array_by_hue_mix(value, palette):
"""
Figure out the appropriate color for a binary string value by averaging
the colors corresponding the indices of each one that it contains. Makes
for visualizations that intuitively show patch overlap.
"""
if int(value, 2) > 0:
# Convert bits to list and reverse order to avoid issues with
# differing lengths
int_list = [int(i) for i in list(value[2:])]
int_list.reverse()
# since this is a 1D array, we need the zeroth elements
# of np.nonzero.
locs = np.nonzero(int_list)[0]
# print(locs)
# print(palette)
rgb_vals = [palette[i] for i in locs]
rgb = [0]*len(rgb_vals[0]) # We don't know if it's rgb or rgba
for val in rgb_vals:
for index in range(len(val)):
rgb[index] += val[index]
for i in range(len(rgb)):
rgb[i] /= len(locs)
return tuple(rgb)
if int(value, 2) == 0:
return (1, 1, 1) if len(palette[0]) == 3 else (1, 1, 1, 1)
return -1 | 0.000919 |
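A small sketch of the hue-mixing behaviour (assuming the function above and numpy are in scope): bit i of the binary string selects palette entry i, and the selected colors are averaged.
palette = [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]  # red, green, blue
print(color_array_by_hue_mix('0b011', palette))  # (0.5, 0.5, 0.0): bits 0 and 1 set, so red and green are averaged
print(color_array_by_hue_mix('0b000', palette))  # (1, 1, 1): white for "no patch here"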
def runExperimentPool(numObjects,
numLocations,
numFeatures,
numColumns,
networkType=["MultipleL4L2Columns"],
longDistanceConnectionsRange = [0.0],
numWorkers=7,
nTrials=1,
pointRange=1,
numPoints=10,
numInferenceRpts=1,
l2Params=None,
l4Params=None,
resultsName="convergence_results.pkl"):
"""
Allows you to run a number of experiments using multiple processes.
For each parameter except numWorkers, pass in a list containing valid values
for that parameter. The cross product of everything is run, and each
combination is run nTrials times.
Returns a list of dict containing detailed results from each experiment.
Also pickles and saves the results in resultsName for later analysis.
Example:
results = runExperimentPool(
numObjects=[10],
numLocations=[5],
numFeatures=[5],
numColumns=[2,3,4,5,6],
numWorkers=8,
nTrials=5)
"""
# Create function arguments for every possibility
args = []
for c in reversed(numColumns):
for o in reversed(numObjects):
for l in numLocations:
for f in numFeatures:
for n in networkType:
for p in longDistanceConnectionsRange:
for t in range(nTrials):
args.append(
{"numObjects": o,
"numLocations": l,
"numFeatures": f,
"numColumns": c,
"trialNum": t,
"pointRange": pointRange,
"numPoints": numPoints,
"networkType" : n,
"longDistanceConnections" : p,
"plotInferenceStats": False,
"settlingTime": 3,
"numInferenceRpts": numInferenceRpts,
"l2Params": l2Params,
"l4Params": l4Params
}
)
print "{} experiments to run, {} workers".format(len(args), numWorkers)
# Run the pool
if numWorkers > 1:
pool = Pool(processes=numWorkers)
result = pool.map(runExperiment, args)
else:
result = []
for arg in args:
result.append(runExperiment(arg))
# print "Full results:"
# pprint.pprint(result, width=150)
# Pickle results for later use
with open(resultsName,"wb") as f:
cPickle.dump(result,f)
return result | 0.009215 |
def list_load_balancers(access_token, subscription_id):
'''List the load balancers in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of load balancer list with properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Network/',
'/loadBalancers?api-version=', NETWORK_API])
return do_get(endpoint, access_token) | 0.001669 |
def checkArgs(args):
"""Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1.
.. note::
Only one operation for markers and one operation for samples can be
done at a time. Hence, one of ``--exclude`` or ``--extract`` can be
done for markers, and one of ``--remove`` or ``--keep`` can be done for
samples.
"""
# Check the input files
if not args.is_bfile and not args.is_tfile and not args.is_file:
msg = "needs one input file type (--is-bfile, --is-tfile or --is-file)"
raise ProgramError(msg)
if args.is_bfile and not args.is_tfile and not args.is_file:
for fileName in [args.ifile + i for i in [".bed", ".bim", ".fam"]]:
if not os.path.isfile(fileName):
msg = "{}: no such file".format(fileName)
raise ProgramError(msg)
elif args.is_tfile and not args.is_bfile and not args.is_file:
for fileName in [args.ifile + i for i in [".tped", ".tfam"]]:
if not os.path.isfile(fileName):
msg = "{}: no such file".format(fileName)
raise ProgramError(msg)
elif args.is_file and not args.is_bfile and not args.is_tfile:
for fileName in [args.ifile + i for i in [".ped", ".map"]]:
if not os.path.isfile(fileName):
msg = "{}: no such file". format(fileName)
raise ProgramError(msg)
else:
msg = ("needs only one input file type (--is-bfile, --is-tfile or "
"--is-file)")
raise ProgramError(msg)
# Check that we have at least one of exclude, extract remove or keep
if args.exclude is None and args.extract is None and \
args.remove is None and args.keep is None:
msg = "needs at least one of --exclude, --extract, --remove or --keep"
raise ProgramError(msg)
# Check for SNPs
if args.exclude is not None and args.extract is None:
if not os.path.isfile(args.exclude):
msg = "{}: no such file".format(args.exclude)
raise ProgramError(msg)
elif args.extract is not None and args.exclude is None:
if not os.path.isfile(args.extract):
msg = "{}: no such file".format(args.extract)
raise ProgramError(msg)
elif args.exclude is not None and args.extract is not None:
msg = "use only one of --extract or --exclude"
raise ProgramError(msg)
# Check for samples
if args.remove is not None and args.keep is None:
if not os.path.isfile(args.remove):
msg = "{}: no such file".format(args.remove)
raise ProgramError(msg)
elif args.keep is not None and args.remove is None:
if not os.path.isfile(args.keep):
msg = "{}: no such file".format(args.keep)
raise ProgramError(msg)
elif args.remove is not None and args.keep is not None:
msg = "use only one of --keep or --remove"
raise ProgramError(msg)
return True | 0.000301 |
def session_state_view(request, template_name, **kwargs):
'Example view that exhibits the use of sessions to store state'
session = request.session
demo_count = session.get('django_plotly_dash', {})
ind_use = demo_count.get('ind_use', 0)
ind_use += 1
demo_count['ind_use'] = ind_use
context = {'ind_use' : ind_use}
session['django_plotly_dash'] = demo_count
return render(request, template_name=template_name, context=context) | 0.004274 |
def detectCustomImportPaths(self, prefix):
"""
        Some prefixes do not reflect the provider prefix,
        e.g. camlistore.org/pkg/googlestorage is actually at
github.com/camlistore/camlistore repository under
pkg/googlestorage directory.
"""
for assignment in self.ip2pp_mapping:
if prefix.startswith(assignment["ipprefix"]):
return {"prefix": assignment["ipprefix"], "provider_prefix": assignment["provider_prefix"]}
return {} | 0.029613 |
def calculate_dc_coefficients(contour):
"""Calculate the :math:`A_0` and :math:`C_0` coefficients of the elliptic Fourier series.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:return: The :math:`A_0` and :math:`C_0` coefficients.
:rtype: tuple
"""
dxy = np.diff(contour, axis=0)
dt = np.sqrt((dxy ** 2).sum(axis=1))
t = np.concatenate([([0., ]), np.cumsum(dt)])
T = t[-1]
xi = np.cumsum(dxy[:, 0]) - (dxy[:, 0] / dt) * t[1:]
A0 = (1 / T) * np.sum(((dxy[:, 0] / (2 * dt)) * np.diff(t ** 2)) + xi * dt)
delta = np.cumsum(dxy[:, 1]) - (dxy[:, 1] / dt) * t[1:]
C0 = (1 / T) * np.sum(((dxy[:, 1] / (2 * dt)) * np.diff(t ** 2)) + delta * dt)
    # A0 and C0 relate to the first point of the contour array as origin.
# Adding those values to the coefficients to make them relate to true origin.
return contour[0, 0] + A0, contour[0, 1] + C0 | 0.004353 |
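A quick check of calculate_dc_coefficients (assuming the function above, which appears to come from pyefd, plus numpy): the DC terms are the arc-length average of the contour, i.e. the geometric centre for a uniformly traced square.
import numpy as np
# A closed unit square traced from the origin; the first point equals the last point.
square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]])
A0, C0 = calculate_dc_coefficients(square)
print(A0, C0)  # 0.5 0.5, the centre of the square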
def domain(self, expparams):
"""
Returns a list of :class:`Domain` objects, one for each input expparam.
:param numpy.ndarray expparams: Array of experimental parameters. This
array must be of dtype agreeing with the ``expparams_dtype``
property.
:rtype: list of ``Domain``
"""
return [
MultinomialDomain(n_elements=self.n_sides, n_meas=ep['n_meas'])
for ep in expparams
] | 0.008299 |
def _set_link_crc_monitoring(self, v, load=False):
"""
Setter method for link_crc_monitoring, mapped from YANG variable /sysmon/link_crc_monitoring (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_crc_monitoring is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_crc_monitoring() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=link_crc_monitoring.link_crc_monitoring, is_container='container', presence=False, yang_name="link-crc-monitoring", rest_name="link-crc-monitoring", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Link CRC Monitoring', u'callpoint': u'linkCrcMonitoring', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sysmon', defining_module='brocade-sysmon', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_crc_monitoring must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=link_crc_monitoring.link_crc_monitoring, is_container='container', presence=False, yang_name="link-crc-monitoring", rest_name="link-crc-monitoring", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Link CRC Monitoring', u'callpoint': u'linkCrcMonitoring', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sysmon', defining_module='brocade-sysmon', yang_type='container', is_config=True)""",
})
self.__link_crc_monitoring = t
if hasattr(self, '_set'):
self._set() | 0.005184 |
def tolist(self):
"""
Return the array as a list of rows.
Each row is a `dict` of values. Facilitates inserting data into a database.
.. versionadded:: 0.3.1
Returns
-------
quotes : list
A list in which each entry is a dictionary representing
a single options quote.
"""
return [_todict(key, self.data.loc[key, :]) for key in self.data.index] | 0.006787 |
def _BuildFindSpecsFromGroupName(self, group_name, environment_variables):
"""Builds find specifications from a artifact group name.
Args:
group_name (str): artifact group name.
environment_variables (list[str]): environment variable attributes used to
dynamically populate environment variables in file and registry
artifacts.
Returns:
list[dfwinreg.FindSpec|dfvfs.FindSpec]: find specifications or None if no
artifact with the given name can be retrieved.
"""
definition = self._artifacts_registry.GetDefinitionByName(group_name)
if not definition:
return None
return self._BuildFindSpecsFromArtifact(definition, environment_variables) | 0.004155 |
def node_version():
"""Get node version."""
version = check_output(('node', '--version'))
return tuple(int(x) for x in version.strip()[1:].split(b'.')) | 0.006135 |
def strip_tags(html):
"""Stripts HTML tags from text.
Note fields on several Mambu entities come with additional HTML tags
(they are rich text fields, I guess that's why). Sometimes they are
useless, so stripping them is a good idea.
"""
from html.parser import HTMLParser
class MLStripper(HTMLParser):
"""Aux class for stripping HTML tags.
fields on several Mambu entities come with additional HTML tags
(they are rich text fields, I guess that's why). Sometimes they are
useless, so stripping them is a good idea.
"""
def __init__(self):
try:
super().__init__() # required for python3
except TypeError as e:
pass # with python2 raises TypeError
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
s = MLStripper()
    s.feed(html.replace("&nbsp;", " "))  # replace non-breaking-space entities with plain spaces
return s.get_data() | 0.006686 |
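A brief usage sketch for strip_tags above (Python 3, since it imports html.parser; assumes the non-breaking-space replacement shown in the function).
rich = '<div><b>Loan purpose:</b> working&nbsp;capital</div>'
print(strip_tags(rich))  # 'Loan purpose: working capital'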
def use_comparative_assessment_offered_view(self):
"""Pass through to provider AssessmentOfferedLookupSession.use_comparative_assessment_offered_view"""
self._object_views['assessment_offered'] = COMPARATIVE
# self._get_provider_session('assessment_offered_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_comparative_assessment_offered_view()
except AttributeError:
pass | 0.007576 |
def user_group_membership_make_default(self, user_id, membership_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/group_memberships#set-membership-as-default"
api_path = "/api/v2/users/{user_id}/group_memberships/{membership_id}/make_default.json"
api_path = api_path.format(user_id=user_id, membership_id=membership_id)
return self.call(api_path, method="PUT", data=data, **kwargs) | 0.013761 |
def is_git_repo():
"""Check whether the current folder is a Git repo."""
cmd = "git", "rev-parse", "--git-dir"
try:
subprocess.run(cmd, stdout=subprocess.DEVNULL, check=True)
return True
except subprocess.CalledProcessError:
return False | 0.00361 |
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
"""Given a mod and a set of opts return an instantiated
Backend class.
"""
kw = dict(encoding=encoding, encoding_errors=encoding_errors,
kwargs=kwargs)
try:
klass = getattr(mod, "Backend")
except AttributeError:
raise AttributeError("%r mod does not define any backend class" % mod)
inst = klass(**kw)
try:
inst.check(title=False)
except Exception as err:
bin_mod = "fulltext.backends.__bin"
warn("can't use %r due to %r; use %r backend instead" % (
mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False)
LOGGER.debug("using %r" % inst)
return inst | 0.001287 |
def start_event_stream(self):
""" Start streaming events from `gerrit stream-events`. """
if not self._stream:
self._stream = GerritStream(self, ssh_client=self._ssh_client)
self._stream.start() | 0.008547 |
def get_binary_stream(name):
"""Returns a system stream for byte processing. This essentially
returns the stream from the sys module with the given name but it
solves some compatibility issues between different Python versions.
Primarily this function is necessary for getting binary streams on
Python 3.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
"""
opener = binary_streams.get(name)
if opener is None:
raise TypeError('Unknown standard stream %r' % name)
return opener() | 0.001661 |
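This looks like click's get_binary_stream; a hedged usage sketch under that assumption, writing raw bytes to stdout without going through text encoding.
import click
out = click.get_binary_stream('stdout')
out.write(b'raw bytes, no text encoding applied\n')
out.flush()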