def fill_symbolic(self):
"""
Fill the class with constrained symbolic values.
"""
self.wYear = self.state.solver.BVS('cur_year', 16, key=('api', 'GetLocalTime', 'cur_year'))
self.wMonth = self.state.solver.BVS('cur_month', 16, key=('api', 'GetLocalTime', 'cur_month'))
self.wDayOfWeek = self.state.solver.BVS('cur_dayofweek', 16, key=('api', 'GetLocalTime', 'cur_dayofweek'))
self.wDay = self.state.solver.BVS('cur_day', 16, key=('api', 'GetLocalTime', 'cur_day'))
self.wHour = self.state.solver.BVS('cur_hour', 16, key=('api', 'GetLocalTime', 'cur_hour'))
self.wMinute = self.state.solver.BVS('cur_minute', 16, key=('api', 'GetLocalTime', 'cur_minute'))
self.wSecond = self.state.solver.BVS('cur_second', 16, key=('api', 'GetLocalTime', 'cur_second'))
self.wMilliseconds = self.state.solver.BVS('cur_millisecond', 16, key=('api', 'GetLocalTime', 'cur_millisecond'))
self.state.add_constraints(self.wYear >= 1601)
self.state.add_constraints(self.wYear <= 30827)
self.state.add_constraints(self.wMonth >= 1)
self.state.add_constraints(self.wMonth <= 12)
self.state.add_constraints(self.wDayOfWeek <= 6)
self.state.add_constraints(self.wDay >= 1)
self.state.add_constraints(self.wDay <= 31)
self.state.add_constraints(self.wHour <= 23)
self.state.add_constraints(self.wMinute <= 59)
self.state.add_constraints(self.wSecond <= 59)
self.state.add_constraints(self.wMilliseconds <= 999)
def _handle_request(self, request: dict) -> dict:
"""Processes Alexa requests from skill server and returns responses to Alexa.
Args:
request: Dict with Alexa request payload and metadata.
Returns:
result: Alexa formatted or error response.
"""
request_body: bytes = request['request_body']
signature_chain_url: str = request['signature_chain_url']
signature: str = request['signature']
alexa_request: dict = request['alexa_request']
if not self._verify_request(signature_chain_url, signature, request_body):
return {'error': 'failed certificate/signature check'}
timestamp_str = alexa_request['request']['timestamp']
timestamp_datetime = datetime.strptime(timestamp_str, '%Y-%m-%dT%H:%M:%SZ')
now = datetime.utcnow()
delta = now - timestamp_datetime if now >= timestamp_datetime else timestamp_datetime - now
# use total_seconds() so differences of more than a day are not silently ignored
if delta.total_seconds() > REQUEST_TIMESTAMP_TOLERANCE_SECS:
log.error(f'Failed timestamp check for request: {request_body.decode("utf-8", "replace")}')
return {'error': 'failed request timestamp check'}
conversation_key = alexa_request['session']['user']['userId']
if conversation_key not in self.conversations:
if self.config['multi_instance']:
conv_agent = self._init_agent()
log.info('New conversation instance level agent initiated')
else:
conv_agent = self.agent
self.conversations[conversation_key] = \
Conversation(config=self.config,
agent=conv_agent,
conversation_key=conversation_key,
self_destruct_callback=lambda: self._del_conversation(conversation_key))
log.info(f'Created new conversation, key: {conversation_key}')
conversation = self.conversations[conversation_key]
response = conversation.handle_request(alexa_request)
return response
def pymmh3_hash128(key: Union[bytes, bytearray],
seed: int = 0,
x64arch: bool = True) -> int:
"""
Implements 128bit murmur3 hash, as per ``pymmh3``.
Args:
key: data to hash
seed: seed
x64arch: is a 64-bit architecture available?
Returns:
integer hash
"""
if x64arch:
return pymmh3_hash128_x64(key, seed)
else:
return pymmh3_hash128_x86(key, seed)
def find_playlist_by_id(self, playlist_id):
"""doc: http://open.youku.com/docs/doc?id=66
"""
url = 'https://openapi.youku.com/v2/playlists/show.json'
params = {
'client_id': self.client_id,
'playlist_id': playlist_id
}
r = requests.get(url, params=params)
check_error(r)
return r.json()
def create(self, name, volume, description=None, force=False):
"""
Adds exception handling to the default create() call.
"""
try:
snap = super(CloudBlockStorageSnapshotManager, self).create(
name=name, volume=volume, description=description,
force=force)
except exc.BadRequest as e:
msg = str(e)
if "Invalid volume: must be available" in msg:
# The volume for the snapshot was attached.
raise exc.VolumeNotAvailable("Cannot create a snapshot from an "
"attached volume. Detach the volume before trying "
"again, or pass 'force=True' to the create_snapshot() "
"call.")
else:
# Some other error
raise
except exc.ClientException as e:
if e.code == 409:
if "Request conflicts with in-progress" in str(e):
txt = ("The volume is current creating a snapshot. You "
"must wait until that completes before attempting "
"to create an additional snapshot.")
raise exc.VolumeNotAvailable(txt)
else:
raise
else:
raise
return snap
def instance_path_for(name, identifier_type, identifier_key=None):
"""
Get a path for thing.
"""
return "/{}/<{}:{}>".format(
name_for(name),
identifier_type,
identifier_key or "{}_id".format(name_for(name)),
)
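# Usage sketch (added; not part of the original module). Assumes the module's
# `name_for("company")` returns "company"; the helper then yields Flask-style
# instance routes such as the comments below.
example_detail_route = instance_path_for("company", "uuid")
# -> "/company/<uuid:company_id>"
example_pk_route = instance_path_for("company", "int", "pk")
# -> "/company/<int:pk>"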
def transform(self, x, warn=True):
"""Obtain the transformed values
"""
# 1. split across last dimension
# 2. re-use ranges
# 3. Merge
array_list = [encodeSplines(x[..., i].reshape((-1, 1)),
n_bases=self.n_bases,
spline_order=self.degree,
warn=warn,
start=self.data_min_[i],
end=self.data_max_[i]).reshape(x[..., i].shape + (self.n_bases,))
for i in range(x.shape[-1])]
return np.stack(array_list, axis=-2)
def surf_vol(length, girth):
'''Calculate the surface area and volume of an animal from its length and girth
Args
----
length: float or ndarray
Length of animal (m)
girth: float or ndarray
Girth of animal (m)
Returns
-------
surf: float or ndarray
Surface area of animal (m^2)
vol: float or ndarray
Volume of animal (m^3)
'''
import numpy
a_r = 0.01 * girth / (2 * numpy.pi)
stl_l = 0.01 * length
c_r = stl_l / 2
e = numpy.sqrt(1 - (a_r**2 / c_r**2))
# prolate spheroid surface area: 2*pi*a^2 + 2*pi*(a*c/e)*arcsin(e)
surf = ((2*numpy.pi * a_r**2) + \
(2*numpy.pi * ((a_r * c_r)/e)) * numpy.arcsin(e))
vol = (((4/3) * numpy.pi)*(a_r**2) * c_r)
return surf, vol
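# Usage sketch (illustrative only; numpy is imported inside the function):
# compute the modelled surface area and volume for one set of measurements.
example_surf, example_vol = surf_vol(230.0, 120.0)
print(example_surf, example_vol)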
def get_char_type(ch):
"""
1. English letter
2. digit
3. Chinese (Han) character
4. other
"""
if re.match(en_p, ch):
return 1
elif re.match("\d+", ch):
return 2
elif re.match(re_han, ch):
return 3
else:
return 4
def batch_id(self, batch_id):
"""The ID of the batch job used to push data and/or retrieve status.
Args:
batch_id (integer): The id of the batch job.
"""
self._request_uri = '{}/{}'.format(self._api_uri, batch_id)
self._request_entity = 'batchStatus'
def _is_requirement(line):
"""Returns whether the line is a valid package requirement."""
line = line.strip()
return line and not (line.startswith("-r") or line.startswith("#"))
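# Usage sketch (added for illustration): keep only installable requirement
# lines from a requirements.txt-style list.
example_lines = ["requests>=2.0", "# a comment", "-r base.txt", "", "flask"]
example_requirements = [ln for ln in example_lines if _is_requirement(ln)]
# -> ["requests>=2.0", "flask"]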
def update_record(self, record, data=None, priority=None,
ttl=None, comment=None):
"""
Modifies an existing record for this domain.
"""
return self.manager.update_record(self, record, data=data,
priority=priority, ttl=ttl, comment=comment)
def combination_step(self):
"""Update auxiliary state by a smart combination of previous
updates in the frequency domain (standard FISTA
:cite:`beck-2009-fast`).
"""
# Update t step
tprv = self.t
self.t = 0.5 * float(1. + np.sqrt(1. + 4. * tprv**2))
# Update Y
if not self.opt['FastSolve']:
self.Yfprv = self.Yf.copy()
self.Yf = self.Xf + ((tprv - 1.) / self.t) * (self.Xf - self.Xfprv)
def get_pdb_contents_to_pose_residue_map(pdb_file_contents, rosetta_scripts_path, rosetta_database_path = None, pdb_id = None, extra_flags = ''):
'''Takes a string containing a PDB file, the RosettaScripts executable, and the Rosetta database and then uses the features database to map PDB residue IDs to pose residue IDs.
On success, (True, the residue mapping) is returned. On failure, (False, a list of errors) is returned.
Note: extra_flags should typically include '-ignore_zero_occupancy false' and '-ignore_unrecognized_res'.'''
filename = write_temp_file("/tmp", pdb_file_contents)
success, mapping = get_pdb_to_pose_residue_map(filename, rosetta_scripts_path, rosetta_database_path = rosetta_database_path, pdb_id = pdb_id, extra_flags = extra_flags)
os.remove(filename)
return success, mapping
def _parse_last_build_date(self):
"""
Returns the last build date of the RSS feed as datetime.datetime
object. Returned datetime is not time-zone aware
"""
date = self._channel.find('lastBuildDate').text
date = parser.parse(date, ignoretz=True)
return date
def _golden(self, triplet, fun):
"""Reduce the size of the bracket until the minimum is found"""
self.num_golden = 0
(qa, fa), (qb, fb), (qc, fc) = triplet
while True:
self.num_golden += 1
qd = qa + (qb-qa)*phi/(1+phi)
fd = fun(qd)
if fd < fb:
#print "golden d"
(qa, fa), (qb, fb) = (qb, fb), (qd, fd)
else:
#print "golden b"
(qa, fa), (qc, fc) = (qd, fd), (qa, fa)
if abs(qa-qb) < self.qtol:
return qc, fc
def visit_tree(node, previsit, postvisit):
"""
Scans the tree under the node depth-first using an explicit stack. It avoids implicit recursion
via the function call stack to avoid hitting 'maximum recursion depth exceeded' error.
It calls ``previsit()`` and ``postvisit()`` as follows:
* ``previsit(node, par_value)`` - should return ``(par_value, value)``
``par_value`` is as returned from ``previsit()`` of the parent.
* ``postvisit(node, par_value, value)`` - should return ``value``
``par_value`` is as returned from ``previsit()`` of the parent, and ``value`` is as
returned from ``previsit()`` of this node itself. The return ``value`` is ignored except
the one for the root node, which is returned from the overall ``visit_tree()`` call.
For the initial node, ``par_value`` is None. Either ``previsit`` and ``postvisit`` may be None.
"""
if not previsit:
previsit = lambda node, pvalue: (None, None)
if not postvisit:
postvisit = lambda node, pvalue, value: None
iter_children = iter_children_func(node)
done = set()
ret = None
stack = [(node, None, _PREVISIT)]
while stack:
current, par_value, value = stack.pop()
if value is _PREVISIT:
assert current not in done # protect against infinite loop in case of a bad tree.
done.add(current)
pvalue, post_value = previsit(current, par_value)
stack.append((current, par_value, post_value))
# Insert all children in reverse order (so that first child ends up on top of the stack).
ins = len(stack)
for n in iter_children(current):
stack.insert(ins, (n, pvalue, _PREVISIT))
else:
ret = postvisit(current, par_value, value)
return ret
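# Usage sketch (assumes this module's iter_children_func can walk ast nodes,
# as in asttokens): count every node in a parsed expression without recursion.
import ast

def _count_nodes(tree):
    seen = []
    def previsit(node, par_value):
        seen.append(node)
        return (None, None)
    visit_tree(tree, previsit, None)
    return len(seen)

# _count_nodes(ast.parse("a + b * c"))  -> number of AST nodes in the expression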
def SetOption(self, section, option, value, overwrite=True):
"""Set the value of an option in the config file.
Args:
section: string, the section of the config file to check.
option: string, the option to set the value of.
value: string, the value to set the option.
overwrite: bool, True to overwrite an existing value in the config file.
"""
if not overwrite and self.config.has_option(section, option):
return
if not self.config.has_section(section):
self.config.add_section(section)
self.config.set(section, option, str(value))
def rank(self, dim, pct=False, keep_attrs=None):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within
that set.
Ranks begin at 1, not 0. If pct is True, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
ranked : Dataset
Variables that do not depend on `dim` are dropped.
"""
if dim not in self.dims:
raise ValueError(
'Dataset does not contain the dimension: %s' % dim)
variables = OrderedDict()
for name, var in self.variables.items():
if name in self.data_vars:
if dim in var.dims:
variables[name] = var.rank(dim, pct=pct)
else:
variables[name] = var
coord_names = set(self.coords)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
attrs = self.attrs if keep_attrs else None
return self._replace_vars_and_dims(variables, coord_names, attrs=attrs)
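# Usage sketch (illustrative; requires xarray with the bottleneck package installed):
import xarray as xr
example_ds = xr.Dataset({"a": ("x", [30.0, 10.0, 20.0])})
ranked = example_ds.rank("x")                 # a becomes [3.0, 1.0, 2.0]
pct_ranked = example_ds.rank("x", pct=True)   # percentage ranks in (0, 1]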
def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
"""Analyzes queries from a given log file"""
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0
def _error_to_string(self, error_id):
"""Returns an error string from libiperf
:param error_id: The error_id produced by libiperf
:rtype: string
"""
strerror = self.lib.iperf_strerror
strerror.restype = c_char_p
return strerror(error_id).decode('utf-8')
def add_special_file(self, mask, path, from_quick_server, ctype=None):
"""Adds a special file that might have a different actual path than
its address.
Parameters
----------
mask : string
The URL that must be matched to perform this request.
path : string
The actual file path.
from_quick_server : bool
If set the file path is relative to *this* script otherwise it is
relative to the process.
ctype : string
Optional content type.
"""
full_path = path if not from_quick_server else os.path.join(
os.path.dirname(__file__), path)
def read_file(_req, _args):
with open(full_path, 'rb') as f_out:
return Response(f_out.read(), ctype=ctype)
self.add_text_get_mask(mask, read_file)
self.set_file_argc(mask, 0)
def get_texts(self):
""" Parse documents from a .txt file assuming 1 document per line, yielding lists of filtered tokens """
with self.getstream() as text_stream:
for i, line in enumerate(text_stream):
line = to_unicode(line)
line = (TweetCorpus.case_normalizer or passthrough)(line)
# line = self.case_normalizer(line)
if self.mask is not None and not self.mask[i]:
continue
ngrams = []
for ng in tokens2ngrams((TweetCorpus.tokenizer or str.split)(line), n=self.num_grams):
if self.ignore_matcher(ng):
continue
ngrams += [ng]
if not (i % 1000):
print(line)
print(ngrams)
yield ngrams
def iteritems(self, prefix=None):
"""Like dict.iteritems."""
query = Setting.query
if prefix:
query = query.filter(Setting.key.startswith(prefix))
for s in query.yield_per(1000):
yield (s.key, s.value)
def comments(case_id):
"""Upload a new comment."""
text = request.form['text']
variant_id = request.form.get('variant_id')
username = request.form.get('username')
case_obj = app.db.case(case_id)
app.db.add_comment(case_obj, text, variant_id=variant_id, username=username)
return redirect(request.referrer)
def AddEthernetDevice(self, device_name, iface_name, state):
'''Add an ethernet device.
You have to specify device_name, device interface name (e. g. eth0), and
state. You can use the predefined DeviceState values (e. g.
DeviceState.ACTIVATED) or supply a numeric value. For valid state values
please visit
http://projects.gnome.org/NetworkManager/developers/api/09/spec.html#type-NM_DEVICE_STATE
Please note that this does not set any global properties.
Returns the new object path.
'''
path = '/org/freedesktop/NetworkManager/Devices/' + device_name
wired_props = {'Carrier': False,
'HwAddress': dbus.String('78:DD:08:D2:3D:43'),
'PermHwAddress': dbus.String('78:DD:08:D2:3D:43'),
'Speed': dbus.UInt32(0)}
self.AddObject(path,
'org.freedesktop.NetworkManager.Device.Wired',
wired_props,
[])
props = {'DeviceType': dbus.UInt32(1),
'State': dbus.UInt32(state),
'Interface': iface_name,
'ActiveConnection': dbus.ObjectPath('/'),
'AvailableConnections': dbus.Array([], signature='o'),
'AutoConnect': False,
'Managed': True,
'Driver': 'dbusmock',
'IpInterface': ''}
obj = dbusmock.get_object(path)
obj.AddProperties(DEVICE_IFACE, props)
self.object_manager_emit_added(path)
NM = dbusmock.get_object(MANAGER_OBJ)
devices = NM.Get(MANAGER_IFACE, 'Devices')
devices.append(path)
NM.Set(MANAGER_IFACE, 'Devices', devices)
NM.EmitSignal('org.freedesktop.NetworkManager', 'DeviceAdded', 'o', [path])
return path
def revoke_permission_from_user_groups(self, permission, **kwargs): # noqa: E501
"""Revokes a single permission from user group(s) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.revoke_permission_from_user_groups(permission, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str permission: Permission to revoke from user group(s). (required)
:param list[str] body: List of user groups.
:return: ResponseContainerUserGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.revoke_permission_from_user_groups_with_http_info(permission, **kwargs) # noqa: E501
else:
(data) = self.revoke_permission_from_user_groups_with_http_info(permission, **kwargs) # noqa: E501
return data
def contains(self, other):
"""Determine whether this range contains another."""
return self._start <= other.start and self._end >= other.end
def remove_unsupported_kwargs(module_or_fn, all_kwargs_dict):
"""Removes any kwargs not supported by `module_or_fn` from `all_kwargs_dict`.
A new dict is return with shallow copies of keys & values from
`all_kwargs_dict`, as long as the key is accepted by module_or_fn. The
returned dict can then be used to connect `module_or_fn` (along with some
other inputs, ie non-keyword arguments, in general).
`snt.supports_kwargs` is used to tell whether a given kwarg is supported. Note
that this method may give false negatives, which would lead to extraneous
removals in the result of this function. Please read the docstring for
`snt.supports_kwargs` for details, and manually inspect the results from this
function if in doubt.
Args:
module_or_fn: some callable which can be interrogated by
`snt.supports_kwargs`. Generally a Sonnet module or a method (wrapped in
`@reuse_variables`) of a Sonnet module.
all_kwargs_dict: a dict containing strings as keys, or None.
Raises:
ValueError: if `all_kwargs_dict` is not a dict.
Returns:
A dict containing some subset of the keys and values in `all_kwargs_dict`.
This subset may be empty. If `all_kwargs_dict` is None, this will be an
empty dict.
"""
if all_kwargs_dict is None:
all_kwargs_dict = {}
if not isinstance(all_kwargs_dict, dict):
raise ValueError("all_kwargs_dict must be a dict with string keys.")
return {
kwarg: value for kwarg, value in all_kwargs_dict.items()
if supports_kwargs(module_or_fn, kwarg) != NOT_SUPPORTED
}
def request(self, url, method='GET', params=None, data=None,
expected_response_code=200):
"""Make a http request to API."""
url = "{0}/{1}".format(self._baseurl, url)
if params is None:
params = {}
auth = {
'u': self._username,
'p': self._password
}
params.update(auth)
if data is not None and not isinstance(data, str):
data = json.dumps(data)
retry = True
_try = 0
# Try to send the request more than once by default (see #103)
while retry:
try:
response = session.request(
method=method,
url=url,
params=params,
data=data,
headers=self._headers,
verify=self._verify_ssl,
timeout=self._timeout
)
break
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout):
_try += 1
if self._retries != 0:
retry = _try < self._retries
else:
raise requests.exceptions.ConnectionError
if response.status_code == expected_response_code:
return response
else:
raise InfluxDBClientError(response.content, response.status_code)
def get_chunk_coords(self):
"""
Return the x,z coordinates and length of the chunks that are defined in the region file.
This includes chunks which may not be readable for whatever reason.
This method is deprecated. Use :meth:`get_metadata` instead.
"""
chunks = []
for x in range(32):
for z in range(32):
m = self.metadata[x,z]
if m.is_created():
chunks.append({'x': x, 'z': z, 'length': m.blocklength})
return chunks
def wrap_scene(cls, root, refobjinter):
"""Wrap all refobjects in the scene in a :class:`Reftrack` instance
and set the right parents, also add suggestions for the current scene
When you want to quickly scan the scene and display the reftracks in a tool,
this is the easiest function.
It uses wrap on all refobjects in the scene, then adds suggestions for the
current scene.
:param root: the root that groups all reftracks and makes it possible to search for parents
:type root: :class:`ReftrackRoot`
:param refobjinter: a program-specific reftrack object interface
:type refobjinter: :class:`RefobjInterface`
:returns: list with the wrapped :class:`Reftrack` instances
:rtype: list
:raises: None
"""
refobjects = cls.get_unwrapped(root, refobjinter)
tracks = cls.wrap(root, refobjinter, refobjects)
sugs = root.get_scene_suggestions(refobjinter)
for typ, element in sugs:
r = cls(root=root, refobjinter=refobjinter, typ=typ, element=element)
tracks.append(r)
return tracks
def _mul8(ins):
""" Multiplies 2 las values from the stack.
Optimizations:
* If any of the ops is ZERO,
then do A = 0 ==> XOR A, cause A * 0 = 0 * A = 0
* If any of the ops is ONE, do NOTHING
A * 1 = 1 * A = A
"""
op1, op2 = tuple(ins.quad[2:])
if _int_ops(op1, op2) is not None:
op1, op2 = _int_ops(op1, op2)
output = _8bit_oper(op1)
if op2 == 1: # A * 1 = 1 * A = A
output.append('push af')
return output
if op2 == 0:
output.append('xor a')
output.append('push af')
return output
if op2 == 2: # A * 2 == A SLA 1
output.append('add a, a')
output.append('push af')
return output
if op2 == 4: # A * 4 == A SLA 2
output.append('add a, a')
output.append('add a, a')
output.append('push af')
return output
output.append('ld h, %i' % int8(op2))
else:
if op2[0] == '_': # stack optimization
op1, op2 = op2, op1
output = _8bit_oper(op1, op2)
output.append('call __MUL8_FAST') # Immediate
output.append('push af')
REQUIRES.add('mul8.asm')
return output
def _reduce_opacity(self):
"""
Reduce opacity for watermark image.
"""
if self.image.mode != 'RGBA':
image = self.image.convert('RGBA')
else:
image = self.image.copy()
alpha = image.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(self.opacity)
image.putalpha(alpha)
self.image = image
def maketabdesc(descs=[]):
"""Create a table description.
Creates a table description from a set of column descriptions. The
resulting table description can be used in the :class:`table` constructor.
For example::
scd1 = makescacoldesc("col2", "aa")
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
scd3 = makescacoldesc("colrec1", {})
acd1 = makearrcoldesc("arr1", 1, 0, [2,3,4])
acd2 = makearrcoldesc("arr2", 0.+0j)
td = maketabdesc([scd1, scd2, scd3, acd1, acd2])
t = table("mytable", td, nrow=100)
| This creates a table description `td` from five column descriptions
and then creates a 100-row table called `mytable` from the table
description.
| The columns contain respectivily strings, integer scalars, records,
3D integer arrays with fixed shape [2,3,4], and complex arrays with
variable shape.
"""
rec = {}
# If a single dict is given, make a list of it.
if isinstance(descs, dict):
descs = [descs]
for desc in descs:
colname = desc['name']
if colname in rec:
raise ValueError('Column name ' + colname + ' multiply used in table description')
rec[colname] = desc['desc']
return rec
def delete(self, file_id):
"""Given an file_id, delete this stored file's files collection document
and associated chunks from a GridFS bucket.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# Get _id of file to delete
file_id = fs.upload_from_stream("test_file", "data I want to store!")
fs.delete(file_id)
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
:Parameters:
- `file_id`: The _id of the file to be deleted.
"""
res = self._files.delete_one({"_id": file_id})
self._chunks.delete_many({"files_id": file_id})
if not res.deleted_count:
raise NoFile(
"no file could be deleted because none matched %s" % file_id) | 0.003659 |
def default_type(self):
"""The default value type for this Slot.
The Python equivalent of the CLIPS deftemplate-slot-defaultp function.
"""
return TemplateSlotDefaultType(
lib.EnvDeftemplateSlotDefaultP(self._env, self._tpl, self._name))
def provider_for_url(self, url):
"""
Find the right provider for a URL
"""
for provider, regex in self.get_registry().items():
if re.match(regex, url) is not None:
return provider
raise OEmbedMissingEndpoint('No endpoint matches URL: %s' % url)
def get_en_words() -> Set[str]:
"""
Returns a list of English words which can be used to filter out
code-switched sentences.
"""
pull_en_words()
with open(config.EN_WORDS_PATH) as words_f:
raw_words = words_f.readlines()
en_words = set([word.strip().lower() for word in raw_words])
NA_WORDS_IN_EN_DICT = set(["kore", "nani", "karri", "imi", "o", "yaw", "i",
"bi", "aye", "imi", "ane", "kubba", "kab", "a-",
"ad", "a", "mak", "selim", "ngai", "en", "yo",
"wud", "mani", "yak", "manu", "ka-", "mong",
"manga", "ka-", "mane", "kala", "name", "kayo",
"kare", "laik", "bale", "ni", "rey", "bu",
"re", "iman", "bom", "wam",
"alu", "nan", "kure", "kuri", "wam", "ka", "ng",
"yi", "na", "m", "arri", "e", "kele", "arri", "nga",
"kakan", "ai", "ning", "mala", "ti", "wolk",
"bo", "andi", "ken", "ba", "aa", "kun", "bini",
"wo", "bim", "man", "bord", "al", "mah", "won",
"ku", "ay", "belen", "wen", "yah", "muni",
"bah", "di", "mm", "anu", "nane", "ma", "kum",
"birri", "ray", "h", "kane", "mumu", "bi", "ah",
"i-", "n", "mi", "bedman", "rud", "le", "babu",
"da", "kakkak", "yun", "ande", "naw", "kam", "bolk",
"woy", "u", "bi-",
])
EN_WORDS_NOT_IN_EN_DICT = set(["screenprinting"])
en_words = en_words.difference(NA_WORDS_IN_EN_DICT)
en_words = en_words | EN_WORDS_NOT_IN_EN_DICT
return en_words
def sort_languages(self, order=Qt.AscendingOrder):
"""
Sorts the Model languages.
:param order: Order. ( Qt.SortOrder )
"""
self.beginResetModel()
self.__languages = sorted(self.__languages, key=lambda x: (x.name), reverse=order)
self.endResetModel()
def download(self, bucket, key, fileobj, extra_args=None,
subscribers=None):
"""Downloads a file from S3
:type bucket: str
:param bucket: The name of the bucket to download from
:type key: str
:param key: The name of the key to download from
:type fileobj: str or seekable file-like object
:param fileobj: The name of a file to download or a seekable file-like
object to download. It is recommended to use a filename because
file-like objects may result in higher memory usage.
:type extra_args: dict
:param extra_args: Extra arguments that may be passed to the
client operation
:type subscribers: list(s3transfer.subscribers.BaseSubscriber)
:param subscribers: The list of subscribers to be invoked in the
order provided based on the event emit during the process of
the transfer request.
:rtype: s3transfer.futures.TransferFuture
:returns: Transfer future representing the download
"""
if extra_args is None:
extra_args = {}
if subscribers is None:
subscribers = []
self._validate_all_known_args(extra_args, self.ALLOWED_DOWNLOAD_ARGS)
call_args = CallArgs(
bucket=bucket, key=key, fileobj=fileobj, extra_args=extra_args,
subscribers=subscribers
)
extra_main_kwargs = {'io_executor': self._io_executor}
if self._bandwidth_limiter:
extra_main_kwargs['bandwidth_limiter'] = self._bandwidth_limiter
return self._submit_transfer(
call_args, DownloadSubmissionTask, extra_main_kwargs)
def _remove_session_save_objects(self):
"""Used during exception handling in case we need to remove() session:
keep instances and merge them in the new session.
"""
if self.testing:
return
# Before destroying the session, get all instances to be attached to the
# new session. Without this, we get DetachedInstance errors, like when
# trying to get user's attribute in the error page...
old_session = db.session()
g_objs = []
for key in iter(g):
obj = getattr(g, key)
if isinstance(obj, db.Model) and sa.orm.object_session(obj) in (
None,
old_session,
):
g_objs.append((key, obj, obj in old_session.dirty))
db.session.remove()
session = db.session()
for key, obj, load in g_objs:
# replace obj instance in bad session by new instance in fresh
# session
setattr(g, key, session.merge(obj, load=load))
# refresh `current_user`
user = getattr(_request_ctx_stack.top, "user", None)
if user is not None and isinstance(user, db.Model):
_request_ctx_stack.top.user = session.merge(user, load=load)
def train(self, data, epochs, autostop=False):
"""!
@brief Trains self-organized feature map (SOM).
@param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.
@param[in] epochs (uint): Number of epochs for training.
@param[in] autostop (bool): Automatic termination of the learning process when adaptation no longer occurs.
@return (uint) Number of learning iterations.
"""
self._data = data
if self.__ccore_som_pointer is not None:
return wrapper.som_train(self.__ccore_som_pointer, data, epochs, autostop)
self._sqrt_distances = self.__initialize_distances(self._size, self._location)
for i in range(self._size):
self._award[i] = 0
self._capture_objects[i].clear()
# weights
self._create_initial_weights(self._params.init_type)
previous_weights = None
for epoch in range(1, epochs + 1):
# Depression term of coupling
self._local_radius = (self._params.init_radius * math.exp(-(epoch / epochs))) ** 2
self._learn_rate = self._params.init_learn_rate * math.exp(-(epoch / epochs))
# Clear statistics
if autostop:
for i in range(self._size):
self._award[i] = 0
self._capture_objects[i].clear()
for i in range(len(self._data)):
# Step 1: Competition:
index = self._competition(self._data[i])
# Step 2: Adaptation:
self._adaptation(index, self._data[i])
# Update statistics
if (autostop == True) or (epoch == epochs):
self._award[index] += 1
self._capture_objects[index].append(i)
# Check requirement of stopping
if autostop:
if previous_weights is not None:
maximal_adaptation = self._get_maximal_adaptation(previous_weights)
if maximal_adaptation < self._params.adaptation_threshold:
return epoch
previous_weights = [item[:] for item in self._weights]
return epochs
def show_gallery(slug, size="100x100", crop="center", **kwargs):
"""
Template tag that renders a photo gallery.
Usage example::
{% show_gallery "gallery-slug" "150x110" "center" class='gallery-class' %}
:param slug: slug of the photo gallery
:param size: thumbnail size
:param crop: crop parameters
:param kwargs: HTML attributes of the wrapping tag
:return:
"""
try:
album = PhotoAlbum.objects.published().get(slug=slug)
photos = album.photo_set.published().order_by('sort').all()
return {'album': album, 'photos': photos, 'size': size, 'crop': crop, 'data': kwargs}
except PhotoAlbum.DoesNotExist:
return None
def attribute_invoked(self, sender, name, args, kwargs):
"Handles the creation of ExpectationBuilder when an attribute is invoked."
return ExpectationBuilder(self.sender, self.delegate, self.add_invocation, self.add_expectations, '__call__')(*args, **kwargs)
def is_lesser(a, b):
"""
Verify that an item *a* is less than or equal to an item *b*
:param a: An item
:param b: Another item
:return: True or False
"""
if type(a) != type(b):
return False
if isinstance(a, str) and isinstance(b, str):
return a == b
elif isinstance(a, bool) and isinstance(b, bool):
return a == b
elif isinstance(a, list) and isinstance(b, list):
for element in a:
flag = 0
for e in b:
if is_lesser(element, e):
flag = 1
break
if not flag:
return False
return True
elif isinstance(a, dict) and isinstance(b, dict):
if is_lesser(list(a.keys()), list(b.keys())):
for key, val in a.items():
if not is_lesser(val, b[key]):
return False
return True
return False
elif isinstance(a, int) and isinstance(b, int):
return a <= b
elif isinstance(a, float) and isinstance(b, float):
return a <= b
return False
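# Usage sketch (added for illustration): "lesser" here means contained-in /
# not-greater, checked recursively for dicts and lists.
print(is_lesser({"scope": ["openid"]}, {"scope": ["openid", "email"], "extra": 1}))  # True
print(is_lesser(5, 3))        # False
print(is_lesser("a", "a"))    # True (strings only compare equal)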
def populateFromFile(self, dataUrl, indexFile=None):
"""
Populates the instance variables of this ReadGroupSet from the
specified dataUrl and indexFile. If indexFile is not specified,
the usual form (dataUrl + ".bai") is assumed.
"""
self._dataUrl = dataUrl
self._indexFile = indexFile
if indexFile is None:
self._indexFile = dataUrl + ".bai"
samFile = self.getFileHandle(self._dataUrl)
self._setHeaderFields(samFile)
if 'RG' not in samFile.header or len(samFile.header['RG']) == 0:
readGroup = HtslibReadGroup(self, self.defaultReadGroupName)
self.addReadGroup(readGroup)
else:
for readGroupHeader in samFile.header['RG']:
readGroup = HtslibReadGroup(self, readGroupHeader['ID'])
readGroup.populateFromHeader(readGroupHeader)
self.addReadGroup(readGroup)
self._bamHeaderReferenceSetName = None
for referenceInfo in samFile.header['SQ']:
if 'AS' not in referenceInfo:
infoDict = parseMalformedBamHeader(referenceInfo)
else:
infoDict = referenceInfo
name = infoDict.get('AS', references.DEFAULT_REFERENCESET_NAME)
if self._bamHeaderReferenceSetName is None:
self._bamHeaderReferenceSetName = name
elif self._bamHeaderReferenceSetName != name:
raise exceptions.MultipleReferenceSetsInReadGroupSet(
self._dataUrl, name, self._bamFileReferenceName)
self._numAlignedReads = samFile.mapped
self._numUnalignedReads = samFile.unmapped
def combine_keys(pks: Iterable[Ed25519PublicPoint]) -> Ed25519PublicPoint:
"""Combine a list of Ed25519 points into a "global" CoSi key."""
P = [_ed25519.decodepoint(pk) for pk in pks]
combine = reduce(_ed25519.edwards_add, P)
return Ed25519PublicPoint(_ed25519.encodepoint(combine))
def p_duration_number_duration_unit(self, p):
'duration : NUMBER DURATION_UNIT'
logger.debug('duration = number %s, duration unit %s', p[1], p[2])
p[0] = Duration.from_quantity_unit(p[1], p[2])
def edit_section(self, id, course_section_end_at=None, course_section_name=None, course_section_restrict_enrollments_to_section_dates=None, course_section_sis_section_id=None, course_section_start_at=None):
"""
Edit a section.
Modify an existing section.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - course_section[name]
"""The name of the section"""
if course_section_name is not None:
data["course_section[name]"] = course_section_name
# OPTIONAL - course_section[sis_section_id]
"""The sis ID of the section"""
if course_section_sis_section_id is not None:
data["course_section[sis_section_id]"] = course_section_sis_section_id
# OPTIONAL - course_section[start_at]
"""Section start date in ISO8601 format, e.g. 2011-01-01T01:00Z"""
if course_section_start_at is not None:
data["course_section[start_at]"] = course_section_start_at
# OPTIONAL - course_section[end_at]
"""Section end date in ISO8601 format. e.g. 2011-01-01T01:00Z"""
if course_section_end_at is not None:
data["course_section[end_at]"] = course_section_end_at
# OPTIONAL - course_section[restrict_enrollments_to_section_dates]
"""Set to true to restrict user enrollments to the start and end dates of the section."""
if course_section_restrict_enrollments_to_section_dates is not None:
data["course_section[restrict_enrollments_to_section_dates]"] = course_section_restrict_enrollments_to_section_dates
self.logger.debug("PUT /api/v1/sections/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/sections/{id}".format(**path), data=data, params=params, single_item=True)
def make_clean_visible(_html, tag_replacement_char=' '):
'''
Takes an HTML-like Unicode string as input and returns a UTF-8
encoded string with all tags replaced by whitespace. In particular,
all Unicode characters inside HTML tags are replaced with a single
whitespace character.
This does not detect comments, style, script, link. It also does
not do anything with HTML-escaped characters. All of these are
handled by the clean_html pre-cursor step.
Pre-existing whitespace of any kind (newlines, tabs) is converted
to single spaces ' ', which has the same byte length (and
character length).
This is a simple state machine iterator without regexes
'''
def non_tag_chars(html):
n = 0
while n < len(html):
angle = html.find('<', n)
if angle == -1:
yield html[n:]
n = len(html)
break
yield html[n:angle]
n = angle
while n < len(html):
nl = html.find('\n', n)
angle = html.find('>', n)
if angle == -1:
yield ' ' * (len(html) - n)
n = len(html)
break
elif nl == -1 or angle < nl:
yield ' ' * (angle + 1 - n)
n = angle + 1
break
else:
yield ' ' * (nl - n) + '\n'
n = nl + 1
# do not break
if not isinstance(_html, unicode):
_html = unicode(_html, 'utf-8')
# Protect emails by substituting with unique key
_html = fix_emails(_html)
#Strip tags with previous logic
non_tag = ''.join(non_tag_chars(_html))
return non_tag.encode('utf-8')
def iter_assets(self, number=-1, etag=None):
"""Iterate over the assets available for this release.
:param int number: (optional), Number of assets to return
:param str etag: (optional), last ETag header sent
:returns: generator of :class:`Asset <Asset>` objects
"""
url = self._build_url('assets', base_url=self._api)
return self._iter(number, url, Asset, etag=etag)
def _get_session(team, timeout=None):
"""
Creates a session or returns an existing session.
"""
global _sessions # pylint:disable=C0103
session = _sessions.get(team)
if session is None:
auth = _create_auth(team, timeout)
_sessions[team] = session = _create_session(team, auth)
assert session is not None
return session
def fitToSize(rect, targetWidth, targetHeight, bounds):
"""
Pads or crops a rectangle as necessary to achieve the target dimensions,
ensuring the modified rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
# Determine the difference between the current size and target size
x,y,w,h = rect
diffX = w - targetWidth
diffY = h - targetHeight
# Determine if we are cropping or padding the width
if diffX > 0:
cropLeft = math.floor(diffX / 2)
cropRight = diffX - cropLeft
x,y,w,h = cropRect((x,y,w,h), 0, 0, cropLeft, cropRight)
elif diffX < 0:
padLeft = math.floor(abs(diffX) / 2)
padRight = abs(diffX) - padLeft
x,y,w,h = padRect((x,y,w,h), 0, 0, padLeft, padRight, bounds, False)
# Determine if we are cropping or padding the height
if diffY > 0:
cropTop = math.floor(diffY / 2)
cropBottom = diffY - cropTop
x,y,w,h = cropRect((x,y,w,h), cropTop, cropBottom, 0, 0)
elif diffY < 0:
padTop = math.floor(abs(diffY) / 2)
padBottom = abs(diffY) - padTop
x,y,w,h = padRect((x,y,w,h), padTop, padBottom, 0, 0, bounds, False)
return (x,y,w,h)
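# Usage sketch (assumes the cropRect/padRect helpers from this module): take a
# 120x80 box and force it to 100x100 inside a 640x480 frame, cropping the
# width and padding the height symmetrically.
example_rect = fitToSize((50, 40, 120, 80), 100, 100, (0, 0, 640, 480))
# -> an (x, y, 100, 100) rectangle clamped to the frame bounds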
def get_context_data(self,**kwargs):
''' Pass the initial kwargs, then update with the needed registration info. '''
context_data = super(RegistrationSummaryView,self).get_context_data(**kwargs)
regSession = self.request.session[REG_VALIDATION_STR]
reg_id = regSession["temp_reg_id"]
reg = TemporaryRegistration.objects.get(id=reg_id)
discount_codes = regSession.get('discount_codes',None)
discount_amount = regSession.get('total_discount_amount',0)
voucher_names = regSession.get('voucher_names',[])
total_voucher_amount = regSession.get('total_voucher_amount',0)
addons = regSession.get('addons',[])
if reg.priceWithDiscount == 0:
# Create a new Invoice if one does not already exist.
new_invoice = Invoice.get_or_create_from_registration(reg,status=Invoice.PaymentStatus.paid)
new_invoice.processPayment(0,0,forceFinalize=True)
isFree = True
else:
isFree = False
context_data.update({
'registration': reg,
"totalPrice": reg.totalPrice,
'subtotal': reg.priceWithDiscount,
'taxes': reg.addTaxes,
"netPrice": reg.priceWithDiscountAndTaxes,
"addonItems": addons,
"discount_codes": discount_codes,
"discount_code_amount": discount_amount,
"voucher_names": voucher_names,
"total_voucher_amount": total_voucher_amount,
"total_discount_amount": discount_amount + total_voucher_amount,
"currencyCode": getConstant('general__currencyCode'),
'payAtDoor': reg.payAtDoor,
'is_free': isFree,
})
if self.request.user:
door_permission = self.request.user.has_perm('core.accept_door_payments')
invoice_permission = self.request.user.has_perm('core.send_invoices')
if door_permission or invoice_permission:
context_data['form'] = DoorAmountForm(
user=self.request.user,
doorPortion=door_permission,
invoicePortion=invoice_permission,
payerEmail=reg.email,
discountAmount=max(reg.totalPrice - reg.priceWithDiscount,0),
)
return context_data
def verify_authentication_data(self, key):
'''
Verify the current authentication data based on the current key-id and
the given key.
'''
correct_authentication_data = self.calculate_authentication_data(key)
return self.authentication_data == correct_authentication_data
def send_reply_to(address, reply=EMPTY):
"""Reply to a message previously received
:param address: a nw0 address (eg from `nw0.advertise`)
:param reply: any simple Python object, including text & tuples
"""
_logger.debug("Sending reply %s to %s", reply, address)
return sockets._sockets.send_reply_to(address, reply)
def parse(cls, line, encoding=pydle.protocol.DEFAULT_ENCODING):
"""
Parse given line into IRC message structure.
Returns a TaggedMessage.
"""
valid = True
# Decode message.
try:
message = line.decode(encoding)
except UnicodeDecodeError:
# Try our fallback encoding.
message = line.decode(pydle.protocol.FALLBACK_ENCODING)
# Sanity check for message length.
if len(message) > TAGGED_MESSAGE_LENGTH_LIMIT:
valid = False
# Strip message separator.
if message.endswith(rfc1459.protocol.LINE_SEPARATOR):
message = message[:-len(rfc1459.protocol.LINE_SEPARATOR)]
elif message.endswith(rfc1459.protocol.MINIMAL_LINE_SEPARATOR):
message = message[:-len(rfc1459.protocol.MINIMAL_LINE_SEPARATOR)]
raw = message
# Parse tags.
tags = {}
if message.startswith(TAG_INDICATOR):
message = message[len(TAG_INDICATOR):]
raw_tags, message = message.split(' ', 1)
for raw_tag in raw_tags.split(TAG_SEPARATOR):
if TAG_VALUE_SEPARATOR in raw_tag:
tag, value = raw_tag.split(TAG_VALUE_SEPARATOR, 1)
else:
tag = raw_tag
value = True
tags[tag] = value
# Parse rest of message.
message = super().parse(message.lstrip().encode(encoding), encoding=encoding)
return TaggedMessage(_raw=raw, _valid=message._valid and valid, tags=tags, **message._kw)
def _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff):
""" Utility called by gen_design. It reads in one or more stimulus timing
file conforming to AFNI style, and returns a list
(size of ``[number of runs \\* number of conditions]``)
of dictionary including onsets, durations and weights of each event.
Parameters
----------
stimtime_files: a string or a list of string.
Each string is the name of the file storing the stimulus
timing information of one task condition.
The contents in the files should follow the style of AFNI
stimulus timing files, refer to gen_design.
n_C: integer, number of task conditions
n_S: integer, number of scans
scan_onoff: list of numbers.
The onset of each scan after concatenating all scans,
together with the offset of the last scan.
For example, if 3 scans of duration 100s, 150s, 120s are run,
scan_onoff is [0, 100, 250, 370]
Returns
-------
design_info: list of stimulus information
The first level of the list correspond to different scans.
The second level of the list correspond to different conditions.
Each item in the list is a dictiornary with keys "onset",
"duration" and "weight". If one condition includes no event
in a scan, the values of these keys in that scan of the condition
are empty lists.
See also
--------
gen_design
"""
design_info = [[{'onset': [], 'duration': [], 'weight': []}
for i_c in range(n_C)] for i_s in range(n_S)]
# Read stimulus timing files
for i_c in range(n_C):
with open(stimtime_files[i_c]) as f:
text = f.readlines()
assert len(text) == n_S, \
'Number of lines does not match number of runs!'
for i_s, line in enumerate(text):
events = line.strip().split()
if events[0] == '*':
continue
for event in events:
assert event != '*'
tmp = str.split(event, ':')
if len(tmp) == 2:
duration = float(tmp[1])
else:
duration = 1.0
tmp = str.split(tmp[0], '*')
if len(tmp) == 2:
weight = float(tmp[1])
else:
weight = 1.0
if (float(tmp[0]) >= 0
and float(tmp[0])
< scan_onoff[i_s + 1] - scan_onoff[i_s]):
design_info[i_s][i_c]['onset'].append(float(tmp[0]))
design_info[i_s][i_c]['duration'].append(duration)
design_info[i_s][i_c]['weight'].append(weight)
return design_info
def get_suffix(name):
"""Check if file name have valid suffix for formatting.
if have suffix return it else return False.
"""
a = name.count(".")
if a:
ext = name.split(".")[-1]
if ext in LANGS.keys():
return ext
return False
else:
return False
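# Usage sketch (illustrative): assuming the module's LANGS dict contains an
# entry for "py", the helper returns the extension only when it is known.
print(get_suffix("script.py"))   # "py"
print(get_suffix("notes.xyz"))   # False (unknown suffix)
print(get_suffix("Makefile"))    # False (no suffix at all)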
def binglookup(w1i, w2i):
"""
Bingham statistics lookup table.
"""
K = {'0.06': {'0.02': ['-25.58', '-8.996'], '0.06': ['-9.043', '-9.043'], '0.04': ['-13.14', '-9.019']}, '0.22': {'0.08': ['-6.944', '-2.644'], '0.02': ['-25.63', '-2.712'], '0.20': ['-2.649', '-2.354'], '0.06': ['-9.027', '-2.673'], '0.04': ['-13.17', '-2.695'], '0.14': ['-4.071', '-2.521'], '0.16': ['-3.518', '-2.470'], '0.10': ['-5.658', '-2.609'], '0.12': ['-4.757', '-2.568'], '0.18': ['-3.053', '-2.414'], '0.22': ['-2.289', '-2.289']}, '0.46': {'0.02': ['-25.12', '-0.250'], '0.08': ['-6.215', '0.000'], '0.06': ['-8.371', '-0.090'], '0.04': ['-12.58', '-0.173']}, '0.44': {'0.08': ['-6.305', '-0.186'], '0.02': ['-25.19', '-0.418'], '0.06': ['-8.454', '-0.270'], '0.04': ['-12.66', '-0.347'], '0.10': ['-4.955', '-0.097'], '0.12': ['-3.992', '0.000']}, '0.42': {'0.08': ['-6.388', '-0.374'], '0.02': ['-25.5', '-0.589'], '0.06': ['-8.532', '-0.452'], '0.04': ['-12.73', '-0.523'], '0.14': ['-3.349', '-0.104'], '0.16': ['-2.741', '0.000'], '0.10': ['-5.045', '-0.290'], '0.12': ['-4.089', '-0.200']}, '0.40': {'0.08': ['-6.466', '-0.564'], '0.02': ['-25.31', '-0.762'], '0.20': ['-1.874', '-0.000'], '0.06': ['-8.604', '-0.636'], '0.04': ['-12.80', '-0.702'], '0.14': ['-3.446', '-0.312'], '0.16': ['-2.845', '-0.215'], '0.10': ['-5.126', '-0.486'], '0.12': ['-4.179', '-0.402'], '0.18': ['-2.330', '-0.111']}, '0.08': {'0.02': ['-25.6', '-6.977'], '0.08': ['-7.035', '-7.035'], '0.06': ['-9.065', '-7.020'], '0.04': ['-13.16', '-6.999']}, '0.28': {'0.08': ['-6.827', '-1.828'], '0.28': ['-1.106', '-1.106'], '0.02': ['-25.57', '-1.939'], '0.20': ['-2.441', '-1.458'], '0.26': ['-1.406', '-1.203'], '0.24': ['-1.724', '-1.294'], '0.06': ['-8.928', '-1.871'], '0.04': ['-13.09', '-1.908'], '0.14': ['-3.906', '-1.665'], '0.16': ['-3.338', '-1.601'], '0.10': ['-5.523', '-1.779'], '0.12': ['-4.606', '-1.725'], '0.18': ['-2.859', '-1.532'], '0.22': ['-2.066', '-1.378']}, '0.02': {'0.02': ['-25.55', '-25.55']}, '0.26': {'0.08': ['-6.870', '-2.078'], '0.02': ['-25.59', '-2.175'], '0.20': ['-2.515', '-1.735'], '0.26': ['-1.497', '-1.497'], '0.24': ['-1.809', '-1.582'], '0.06': ['-8.96 6', '-2.117'], '0.04': ['-13.12', '-2.149'], '0.14': ['-3.965', '-1.929'], '0.16': ['-3.403', '-1.869'], '0.10': ['-5.573', '-2.034'], '0.12': ['-4.661', '-1.984'], '0.18': ['-2.928', '-1.805'], '0.22': ['-2.1 46', '-1.661']}, '0.20': {'0.08': ['-6.974', '-2.973'], '0.02': ['-25.64', '-3.025'], '0.20': ['-2.709', '-2.709'], '0.06': ['-9.05', '-2.997'], '0.04': ['-13.18', '-3.014'], '0.14': ['-4.118', '-2.863'], '0.1 6': ['-3.570', '-2.816'], '0.10': ['-5.694', '-2.942'], '0.12': ['-4.799', '-2.905'], '0.18': ['-3.109', '-2.765']}, '0.04': {'0.02': ['-25.56', '-13.09'], '0.04': ['-13.11', '-13.11']}, '0.14': {'0.08': ['-7. 033', '-4.294'], '0.02': ['-25.64', '-4.295'], '0.06': ['-9.087', '-4.301'], '0.04': ['-13.20', '-4.301'], '0.14': ['-4.231', '-4.231'], '0.10': ['-5.773', '-4.279'], '0.12': ['-4.896', '-4.258']}, '0.16': {'0 .08': ['-7.019', '-3.777'], '0.02': ['-25.65', '-3.796'], '0.06': ['-9.081', '-3.790'], '0.04': ['-13.20', '-3.796'], '0.14': ['-4.198', '-3.697'], '0.16': ['-3.659', '-3.659'], '0.10': ['-5.752', '-3.756'], ' 0.12': ['-4.868', '-3.729']}, '0.10': {'0.02': ['-25.62', '-5.760'],
'0.08': ['-7.042', '-5.798'], '0.06': ['-9.080', '-5.791'], '0.10': ['-5.797', '-5.797'], '0.04': ['-13.18', '-5.777']}, '0.12': {'0.08': [' -7.041', '-4.941'], '0.02': ['-25.63', '-4.923'], '0.06': ['-9.087', '-4.941'], '0.04': ['-13.19', '-4.934'], '0.10': ['-5.789', '-4.933'], '0.12': ['-4.917', '-4.917']}, '0.18': {'0.08': ['-6.999', '-3.345'], '0.02': ['-25.65', '-3.381'], '0.06': ['-9.068', '-3.363'], '0.04': ['-13.19', '-3.375'], '0.14': ['-4.160', '-3.249'], '0.16': ['-3.616', '-3.207'], '0.10': ['-5.726', '-3.319'], '0.12': ['-4.836', '-3.287'], '0.18': ['-3.160', '-3.160']}, '0.38': {'0.08': ['-6.539', '-0.757'], '0.02': ['-25.37', '-0.940'], '0.20': ['-1.986', '-0.231'], '0.24': ['-1.202', '0.000'], '0.06': ['-8.670', '-0.824'], '0.04': ['-12.86', '-0.885'], '0.14': ['-3.536', '-0.522'], '0.16': ['-2.941', '-0.432'], '0.10': ['-5.207', '-0.684'], '0.12': ['-4.263', '-0.606'], '0.18': ['-2.434', '-0.335'], '0.22': ['-1.579', '-0.120']}, '0.36': {'0.08': ['-6.606', '-9.555'], '0.28': ['-0.642', '0.000'], '0.02': ['-25.42', '-1.123'], '0.20': ['-2.089', '-0.464'], '0.26': ['-0.974', '-0.129'], '0.24': ['-1.322', '-0.249'], '0.06': ['-8.731', '-1.017'], '0.04': ['-12.91', '-1.073'], '0.14': ['-3.620', '-0.736'], '0.16': ['-3.032', '-0.651'], '0.10': ['-5.280', '-0.887'], '0.12': ['-4.342', '-0.814'], '0.18': ['-2.531', '-0.561'], '0.22': ['-1.690', '-0.360']}, '0.34 ': {'0.08': ['-6.668', '-1.159'], '0.28': ['-0.771', '-0.269'], '0.02': ['-25.46', '-1.312'], '0.20': ['-2.186', '-0.701'], '0.26': ['-1.094', '-0.389'], '0.24': ['-1.433', '-0.500'], '0.06': ['-8.788', '-1.21 6'], '0.32': ['-0.152', '0.000'], '0.04': ['-12.96', '-1.267'], '0.30': ['-0.459', '-0.140'], '0.14': ['-3.699', '-0.955'], '0.16': ['-3.116', '-0.876'], '0.10': ['-5.348', '-1.096'], '0.12': ['-4.415', '-1.02 8'], '0.18': ['-2.621', '-0.791'], '0.22': ['-1.794', '-0.604']}, '0.32': {'0.08': ['-6.725', '-1.371'], '0.28': ['-0.891', '-0.541'], '0.02': ['-25.50', '-1.510'], '0.20': ['-2.277', '-0.944'], '0.26': ['-1.2 06', '-0.653'], '0.24': ['-1.537', '-0.756'], '0.06': ['-8.839', '-1.423'], '0.32': ['-0.292', '-0.292'], '0.04': ['-13.01', '-1.470'], '0.30': ['-0.588', '-0.421'], '0.14': ['-3.773', '-1.181'], '0.16': ['-3. 195', '-1.108'], '0.10': ['-5.411', '-1.313'], '0.12': ['-4.484', '-1.250'], '0.18': ['-2.706', '-1.028'], '0.22': ['-1.891', '-0.853']}, '0.30': {'0.08': ['-6.778', '-1.596'], '0.28': ['-1.002', '-0.819'], '0 .02': ['-25.54', '-1.718'], '0.20': ['-2.361', '-1.195'], '0.26': ['-1.309', '-0.923'], '0.24': ['-1.634', '-1.020'], '0.06': ['-8.886', '-1.641'], '0.04': ['-13.05', '-1.682'], '0.30': ['-0.708', '-0.708'], ' 0.14': ['-3.842', '-1.417'], '0.16': ['-3.269', '-1.348'], '0.10': ['-5.469', '-1.540'], '0.12': ['-4.547', '-1.481'], '0.18': ['-2.785', '-1.274'], '0.22': ['-1.981', '-1.110']}, '0.24': {'0.08': ['-6.910', ' -2.349'], '0.02': ['-25.61', '-2.431'], '0.20': ['-2.584', '-2.032'], '0.24': ['-1.888', '-1.888'], '0.06': ['-8.999', '-2.382'], '0.04': ['-23.14', '-2.410'], '0.14': ['-4.021', '-2.212'], '0.16': ['-3.463', '-2.157'], '0.10': ['-5.618', '-2.309'], '0.12': ['-4.711', '-2.263'], '0.18': ['-2.993', '-2.097'], '0.22': ['-2.220', '-1.963']}}
w1, w2 = 0., 0.
wstart, incr = 0.01, 0.02
if w1i < wstart:
w1 = '%4.2f' % (wstart + old_div(incr, 2.))
if w2i < wstart:
w2 = '%4.2f' % (wstart + old_div(incr, 2.))
wnext = wstart + incr
while wstart < 0.5:
if w1i >= wstart and w1i < wnext:
w1 = '%4.2f' % (wstart + old_div(incr, 2.))
if w2i >= wstart and w2i < wnext:
w2 = '%4.2f' % (wstart + old_div(incr, 2.))
wstart += incr
wnext += incr
k1, k2 = float(K[w2][w1][0]), float(K[w2][w1][1])
return k1, k2
def get_next_appointment(self, appointment_group_ids=None):
"""
Get next appointment.
Return the next appointment available to sign up for. The appointment
is returned in a one-element array. If no future appointments are
available, an empty array is returned.
"""
path = {}
data = {}
params = {}
# OPTIONAL - appointment_group_ids
"""List of ids of appointment groups to search."""
if appointment_group_ids is not None:
params["appointment_group_ids"] = appointment_group_ids
self.logger.debug("GET /api/v1/appointment_groups/next_appointment with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/appointment_groups/next_appointment".format(**path), data=data, params=params, all_pages=True)
def factory(
cls, file_id=None, path=None, url=None, blob=None, mime=None,
prefer_local_download=True, prefer_str=False, create_instance=True
):
"""
Creates a new InputFile subclass instance fitting the given parameters.
:param prefer_local_download: If `True`, we download the file and send it to telegram. This is the default.
If `False`, we send Telegram just the URL, and they'll try to download it.
:type prefer_local_download: bool
:param prefer_str: Return just the `str` instead of a `InputFileUseFileID` or `InputFileUseUrl` object.
:type prefer_str: bool
:param create_instance: If we should return a instance ready to use (default),
or the building parts being a tuple of `(class, args_tuple, kwargs_dict)`.
Setting this to `False` is probably only ever required for internal usage
by the :class:`InputFile` constructor which uses this very factory.
:type create_instance: bool
:returns: if `create_instance=True` it returns a instance of some InputFile subclass or a string,
if `create_instance=False` it returns a tuple of the needed class, args and kwargs needed
to create a instance.
:rtype: InputFile|InputFileFromBlob|InputFileFromDisk|InputFileFromURL|str|tuple
"""
if create_instance:
clazz, args, kwargs = cls.factory(
file_id=file_id,
path=path,
url=url,
blob=blob,
mime=mime,
create_instance=False,
)
return clazz(*args, **kwargs)
if file_id:
if prefer_str:
assert_type_or_raise(file_id, str, parameter_name='file_id')
return str, (file_id,), dict()
# end if
return InputFileUseFileID, (file_id,), dict()
if blob:
name = "file"
suffix = ".blob"
if path:
name = os_path.basename(os_path.normpath(path)) # http://stackoverflow.com/a/3925147/3423324#last-part
name, suffix = os_path.splitext(name) # http://stackoverflow.com/a/541394/3423324#extension
elif url:
# http://stackoverflow.com/a/18727481/3423324#how-to-extract-a-filename-from-a-url
url = urlparse(url)
name = os_path.basename(url.path)
name, suffix = os_path.splitext(name)
# end if
if mime:
import mimetypes
suffix = mimetypes.guess_extension(mime)
suffix = '.jpg' if suffix == '.jpe' else suffix # .jpe -> .jpg
# end if
if not suffix or not suffix.strip().lstrip("."):
logger.debug("suffix was empty. Using '.blob'")
suffix = ".blob"
# end if
name = "{filename}{suffix}".format(filename=name, suffix=suffix)
return InputFileFromBlob, (blob,), dict(name=name, mime=mime)
if path:
return InputFileFromDisk, (path,), dict(mime=mime)
if url:
if prefer_local_download:
return InputFileFromURL, (url,), dict(mime=mime)
# end if
# else -> so we wanna let telegram handle it
if prefer_str:
assert_type_or_raise(url, str, parameter_name='url')
return str, (url,), dict()
# end if
return InputFileUseUrl, (url,), dict()
# end if
raise ValueError('Could not find a matching subclass. You might need to do it manually instead.')
def filter_creation_date(groups, start, end):
"""Filter log groups by their creation date.
Also sets a group-specific 'exportStart' value to the later of the
group's creation date and the given start.
"""
results = []
for g in groups:
created = datetime.fromtimestamp(g['creationTime'] / 1000.0)
if created > end:
continue
if created > start:
g['exportStart'] = created
else:
g['exportStart'] = start
results.append(g)
return results
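# Usage sketch (added for illustration): drop groups created after the export
# window and start each remaining group's export no earlier than its creation.
from datetime import datetime, timedelta

example_now = datetime.now()
example_groups = [
    {"logGroupName": "old", "creationTime": (example_now - timedelta(days=30)).timestamp() * 1000},
    {"logGroupName": "new", "creationTime": (example_now + timedelta(days=1)).timestamp() * 1000},
]
kept = filter_creation_date(example_groups, example_now - timedelta(days=7), example_now)
# -> only "old" survives, with exportStart == the given start (7 days ago)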
def loadSchema(uri, base_uri=None):
"""Load an XSD XML document (specified by filename or URL), and return a
:class:`lxml.etree.XMLSchema`.
"""
# uri to use for reporting errors - include base uri if any
if uri in _loaded_schemas:
return _loaded_schemas[uri]
error_uri = uri
if base_uri is not None:
error_uri += ' (base URI %s)' % base_uri
try:
logger.debug('Loading schema %s' % uri)
_loaded_schemas[uri] = etree.XMLSchema(etree.parse(uri,
parser=_get_xmlparser(),
base_url=base_uri))
return _loaded_schemas[uri]
except IOError as io_err:
# add a little more detail to the error message - but should still be an IO error
raise IOError('Failed to load schema %s : %s' % (error_uri, io_err))
except etree.XMLSchemaParseError as parse_err:
# re-raise as a schema parse error, but ensure includes details about schema being loaded
raise etree.XMLSchemaParseError('Failed to parse schema %s -- %s' % (error_uri, parse_err))
def _get_substitute_element(head, elt, ps):
'''if elt matches a member of the head substitutionGroup, return
the GED typecode.
head -- ElementDeclaration typecode,
elt -- the DOM element being parsed
ps -- ParsedSoap Instance
'''
if not isinstance(head, ElementDeclaration):
return None
return ElementDeclaration.getSubstitutionElement(head, elt, ps)
def model_tree(name, model_cls, visited=None):
"""Create a simple tree of model's properties and its related models.
    It traverses through relations, but ignores any loops.
:param name: name of the model
:type name: str
:param model_cls: model class
:param visited: set of visited models
:type visited: list or None
:return: a dictionary where values are lists of string or other \
dictionaries
"""
if not visited:
visited = set()
visited.add(model_cls)
mapper = class_mapper(model_cls)
columns = [column.key for column in mapper.column_attrs]
related = [model_tree(rel.key, rel.mapper.entity, visited)
for rel in mapper.relationships if rel.mapper.entity not in visited]
return {name: columns + related} | 0.002525 |
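A small sketch of what model_tree returns, assuming SQLAlchemy 1.4+ (where declarative_base lives in sqlalchemy.orm); the two toy models are defined here purely for illustration.

from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()

class Parent(Base):
    __tablename__ = 'parent'
    id = Column(Integer, primary_key=True)
    children = relationship('Child', backref='parent')

class Child(Base):
    __tablename__ = 'child'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('parent.id'))

# The relationship key ('children') names the nested dict; the back-reference
# to the already-visited Parent is skipped, so no loop is produced.
print(model_tree('parent', Parent))
# {'parent': ['id', {'children': ['id', 'parent_id']}]}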
def satisfaction_ratings_list(self, score=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings#list-satisfaction-ratings"
api_path = "/api/v2/satisfaction_ratings.json"
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if score:
api_query.update({
"score": score,
})
return self.call(api_path, query=api_query, **kwargs) | 0.005825 |
def define_selector(by, value, el_class):
"""
:param by:
:param value:
:param el_class:
:rtype: tuple[type, str|tuple[str, str]]
:return:
"""
el = el_class
selector = by
if isinstance(value, six.string_types):
selector = (by, value)
elif value is not None:
el = value
if el is None:
el = elements.PageElement
return el, selector | 0.002463 |
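A short sketch of the defaulting behaviour; the locator strings and LoginForm class are illustrative, and elements.PageElement is assumed to come from the surrounding package.

# String value -> locator tuple, default element class:
el_cls, locator = define_selector('css selector', '#login', None)
# el_cls is elements.PageElement, locator == ('css selector', '#login')

# No value string -> the bare `by` is returned and the given class is kept:
class LoginForm:
    pass

el_cls, locator = define_selector('xpath', None, LoginForm)
# el_cls is LoginForm, locator == 'xpath'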
def _convert_schemas(mapping, schemas):
"""Convert schemas to be compatible with storage schemas.
Foreign keys related operations.
Args:
mapping (dict): mapping between resource name and table name
schemas (list): schemas
Raises:
ValueError: if there is no resource
for some foreign key in given mapping
Returns:
list: converted schemas
"""
schemas = deepcopy(schemas)
for schema in schemas:
for fk in schema.get('foreignKeys', []):
resource = fk['reference']['resource']
if resource != 'self':
if resource not in mapping:
message = 'Not resource "%s" for foreign key "%s"'
message = message % (resource, fk)
raise ValueError(message)
fk['reference']['resource'] = mapping[resource]
return schemas | 0.001104 |
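An illustrative call with a hypothetical resource-to-table mapping and one Table-Schema-style descriptor; only the foreignKeys handling shown above is exercised.

mapping = {'people': 'tbl_people', 'cities': 'tbl_cities'}
schemas = [{
    'fields': [{'name': 'id'}, {'name': 'city_id'}],
    'foreignKeys': [
        {'fields': ['city_id'], 'reference': {'resource': 'cities', 'fields': ['id']}},
    ],
}]

converted = _convert_schemas(mapping, schemas)
assert converted[0]['foreignKeys'][0]['reference']['resource'] == 'tbl_cities'
# The input is deep-copied, so the original descriptor is left untouched:
assert schemas[0]['foreignKeys'][0]['reference']['resource'] == 'cities'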
def in6_getRandomizedIfaceId(ifaceid, previous=None):
"""
Implements the interface ID generation algorithm described in RFC 3041.
The function takes the Modified EUI-64 interface identifier generated
as described in RFC 4291 and an optional previous history value (the
first element of the output of this function). If no previous interface
identifier is provided, a random one is generated. The function returns
a tuple containing the randomized interface identifier and the history
value (for possible future use). Input and output values are provided in
a "printable" format as depicted below.
ex:
>>> in6_getRandomizedIfaceId('20b:93ff:feeb:2d3')
('4c61:76ff:f46a:a5f3', 'd006:d540:db11:b092')
>>> in6_getRandomizedIfaceId('20b:93ff:feeb:2d3',
previous='d006:d540:db11:b092')
('fe97:46fe:9871:bd38', 'eeed:d79c:2e3f:62e')
"""
s = []
if previous is None:
#d = b"".join(map(chr, range(256)))
d = list(range(256))
for i in range(8):
s.append(random.choice(d))
s = bytes(s)
previous = s
s = inet_pton(socket.AF_INET6, "::"+ifaceid)[8:] + previous
import hashlib
s = hashlib.md5(s).digest()
s1,s2 = s[:8],s[8:]
s1 = bytes([(s1[0]) | 0x04]) + s1[1:]
s1 = inet_ntop(socket.AF_INET6, b"\xff"*8 + s1)[20:]
s2 = inet_ntop(socket.AF_INET6, b"\xff"*8 + s2)[20:]
return (s1, s2) | 0.005446 |
def _get(self, *args, **kwargs):
"""
A wrapper for getting things
:returns: The response of your get
:rtype: dict
"""
response = requests.get(*args, **kwargs)
response.raise_for_status()
return response.json() | 0.007246 |
def atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=None ):
"""
What's the zonefile inventory vector for this peer?
Return None if not defined
"""
inv = None
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return None
inv = ptbl[peer_hostport]['zonefile_inv']
return inv | 0.007937 |
def _set_node_output(self, node_id, no_call, next_nds=None, **kw):
"""
Set the node outputs from node inputs.
:param node_id:
Data or function node id.
:type node_id: str
:param no_call:
If True data node estimation function is not used.
:type no_call: bool
:return:
If the output have been evaluated correctly.
:rtype: bool
"""
# Namespace shortcuts.
node_attr = self.nodes[node_id]
node_type = node_attr['type']
if node_type == 'data': # Set data node.
return self._set_data_node_output(node_id, node_attr, no_call,
next_nds, **kw)
elif node_type == 'function': # Set function node.
return self._set_function_node_output(node_id, node_attr, no_call,
next_nds, **kw) | 0.002121 |
def AddSlur(self, item):
'''
Very simple method which is used for adding slurs.
:param item:
:return:
'''
if not hasattr(self, "slurs"):
self.slurs = []
self.slurs.append(item) | 0.008197 |
def get_structure_with_charges(self, structure_filename):
"""
get a Structure with Mulliken and Loewdin charges as site properties
Args:
structure_filename: filename of POSCAR
Returns:
Structure Object with Mulliken and Loewdin charges as site properties
"""
struct = Structure.from_file(structure_filename)
Mulliken = self.Mulliken
Loewdin = self.Loewdin
site_properties = {"Mulliken Charges": Mulliken, "Loewdin Charges": Loewdin}
new_struct = struct.copy(site_properties=site_properties)
return new_struct | 0.006441 |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(HBaseCollector, self).get_default_config()
config.update({
'path': 'hbase',
'metrics': ['/var/log/hbase/*.metrics'],
})
return config | 0.006452 |
def get_client_unread_messages_count(self, client_name=None):
"""Gets count of unread messages from client
"""
client = self._clients.get_with_name(client_name)[0]
return client.get_messages_count_in_buffer() | 0.008333 |
def iso_date_to_datetime(string):
"""
>>> iso_date_to_datetime('2013-12-26T10:11:12Z')
datetime.datetime(2013, 12, 26, 10, 11, 12)
>>> iso_date_to_datetime('2013-12-26T10:11:12.456789Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 456789)
>>> iso_date_to_datetime('2013-12-26T10:11:12.30Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 300000)
>>> iso_date_to_datetime('2013-12-26T10:11:12.00001Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 10)
>>> iso_date_to_datetime('2013-12-26T10:11:12.000001Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 1)
>>> iso_date_to_datetime('2013-12-26T10:11:12.0000001Z')
datetime.datetime(2013, 12, 26, 10, 11, 12)
>>> iso_date_to_datetime('2013-12-26T10:11:12.000000Z')
datetime.datetime(2013, 12, 26, 10, 11, 12)
"""
nums = DATE_TIMESPLIT.split(string)
if nums[-1] == '':
nums = nums[:-1]
if len(nums) == 7:
nums[6] = nums[6][:6]
nums[6] += PAD_MICRO[len(nums[6]):]
the_datetime = datetime.datetime(*(int(num) for num in nums))
return the_datetime | 0.000908 |
def verify(opts):
"""
Verify that one or more resources were downloaded successfully.
"""
resources = _load(opts.resources, opts.output_dir)
if opts.all:
opts.resource_names = ALL
invalid = _invalid(resources, opts.resource_names)
if not invalid:
if not opts.quiet:
print("All resources successfully downloaded")
return 0
else:
if not opts.quiet:
print("Invalid or missing resources: {}".format(', '.join(invalid)))
return 1 | 0.003846 |
def add_from_depend(self, node, from_module):
"""add dependencies created by from-imports
"""
mod_name = node.root().name
obj = self.module(mod_name)
if from_module not in obj.node.depends:
obj.node.depends.append(from_module) | 0.007194 |
def run(image_id, name=None, tags=None, key_name=None, security_groups=None,
user_data=None, instance_type='m1.small', placement=None,
kernel_id=None, ramdisk_id=None, monitoring_enabled=None, vpc_id=None,
vpc_name=None, subnet_id=None, subnet_name=None, private_ip_address=None,
block_device_map=None, disable_api_termination=None,
instance_initiated_shutdown_behavior=None, placement_group=None,
client_token=None, security_group_ids=None, security_group_names=None,
additional_info=None, tenancy=None, instance_profile_arn=None,
instance_profile_name=None, ebs_optimized=None,
network_interface_id=None, network_interface_name=None,
region=None, key=None, keyid=None, profile=None, network_interfaces=None):
#TODO: support multi-instance reservations
'''
Create and start an EC2 instance.
Returns True if the instance was created; otherwise False.
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.run ami-b80c2b87 name=myinstance
image_id
(string) – The ID of the image to run.
name
(string) - The name of the instance.
tags
(dict of key: value pairs) - tags to apply to the instance.
key_name
(string) – The name of the key pair with which to launch instances.
security_groups
(list of strings) – The names of the EC2 classic security groups with
which to associate instances
user_data
(string) – The Base64-encoded MIME user data to be made available to the
instance(s) in this reservation.
instance_type
(string) – The type of instance to run. Note that some image types
(e.g. hvm) only run on some instance types.
placement
(string) – The Availability Zone to launch the instance into.
kernel_id
(string) – The ID of the kernel with which to launch the instances.
ramdisk_id
(string) – The ID of the RAM disk with which to launch the instances.
monitoring_enabled
(bool) – Enable detailed CloudWatch monitoring on the instance.
vpc_id
(string) - ID of a VPC to bind the instance to. Exclusive with vpc_name.
vpc_name
(string) - Name of a VPC to bind the instance to. Exclusive with vpc_id.
subnet_id
(string) – The subnet ID within which to launch the instances for VPC.
subnet_name
(string) – The name of a subnet within which to launch the instances for VPC.
private_ip_address
(string) – If you’re using VPC, you can optionally use this parameter to
assign the instance a specific available IP address from the subnet
(e.g. 10.0.0.25).
block_device_map
(boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping
data structure describing the EBS volumes associated with the Image.
(string) - A string representation of a BlockDeviceMapping structure
(dict) - A dict describing a BlockDeviceMapping structure
YAML example:
.. code-block:: yaml
device-maps:
/dev/sdb:
ephemeral_name: ephemeral0
/dev/sdc:
ephemeral_name: ephemeral1
/dev/sdd:
ephemeral_name: ephemeral2
/dev/sde:
ephemeral_name: ephemeral3
/dev/sdf:
size: 20
volume_type: gp2
disable_api_termination
(bool) – If True, the instances will be locked and will not be able to
be terminated via the API.
instance_initiated_shutdown_behavior
(string) – Specifies whether the instance stops or terminates on
instance-initiated shutdown. Valid values are: stop, terminate
placement_group
(string) – If specified, this is the name of the placement group in
which the instance(s) will be launched.
client_token
(string) – Unique, case-sensitive identifier you provide to ensure
idempotency of the request. Maximum 64 ASCII characters.
security_group_ids
(list of strings) – The ID(s) of the VPC security groups with which to
associate instances.
security_group_names
(list of strings) – The name(s) of the VPC security groups with which to
associate instances.
additional_info
(string) – Specifies additional information to make available to the
instance(s).
tenancy
(string) – The tenancy of the instance you want to launch. An instance
with a tenancy of ‘dedicated’ runs on single-tenant hardware and can
only be launched into a VPC. Valid values are:”default” or “dedicated”.
NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well.
instance_profile_arn
(string) – The Amazon resource name (ARN) of the IAM Instance Profile
(IIP) to associate with the instances.
instance_profile_name
(string) – The name of the IAM Instance Profile (IIP) to associate with
the instances.
ebs_optimized
(bool) – Whether the instance is optimized for EBS I/O. This
optimization provides dedicated throughput to Amazon EBS and an
optimized configuration stack to provide optimal EBS I/O performance.
This optimization isn’t available with all instance types.
network_interfaces
(boto.ec2.networkinterface.NetworkInterfaceCollection) – A
NetworkInterfaceCollection data structure containing the ENI
specifications for the instance.
network_interface_id
(string) - ID of the network interface to attach to the instance
network_interface_name
(string) - Name of the network interface to attach to the instance
'''
if all((subnet_id, subnet_name)):
raise SaltInvocationError('Only one of subnet_name or subnet_id may be '
'provided.')
if subnet_name:
r = __salt__['boto_vpc.get_resource_id']('subnet', subnet_name,
region=region, key=key,
keyid=keyid, profile=profile)
if 'id' not in r:
log.warning('Couldn\'t resolve subnet name %s.', subnet_name)
return False
subnet_id = r['id']
if all((security_group_ids, security_group_names)):
raise SaltInvocationError('Only one of security_group_ids or '
'security_group_names may be provided.')
if security_group_names:
security_group_ids = []
for sgn in security_group_names:
r = __salt__['boto_secgroup.get_group_id'](sgn, vpc_name=vpc_name,
region=region, key=key,
keyid=keyid, profile=profile)
if not r:
log.warning('Couldn\'t resolve security group name %s', sgn)
return False
security_group_ids += [r]
network_interface_args = list(map(int, [network_interface_id is not None,
network_interface_name is not None,
network_interfaces is not None]))
if sum(network_interface_args) > 1:
raise SaltInvocationError('Only one of network_interface_id, '
'network_interface_name or '
'network_interfaces may be provided.')
if network_interface_name:
result = get_network_interface_id(network_interface_name,
region=region, key=key,
keyid=keyid,
profile=profile)
network_interface_id = result['result']
if not network_interface_id:
log.warning(
"Given network_interface_name '%s' cannot be mapped to an "
"network_interface_id", network_interface_name
)
if network_interface_id:
interface = NetworkInterfaceSpecification(
network_interface_id=network_interface_id,
device_index=0)
else:
interface = NetworkInterfaceSpecification(
subnet_id=subnet_id,
groups=security_group_ids,
device_index=0)
if network_interfaces:
interfaces_specs = [NetworkInterfaceSpecification(**x) for x in network_interfaces]
interfaces = NetworkInterfaceCollection(*interfaces_specs)
else:
interfaces = NetworkInterfaceCollection(interface)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
reservation = conn.run_instances(image_id, key_name=key_name, security_groups=security_groups,
user_data=user_data, instance_type=instance_type,
placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id,
monitoring_enabled=monitoring_enabled,
private_ip_address=private_ip_address,
block_device_map=_to_blockdev_map(block_device_map),
disable_api_termination=disable_api_termination,
instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,
placement_group=placement_group, client_token=client_token,
additional_info=additional_info,
tenancy=tenancy, instance_profile_arn=instance_profile_arn,
instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized,
network_interfaces=interfaces)
if not reservation:
log.warning('Instance could not be reserved')
return False
instance = reservation.instances[0]
status = 'pending'
while status == 'pending':
time.sleep(5)
status = instance.update()
if status == 'running':
if name:
instance.add_tag('Name', name)
if tags:
instance.add_tags(tags)
return {'instance_id': instance.id}
else:
log.warning(
'Instance could not be started -- status is "%s"',
status
) | 0.002188 |
def _runargs(argstring):
""" Entrypoint for debugging
"""
import shlex
parser = cli.make_arg_parser()
args = parser.parse_args(shlex.split(argstring))
run(args) | 0.005435 |
def subscriber_has_active_subscription(subscriber, plan=None):
"""
Helper function to check if a subscriber has an active subscription.
    Throws ImproperlyConfigured if the subscriber is an instance of AUTH_USER_MODEL
and get_user_model().is_anonymous == True.
Activate subscription rules (or):
* customer has active subscription
If the subscriber is an instance of AUTH_USER_MODEL, active subscription rules (or):
* customer has active subscription
* user.is_superuser
* user.is_staff
:param subscriber: The subscriber for which to check for an active subscription.
:type subscriber: dj-stripe subscriber
:param plan: The plan for which to check for an active subscription. If plan is None and
there exists only one subscription, this method will check if that subscription
is active. Calling this method with no plan and multiple subscriptions will throw
an exception.
:type plan: Plan or string (plan ID)
"""
if isinstance(subscriber, AnonymousUser):
raise ImproperlyConfigured(ANONYMOUS_USER_ERROR_MSG)
if isinstance(subscriber, get_user_model()):
if subscriber.is_superuser or subscriber.is_staff:
return True
from .models import Customer
customer, created = Customer.get_or_create(subscriber)
if created or not customer.has_active_subscription(plan):
return False
return True | 0.026276 |
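A hedged sketch of how this helper might guard a Django view; the view name, URL name, template, and plan ID are invented for illustration.

from django.shortcuts import redirect, render

def premium_dashboard(request):
    # 'pro-monthly' is a hypothetical plan ID; staff and superusers pass automatically,
    # while an anonymous user raises ImproperlyConfigured per the helper above.
    if not subscriber_has_active_subscription(request.user, plan='pro-monthly'):
        return redirect('upgrade')
    return render(request, 'dashboard.html')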
def main(port, ip, command, loglevel):
"""Console script for satel_integra."""
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(level=numeric_level)
click.echo("Demo of satel_integra library")
if command == "demo":
demo(ip, port) | 0.005013 |
def reporter(self):
"""
Creates a report of the results
"""
        # Create a dictionary mapping gene names without alleles or accessions (e.g. sul1_18_AY260546 becomes sul1) to their descriptions
genedict = dict()
# Load the notes file to a dictionary
notefile = os.path.join(self.targetpath, 'notes.txt')
with open(notefile, 'r') as notes:
for line in notes:
# Ignore comment lines - they will break the parsing
if line.startswith('#'):
continue
# Split the line on colons e.g. stx1Aa: Shiga toxin 1, subunit A, variant a: has three variables after
# the split: gene(stx1Aa), description(Shiga toxin 1, subunit A, variant a), and _(\n)
try:
gene, description, _ = line.split(':')
# There are exceptions to the parsing. Some lines only have one :, while others have three. Allow for
# these possibilities.
except ValueError:
try:
gene, description = line.split(':')
except ValueError:
gene, description, _, _ = line.split(':')
# Set up the description dictionary
genedict[gene] = description.replace(', ', '_').strip()
# Find unique gene names with the highest percent identity
for sample in self.runmetadata.samples:
try:
if sample[self.analysistype].results:
# Initialise a dictionary to store the unique genes, and their percent identities
sample[self.analysistype].uniquegenes = dict()
for name, identity in sample[self.analysistype].results.items():
# Split the name of the gene from the string e.g. stx1:11:Z36899:11 yields stx1
if ':' in name:
sample[self.analysistype].delimiter = ':'
else:
sample[self.analysistype].delimiter = '_'
genename = name.split(sample[self.analysistype].delimiter)[0]
# Set the best observed percent identity for each unique gene
try:
# Pull the previous best identity from the dictionary
bestidentity = sample[self.analysistype].uniquegenes[genename]
# If the current identity is better than the old identity, save it
if float(identity) > float(bestidentity):
sample[self.analysistype].uniquegenes[genename] = float(identity)
# Initialise the dictionary if necessary
except KeyError:
sample[self.analysistype].uniquegenes[genename] = float(identity)
except AttributeError:
raise
# Create the path in which the reports are stored
make_path(self.reportpath)
# Initialise strings to store the results
data = 'Strain,Gene,Subtype/Allele,Description,Accession,PercentIdentity,FoldCoverage\n'
with open(os.path.join(self.reportpath, self.analysistype + '.csv'), 'w') as report:
for sample in self.runmetadata.samples:
try:
if sample[self.analysistype].results:
# If there are many results for a sample, don't write the sample name in each line of the report
for name, identity in sorted(sample[self.analysistype].results.items()):
# Check to see which delimiter is used to separate the gene name, allele, accession, and
# subtype information in the header
if len(name.split(sample[self.analysistype].delimiter)) == 4:
# Split the name on the delimiter: stx2A:63:AF500190:d; gene: stx2A, allele: 63,
# accession: AF500190, subtype: d
genename, allele, accession, subtype = name.split(sample[self.analysistype].delimiter)
elif len(name.split(sample[self.analysistype].delimiter)) == 3:
# Treat samples without a subtype e.g. icaC:intercellular adhesion protein C: differently.
# Extract the allele as the 'subtype', and the gene name, and accession as above
genename, subtype, accession = name.split(sample[self.analysistype].delimiter)
else:
genename = name
subtype = ''
accession = ''
# Retrieve the best identity for each gene
percentid = sample[self.analysistype].uniquegenes[genename]
# If the percent identity of the current gene matches the best percent identity, add it to
# the report - there can be multiple occurrences of genes e.g.
# sul1,1,AY224185,100.00,840 and sul1,2,CP002151,100.00,927 are both included because they
# have the same 100% percent identity
if float(identity) == percentid:
# Treat the initial vs subsequent results for each sample slightly differently - instead
# of including the sample name, use an empty cell instead
try:
description = genedict[genename]
except KeyError:
description = 'na'
# Populate the results
data += '{samplename},{gene},{subtype},{description},{accession},{identity},{depth}\n'\
.format(samplename=sample.name,
gene=genename,
subtype=subtype,
description=description,
accession=accession,
identity=identity,
depth=sample[self.analysistype].avgdepth[name])
else:
data += sample.name + '\n'
except (KeyError, AttributeError):
data += sample.name + '\n'
# Write the strings to the file
report.write(data) | 0.005295 |
def add_new_grid_headers(self, new_headers, er_items, pmag_items):
"""
Add in all user-added headers.
If those new headers depend on other headers, add the other headers too.
"""
def add_pmag_reqd_headers():
if self.grid_type == 'result':
return []
add_in = []
col_labels = self.grid.col_labels
for reqd_head in self.grid_headers[self.grid_type]['pmag'][1]:
if reqd_head in self.er_magic.double:
if reqd_head + "++" not in col_labels:
add_in.append(reqd_head + "++")
else:
if reqd_head not in col_labels:
add_in.append(reqd_head)
add_in = builder.remove_list_headers(add_in)
return add_in
#
already_present = []
for name in new_headers:
if name:
if name not in self.grid.col_labels:
col_number = self.grid.add_col(name)
# add to appropriate headers list
if name in er_items:
self.grid_headers[self.grid_type]['er'][0].append(str(name))
if name in pmag_items:
name = name.strip('++')
if name not in self.grid_headers[self.grid_type]['pmag'][0]:
self.grid_headers[self.grid_type]['pmag'][0].append(str(name))
# add any required pmag headers that are not in the grid already
for header in add_pmag_reqd_headers():
col_number = self.grid.add_col(header)
# add drop_down_menus for added reqd columns
if header in vocab.possible_vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
if header in ['magic_method_codes++']:
self.drop_down_menu.add_method_drop_down(col_number, header)
# add drop down menus for user-added column
if name in vocab.possible_vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
if name in ['magic_method_codes', 'magic_method_codes++']:
self.drop_down_menu.add_method_drop_down(col_number, name)
else:
already_present.append(name)
#pw.simple_warning('You are already using column header: {}'.format(name))
return already_present | 0.004842 |
def ReadBytes(self, address, num_bytes):
"""Reads at most num_bytes starting from offset <address>."""
address = int(address)
buf = ctypes.create_string_buffer(num_bytes)
bytesread = ctypes.c_size_t(0)
res = ReadProcessMemory(self.h_process, address, buf, num_bytes,
ctypes.byref(bytesread))
if res == 0:
err = wintypes.GetLastError()
if err == 299:
# Only part of ReadProcessMemory has been done, let's return it.
return buf.raw[:bytesread.value]
raise process_error.ProcessError("Error in ReadProcessMemory: %d" % err)
return buf.raw[:bytesread.value] | 0.006211 |
def addORFs(fig, seq, minX, maxX, offsetAdjuster):
"""
fig is a matplotlib figure.
seq is a Bio.Seq.Seq.
minX: the smallest x coordinate.
maxX: the largest x coordinate.
featureEndpoints: an array of features as returned by addFeatures (may be
empty).
offsetAdjuster: a function to adjust feature X axis offsets for plotting.
"""
for frame in range(3):
target = seq[frame:]
for (codons, codonType, color) in (
(START_CODONS, 'start', 'green'),
(STOP_CODONS, 'stop', 'red')):
offsets = list(map(offsetAdjuster, findCodons(target, codons)))
if offsets:
fig.plot(offsets, np.tile(frame, len(offsets)), marker='.',
markersize=4, color=color, linestyle='None')
fig.axis([minX, maxX, -1, 3])
fig.set_yticks(np.arange(3))
fig.set_ylabel('Frame', fontsize=17)
fig.set_title('Subject start (%s) and stop (%s) codons' % (
', '.join(sorted(START_CODONS)), ', '.join(sorted(STOP_CODONS))),
fontsize=20) | 0.000926 |
def validate_header(header, required_fields=None):
'''validate_header ensures that the first row contains the exp_id,
var_name, var_value, and token. Capitalization isn't important, but
    ordering is. This criterion is very strict, but it's reasonable
to require.
Parameters
==========
header: the header row, as a list
required_fields: a list of required fields. We derive the required
length from this list.
Does not return, instead exits if malformed. Runs silently if OK.
'''
if required_fields is None:
required_fields = ['exp_id', 'var_name', 'var_value', 'token']
# The required length of the header based on required fields
length = len(required_fields)
# This is very strict, but no reason not to be
header = _validate_row(header, required_length=length)
header = [x.lower() for x in header]
for idx in range(length):
field = header[idx].lower().strip()
if required_fields[idx] != field:
bot.error('Malformed header field %s, exiting.' %field)
sys.exit(1) | 0.003524 |
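Two quick illustrations of the check, assuming the module-level _validate_row helper accepts a well-formed row: case and surrounding whitespace are forgiven, but the names and their order must match exactly.

validate_header(['Exp_Id', ' var_name ', 'VAR_VALUE', 'token'])   # passes silently

# Reordering (or renaming) a column logs an error and calls sys.exit(1):
# validate_header(['token', 'exp_id', 'var_name', 'var_value'])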
def assert_series_equal(left, right, data_function=None, data_args=None):
"""
For unit testing equality of two Series.
:param left: first Series
:param right: second Series
:param data_function: if provided will use this function to assert compare the df.data
:param data_args: arguments to pass to the data_function
:return: nothing
"""
assert type(left) == type(right)
if data_function:
data_args = {} if not data_args else data_args
data_function(left.data, right.data, **data_args)
else:
assert left.data == right.data
assert left.index == right.index
assert left.data_name == right.data_name
assert left.index_name == right.index_name
assert left.sort == right.sort
if isinstance(left, rc.ViewSeries):
assert left.offset == right.offset
if isinstance(left, rc.Series):
assert left.blist == right.blist | 0.002186 |
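A hedged example of the comparison helper, assuming raccoon's Series constructor accepts data/index/data_name keyword arguments and that numpy's testing helper is a reasonable choice for the optional data_function path.

import raccoon as rc
from numpy.testing import assert_array_almost_equal

s1 = rc.Series(data=[1.0, 2.0, 3.0], index=[1, 2, 3], data_name='x')
s2 = rc.Series(data=[1.0, 2.0, 3.0], index=[1, 2, 3], data_name='x')

assert_series_equal(s1, s2)  # direct equality on .data
assert_series_equal(s1, s2, data_function=assert_array_almost_equal)  # compare .data via numpy instead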
def _advance(self):
""" Return the value of the current token and read the next one into
self.cur_token.
"""
cur_val = None if self.cur_token is None else self.cur_token.value
try:
self.cur_token = next(self._tokenizer)
except StopIteration:
self.cur_token = None
return cur_val | 0.005525 |
def add_reorganize_data(self, name, input_name, output_name, mode = 'SPACE_TO_DEPTH', block_size = 2):
"""
Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE".
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
- If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension.
Input is spatially divided into non-overlapping blocks of size block_size X block_size
and data from each block is moved to the channel dimension.
              Output CHW dimensions are: [C * block_size * block_size, H/block_size, W/block_size].
- If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension.
Reverse of the operation 'SPACE_TO_DEPTH'.
              Output CHW dimensions are: [C/(block_size * block_size), H * block_size, W * block_size].
block_size: int
            Must be greater than 1. Must divide H and W when mode is 'SPACE_TO_DEPTH'. (block_size * block_size)
must divide C when mode is 'DEPTH_TO_SPACE'.
See Also
--------
add_flatten, add_reshape
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.reorganizeData
# Set the parameters
if block_size < 2:
raise ValueError("Invalid block_size value %d. Must be greater than 1." % block_size)
spec_layer_params.blockSize = block_size
if mode == 'SPACE_TO_DEPTH':
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('SPACE_TO_DEPTH')
elif mode == 'DEPTH_TO_SPACE':
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('DEPTH_TO_SPACE')
else:
raise NotImplementedError(
'Unknown reorganization mode %s ' % mode) | 0.007191 |
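A hedged usage sketch, assuming this method lives on coremltools' NeuralNetworkBuilder (as the spec/nn_spec attributes suggest); the feature names and shapes are invented.

import coremltools.models.datatypes as datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

input_features = [('data', datatypes.Array(4, 8, 8))]     # C, H, W
output_features = [('reorg', datatypes.Array(16, 4, 4))]  # C*block_size^2, H/block_size, W/block_size
builder = NeuralNetworkBuilder(input_features, output_features)

builder.add_reorganize_data('space_to_depth', input_name='data', output_name='reorg',
                            mode='SPACE_TO_DEPTH', block_size=2)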
def _property_create_dict(header, data):
'''
Create a property dict
'''
prop = dict(zip(header, _merge_last(data, len(header))))
prop['name'] = _property_normalize_name(prop['property'])
prop['type'] = _property_detect_type(prop['name'], prop['values'])
prop['edit'] = from_bool(prop['edit'])
if 'inherit' in prop:
prop['inherit'] = from_bool(prop['inherit'])
del prop['property']
return prop | 0.002273 |
def sample(a=None, temperature=1.0):
"""Sample an index from a probability array.
Parameters
----------
a : list of float
List of probabilities.
temperature : float or None
The higher the more uniform. When a = [0.1, 0.2, 0.7],
        - temperature = 0.7, the distribution will be sharpened [0.05048273, 0.13588945, 0.81362782]
- temperature = 1.0, the distribution will be the same [0.1, 0.2, 0.7]
        - temperature = 1.5, the distribution will be flattened [0.16008435, 0.25411807, 0.58579758]
- If None, it will be ``np.argmax(a)``
Notes
------
- No matter what is the temperature and input list, the sum of all probabilities will be one. Even if input list = [1, 100, 200], the sum of all probabilities will still be one.
    - For large vocabulary size, choose a higher temperature or ``tl.nlp.sample_top`` to avoid error.
"""
if a is None:
raise Exception("a : list of float")
b = np.copy(a)
try:
if temperature == 1:
return np.argmax(np.random.multinomial(1, a, 1))
if temperature is None:
return np.argmax(a)
else:
a = np.log(a) / temperature
a = np.exp(a) / np.sum(np.exp(a))
return np.argmax(np.random.multinomial(1, a, 1))
except Exception:
# np.set_printoptions(threshold=np.nan)
# tl.logging.info(a)
# tl.logging.info(np.sum(a))
# tl.logging.info(np.max(a))
# tl.logging.info(np.min(a))
# exit()
message = "For large vocabulary_size, choice a higher temperature\
to avoid log error. Hint : use ``sample_top``. "
warnings.warn(message, Warning)
# tl.logging.info(a)
# tl.logging.info(b)
return np.argmax(np.random.multinomial(1, b, 1)) | 0.00324 |
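A short illustration of the temperature behaviour described above, assuming the function is in scope; the probability list is the one from the docstring.

import numpy as np

probs = [0.1, 0.2, 0.7]
np.random.seed(0)

idx_sharp = sample(probs, temperature=0.7)    # sharpened: index 2 is drawn even more often
idx_plain = sample(probs, temperature=1.0)    # drawn from [0.1, 0.2, 0.7] unchanged
idx_greedy = sample(probs, temperature=None)  # deterministic argmax -> 2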
def find_id(self, element_id):
"""Find a single element with the given ID.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
found element
"""
element = _transform.FigureElement.find_id(self, element_id)
return Element(element.root) | 0.005602 |
def search(self, search_phrase, limit=None):
""" Finds datasets by search phrase.
Args:
search_phrase (str or unicode):
limit (int, optional): how many results to return. None means without limit.
Returns:
list of DatasetSearchResult instances.
"""
query_string = self._make_query_from_terms(search_phrase)
self._parsed_query = query_string
schema = self._get_generic_schema()
parser = QueryParser('doc', schema=schema)
query = parser.parse(query_string)
datasets = defaultdict(DatasetSearchResult)
# collect all datasets
logger.debug('Searching datasets using `{}` query.'.format(query))
with self.index.searcher() as searcher:
results = searcher.search(query, limit=limit)
for hit in results:
vid = hit['vid']
datasets[vid].vid = hit['vid']
datasets[vid].b_score += hit.score
# extend datasets with partitions
logger.debug('Extending datasets with partitions.')
for partition in self.backend.partition_index.search(search_phrase):
datasets[partition.dataset_vid].p_score += partition.score
datasets[partition.dataset_vid].partitions.add(partition)
return list(datasets.values()) | 0.002212 |
def stats(args):
"""Create stats from the analysis
"""
logger.info("Reading sequeces")
data = parse_ma_file(args.ma)
logger.info("Get sequences from sam")
is_align = _read_sam(args.sam)
is_json, is_db = _read_json(args.json)
res = _summarise_sam(data, is_align, is_json, is_db)
_write_suma(res, os.path.join(args.out, "stats_align.dat"))
logger.info("Done") | 0.002519 |
def _start_callables(self, row, callables):
"""Start running `callables` asynchronously.
"""
id_vals = {c: row[c] for c in self.ids}
def callback(tab, cols, result):
if isinstance(result, Mapping):
pass
elif isinstance(result, tuple):
result = dict(zip(cols, result))
elif len(cols) == 1:
# Don't bother raising an exception if cols != 1
# because it would be lost in the thread.
result = {cols[0]: result}
result.update(id_vals)
tab._write(result)
if self._pool is None:
self._pool = Pool()
if self._lock is None:
self._lock = multiprocessing.Lock()
for cols, fn in callables:
cb_func = partial(callback, self, cols)
gen = None
if inspect.isgeneratorfunction(fn):
gen = fn()
elif inspect.isgenerator(fn):
gen = fn
if gen:
def callback_for_each():
for i in gen:
cb_func(i)
self._pool.apply_async(callback_for_each)
else:
self._pool.apply_async(fn, callback=cb_func) | 0.001555 |
def add_to_win32_PATH(script_fpath, *add_path_list):
r"""
    Writes a registry script to update the PATH variable into the sync registry
CommandLine:
python -m utool.util_win32 --test-add_to_win32_PATH --newpath "C:\Program Files (x86)\Graphviz2.38\bin"
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_win32 import * # NOQA
>>> script_fpath = join(ut.truepath('~'), 'Sync/win7/registry', 'UPDATE_PATH.reg')
>>> new_path = ut.get_argval('--newpath', str, default=None)
>>> result = add_to_win32_PATH(script_fpath, new_path)
>>> print(result)
"""
import utool as ut
write_dir = dirname(script_fpath)
key = '[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]'
rtype = 'REG_EXPAND_SZ'
# Read current PATH values
win_pathlist = list(os.environ['PATH'].split(os.path.pathsep))
new_path_list = ut.unique_ordered(win_pathlist + list(add_path_list))
#new_path_list = unique_ordered(win_pathlist, rob_pathlist)
print('\n'.join(new_path_list))
pathtxt = pathsep.join(new_path_list)
varval_list = [('Path', pathtxt)]
regfile_str = make_regfile_str(key, varval_list, rtype)
ut.view_directory(write_dir)
print(regfile_str)
ut.writeto(script_fpath, regfile_str, mode='wb')
print('Please have an admin run the script. You may need to restart') | 0.005622 |